diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
index 024953d92c14..a456d821b8cc 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
@@ -372,6 +372,13 @@ public final class ScmConfigKeys {
       "ozone.scm.pipeline.per.metadata.disk";
   public static final int OZONE_SCM_PIPELINE_PER_METADATA_VOLUME_DEFAULT =
       2;
+
+  public static final String OZONE_DATANODE_RATIS_VOLUME_FREE_SPACE_MIN =
+      "ozone.scm.datanode.ratis.volume.free-space.min";
+
+  public static final String
+      OZONE_DATANODE_RATIS_VOLUME_FREE_SPACE_MIN_DEFAULT = "1GB";
+
   // Max timeout for pipeline to stay at ALLOCATED state before scrubbed.
   public static final String OZONE_SCM_PIPELINE_ALLOCATED_TIMEOUT =
       "ozone.scm.pipeline.allocated.timeout";
diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml
index 81d174c11591..a66ee9368775 100644
--- a/hadoop-hdds/common/src/main/resources/ozone-default.xml
+++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml
@@ -2901,4 +2901,15 @@
       directory deleting service per time interval.
     </description>
   </property>
+
+  <property>
+    <name>ozone.scm.datanode.ratis.volume.free-space.min</name>
+    <value>1GB</value>
+    <tag>OZONE, DATANODE</tag>
+    <description>Minimum free space required on each Ratis volume of a
+      datanode before the datanode can hold a new pipeline.
+      A datanode whose Ratis volumes all have less free space than this
+      value will not be allocated a new pipeline or container replica.
+    </description>
+  </property>
 </configuration>
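For context: the new key is read through ConfigurationSource#getStorageSize, so any storage-unit suffix accepted elsewhere in Ozone (KB, MB, GB, ...) works here too, and Hadoop storage units are binary. A minimal sketch of how a configured value resolves to bytes, assuming a plain OzoneConfiguration; the class name is illustrative and not part of the patch:

import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.conf.StorageUnit;

public final class FreeSpaceMinSketch {
  public static void main(String[] args) {
    OzoneConfiguration conf = new OzoneConfiguration();
    // Same parsing path the patch uses in SCMCommonPlacementPolicy below.
    conf.set("ozone.scm.datanode.ratis.volume.free-space.min", "10MB");
    long metaSizeRequired = (long) conf.getStorageSize(
        "ozone.scm.datanode.ratis.volume.free-space.min",
        "1GB", // OZONE_DATANODE_RATIS_VOLUME_FREE_SPACE_MIN_DEFAULT
        StorageUnit.BYTES);
    System.out.println(metaSizeRequired); // 10485760, i.e. 10 * 1024 * 1024
  }
}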
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/SCMCommonPlacementPolicy.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/SCMCommonPlacementPolicy.java
index ce0223b61f13..73760982d32b 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/SCMCommonPlacementPolicy.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/SCMCommonPlacementPolicy.java
@@ -24,7 +24,9 @@
 import com.google.common.base.Preconditions;
 import org.apache.hadoop.hdds.conf.ConfigurationSource;
+import org.apache.hadoop.hdds.conf.StorageUnit;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.MetadataStorageReportProto;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.StorageReportProto;
 import org.apache.hadoop.hdds.scm.container.placement.algorithms.ContainerPlacementStatusDefault;
 import org.apache.hadoop.hdds.scm.exceptions.SCMException;
@@ -37,6 +39,9 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_DATANODE_RATIS_VOLUME_FREE_SPACE_MIN;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_DATANODE_RATIS_VOLUME_FREE_SPACE_MIN_DEFAULT;
+
 /**
  * This policy implements a set of invariants which are common
  * for all basic placement policies, acts as the repository of helper
@@ -169,13 +174,35 @@ public boolean hasEnoughSpace(DatanodeDetails datanodeDetails,
       long sizeRequired) {
     Preconditions.checkArgument(datanodeDetails instanceof DatanodeInfo);
 
+    long metaSizeRequired = (long) conf.getStorageSize(
+        OZONE_DATANODE_RATIS_VOLUME_FREE_SPACE_MIN,
+        OZONE_DATANODE_RATIS_VOLUME_FREE_SPACE_MIN_DEFAULT,
+        StorageUnit.BYTES);
+
+    boolean enoughForData = false;
+    boolean enoughForMeta = false;
+
     DatanodeInfo datanodeInfo = (DatanodeInfo) datanodeDetails;
     for (StorageReportProto reportProto : datanodeInfo.getStorageReports()) {
       if (reportProto.getRemaining() > sizeRequired) {
-        return true;
+        enoughForData = true;
+        break;
       }
     }
-    return false;
+
+    if (!enoughForData) {
+      return false;
+    }
+
+    for (MetadataStorageReportProto reportProto
+        : datanodeInfo.getMetadataStorageReports()) {
+      if (reportProto.getRemaining() > metaSizeRequired) {
+        enoughForMeta = true;
+        break;
+      }
+    }
+
+    return enoughForData && enoughForMeta;
   }
 
   /**
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeInfo.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeInfo.java
index edd616fb50d9..bf1d64f9a269 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeInfo.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeInfo.java
@@ -143,6 +143,20 @@ public List<StorageReportProto> getStorageReports() {
     }
   }
 
+  /**
+   * Returns the metadata storage reports associated with this datanode.
+   *
+   * @return list of metadata storage reports
+   */
+  public List<MetadataStorageReportProto> getMetadataStorageReports() {
+    try {
+      lock.readLock().lock();
+      return metadataStorageReports;
+    } finally {
+      lock.readLock().unlock();
+    }
+  }
+
   /**
    * Returns count of healthy volumes reported from datanode.
    * @return count of healthy volumes
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelinePlacementPolicy.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelinePlacementPolicy.java
index c43373875a70..b9aba123773b 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelinePlacementPolicy.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelinePlacementPolicy.java
@@ -41,6 +41,9 @@
 import java.util.Set;
 import java.util.stream.Collectors;
 
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_DATANODE_RATIS_VOLUME_FREE_SPACE_MIN;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_DATANODE_RATIS_VOLUME_FREE_SPACE_MIN_DEFAULT;
+
 /**
  * Pipeline placement policy that choose datanodes based on load balancing
  * and network topology to supply pipeline creation.
@@ -158,15 +161,21 @@ List<DatanodeDetails> filterViableNodes(
         ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE_DEFAULT,
         StorageUnit.BYTES);
 
+    long metaSizeRequired = (long) conf.getStorageSize(
+        OZONE_DATANODE_RATIS_VOLUME_FREE_SPACE_MIN,
+        OZONE_DATANODE_RATIS_VOLUME_FREE_SPACE_MIN_DEFAULT,
+        StorageUnit.BYTES);
+
     // filter nodes that don't even have space for one container
     List<DatanodeDetails> canHoldList = healthyNodes.stream().filter(d ->
         hasEnoughSpace(d, sizeRequired)).collect(Collectors.toList());
 
     if (canHoldList.size() < nodesRequired) {
       msg = String.format("Pipeline creation failed due to no sufficient" +
-          " healthy datanodes with enough space for even a single container." +
-          " Required %d. Found %d. Container size %d.",
-          nodesRequired, canHoldList.size(), sizeRequired);
+          " healthy datanodes with enough space for container data and " +
+          "metadata. Required %d. Found %d. Container data required %d, " +
+          "metadata required %d.",
+          nodesRequired, canHoldList.size(), sizeRequired, metaSizeRequired);
       LOG.warn(msg);
       throw new SCMException(msg,
           SCMException.ResultCodes.FAILED_TO_FIND_SUITABLE_NODE);
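The reworked hasEnoughSpace is now a two-stage predicate: at least one data volume must still fit a whole container, and at least one Ratis (metadata) volume must clear the new free-space floor. A standalone sketch of that logic, with plain longs standing in for the proto report types; this is an illustration, not part of the patch:

import java.util.Arrays;
import java.util.List;

final class SpaceCheckSketch {
  // Mirrors the patched check: some data volume fits a container AND
  // some metadata volume exceeds the configured minimum free space.
  static boolean hasEnoughSpace(List<Long> dataRemaining,
      List<Long> metaRemaining, long sizeRequired, long metaSizeRequired) {
    boolean enoughForData =
        dataRemaining.stream().anyMatch(r -> r > sizeRequired);
    if (!enoughForData) {
      return false; // short-circuits before the metadata pass, as in the patch
    }
    return metaRemaining.stream().anyMatch(r -> r > metaSizeRequired);
  }

  public static void main(String[] args) {
    // 5 GB free on the data volume, but only 512 MB on the single Ratis
    // volume: with the 1 GB default the datanode is rejected.
    System.out.println(hasEnoughSpace(
        Arrays.asList(5L << 30), Arrays.asList(512L << 20),
        1L << 30, 1L << 30)); // prints false
  }
}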
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestUtils.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestUtils.java
index 57e1a25b766a..4d364efa31ce 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestUtils.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestUtils.java
@@ -60,7 +60,9 @@
 import org.apache.hadoop.hdds.protocol
     .proto.StorageContainerDatanodeProtocolProtos.CommandStatusReportsProto;
 import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.StorageReportProto;
+    .StorageContainerDatanodeProtocolProtos.StorageReportProto;
+import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerDatanodeProtocolProtos.MetadataStorageReportProto;
 import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.StorageTypeProto;
 import org.apache.hadoop.hdds.scm.node.SCMNodeManager;
@@ -149,7 +151,7 @@ public static List<DatanodeDetails> getListOfRegisteredDatanodeDetails(
    * @return NodeReportProto
    */
   public static NodeReportProto getRandomNodeReport() {
-    return getRandomNodeReport(1);
+    return getRandomNodeReport(1, 1);
   }
 
   /**
@@ -157,12 +159,15 @@ public static NodeReportProto getRandomNodeReport() {
    *
    * @param numberOfStorageReport number of storage report this node report
    *                              should have
+   * @param numberOfMetadataStorageReport number of metadata storage report
+   *                                      this node report should have
    * @return NodeReportProto
    */
-  public static NodeReportProto getRandomNodeReport(int numberOfStorageReport) {
+  public static NodeReportProto getRandomNodeReport(int numberOfStorageReport,
+      int numberOfMetadataStorageReport) {
     UUID nodeId = UUID.randomUUID();
     return getRandomNodeReport(nodeId, File.separator + nodeId,
-        numberOfStorageReport);
+        numberOfStorageReport, numberOfMetadataStorageReport);
   }
 
   /**
@@ -172,42 +177,41 @@ public static NodeReportProto getRandomNodeReport(int numberOfStorageReport) {
    * @param nodeId datanode id
    * @param basePath base path of storage directory
    * @param numberOfStorageReport number of storage report
+   * @param numberOfMetadataStorageReport number of metadata storage report
    *
    * @return NodeReportProto
    */
   public static NodeReportProto getRandomNodeReport(UUID nodeId,
-      String basePath, int numberOfStorageReport) {
+      String basePath, int numberOfStorageReport,
+      int numberOfMetadataStorageReport) {
     List<StorageReportProto> storageReports = new ArrayList<>();
     for (int i = 0; i < numberOfStorageReport; i++) {
       storageReports.add(getRandomStorageReport(nodeId,
-          basePath + File.separator + i));
+          basePath + File.separator + "data-" + i));
     }
-    return createNodeReport(storageReports);
-  }
-
-  /**
-   * Creates NodeReport with the given storage reports.
-   *
-   * @param reports one or more storage report
-   *
-   * @return NodeReportProto
-   */
-  public static NodeReportProto createNodeReport(
-      StorageReportProto... reports) {
-    return createNodeReport(Arrays.asList(reports));
+
+    List<MetadataStorageReportProto> metadataStorageReports =
+        new ArrayList<>();
+    for (int i = 0; i < numberOfMetadataStorageReport; i++) {
+      metadataStorageReports.add(getRandomMetadataStorageReport(
+          basePath + File.separator + "metadata-" + i));
+    }
+    return createNodeReport(storageReports, metadataStorageReports);
   }
 
   /**
    * Creates NodeReport with the given storage reports.
    *
    * @param reports storage reports to be included in the node report.
-   *
+   * @param metaReports metadata storage reports to be included
+   *                    in the node report.
    * @return NodeReportProto
    */
   public static NodeReportProto createNodeReport(
-      List<StorageReportProto> reports) {
+      List<StorageReportProto> reports,
+      List<MetadataStorageReportProto> metaReports) {
     NodeReportProto.Builder nodeReport = NodeReportProto.newBuilder();
     nodeReport.addAllStorageReport(reports);
+    nodeReport.addAllMetadataStorageReport(metaReports);
     return nodeReport.build();
   }
 
@@ -228,6 +232,22 @@ public static StorageReportProto getRandomStorageReport(UUID nodeId,
         StorageTypeProto.DISK);
   }
 
+  /**
+   * Generates random metadata storage report.
+   *
+   * @param path path of the storage
+   *
+   * @return MetadataStorageReportProto
+   */
+  public static MetadataStorageReportProto getRandomMetadataStorageReport(
+      String path) {
+    return createMetadataStorageReport(path,
+        random.nextInt(1000),
+        random.nextInt(500),
+        random.nextInt(500),
+        StorageTypeProto.DISK);
+  }
+
   public static StorageReportProto createStorageReport(UUID nodeId, String path,
       long capacity, long used, long remaining, StorageTypeProto type) {
     return createStorageReport(nodeId, path, capacity, used, remaining,
@@ -263,6 +283,39 @@ public static StorageReportProto createStorageReport(UUID nodeId, String path,
     return srb.build();
   }
 
+  public static MetadataStorageReportProto createMetadataStorageReport(
+      String path, long capacity, long used, long remaining,
+      StorageTypeProto type) {
+    return createMetadataStorageReport(path, capacity, used, remaining,
+        type, false);
+  }
+  /**
+   * Creates metadata storage report with the given information.
+   *
+   * @param path storage dir
+   * @param capacity storage size
+   * @param used space used
+   * @param remaining space remaining
+   * @param type type of storage
+   *
+   * @return MetadataStorageReportProto
+   */
+  public static MetadataStorageReportProto createMetadataStorageReport(
+      String path, long capacity, long used, long remaining,
+      StorageTypeProto type, boolean failed) {
+    Preconditions.checkNotNull(path);
+    MetadataStorageReportProto.Builder srb = MetadataStorageReportProto
+        .newBuilder();
+    srb.setStorageLocation(path)
+        .setCapacity(capacity)
+        .setScmUsed(used)
+        .setFailed(failed)
+        .setRemaining(remaining);
+    StorageTypeProto storageTypeProto =
+        type == null ? StorageTypeProto.DISK : type;
+    srb.setStorageType(storageTypeProto);
+    return srb.build();
+  }
 
   /**
    * Generates random container reports.
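Since createNodeReport now takes two lists, every test below follows the same calling convention. A compact sketch of the pattern, using the helpers added above; paths and sizes are arbitrary and the class is illustrative only:

import java.util.Arrays;
import java.util.UUID;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.MetadataStorageReportProto;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.NodeReportProto;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.StorageReportProto;
import org.apache.hadoop.hdds.scm.TestUtils;

final class NodeReportSketch {
  static NodeReportProto nodeReportWithMeta() {
    UUID dnId = UUID.randomUUID();
    // One data volume and one metadata volume, each with 90 units remaining.
    StorageReportProto data = TestUtils.createStorageReport(
        dnId, "/data-" + dnId, 100, 10, 90, null);
    MetadataStorageReportProto meta = TestUtils.createMetadataStorageReport(
        "/metadata-" + dnId, 100, 10, 90, null);
    return TestUtils.createNodeReport(
        Arrays.asList(data), Arrays.asList(meta));
  }
}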
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java index 7d97d9042d7a..751dee838fd3 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java @@ -23,6 +23,8 @@ import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState; import org.apache.hadoop.hdds.protocol.proto .StorageContainerDatanodeProtocolProtos.StorageReportProto; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerDatanodeProtocolProtos.MetadataStorageReportProto; import org.apache.hadoop.hdds.protocol.proto .StorageContainerDatanodeProtocolProtos.NodeReportProto; import org.apache.hadoop.hdds.protocol.proto @@ -243,7 +245,13 @@ public List getNodes( StorageReportProto storage1 = TestUtils.createStorageReport( di.getUuid(), "/data1-" + di.getUuidString(), capacity, used, remaining, null); + MetadataStorageReportProto metaStorage1 = + TestUtils.createMetadataStorageReport( + "/metadata1-" + di.getUuidString(), capacity, used, + remaining, null); di.updateStorageReports(new ArrayList<>(Arrays.asList(storage1))); + di.updateMetaDataStorageReports( + new ArrayList<>(Arrays.asList(metaStorage1))); healthyNodesWithInfo.add(di); } diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestContainerPlacementFactory.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestContainerPlacementFactory.java index 0b46a64d8913..34f27ce4b57e 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestContainerPlacementFactory.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestContainerPlacementFactory.java @@ -22,8 +22,10 @@ import java.util.List; import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.conf.StorageUnit; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.MockDatanodeDetails; +import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.MetadataStorageReportProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.StorageReportProto; import org.apache.hadoop.hdds.scm.ContainerPlacementStatus; import org.apache.hadoop.hdds.scm.PlacementPolicy; @@ -42,6 +44,7 @@ import org.junit.Test; import org.mockito.Mockito; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_DATANODE_RATIS_VOLUME_FREE_SPACE_MIN; import static org.apache.hadoop.hdds.scm.net.NetConstants.LEAF_SCHEMA; import static org.apache.hadoop.hdds.scm.net.NetConstants.RACK_SCHEMA; import static org.apache.hadoop.hdds.scm.net.NetConstants.ROOT_SCHEMA; @@ -73,6 +76,8 @@ public void setup() { public void testRackAwarePolicy() throws IOException { conf.set(ScmConfigKeys.OZONE_SCM_CONTAINER_PLACEMENT_IMPL_KEY, SCMContainerPlacementRackAware.class.getName()); + conf.setStorageSize(OZONE_DATANODE_RATIS_VOLUME_FREE_SPACE_MIN, + 0, StorageUnit.MB); NodeSchema[] schemas = new NodeSchema[] {ROOT_SCHEMA, RACK_SCHEMA, LEAF_SCHEMA}; @@ -91,8 +96,14 @@ public void testRackAwarePolicy() throws IOException { StorageReportProto storage1 = TestUtils.createStorageReport( datanodeInfo.getUuid(), "/data1-" + 
datanodeInfo.getUuidString(), STORAGE_CAPACITY, 0, 100L, null); + MetadataStorageReportProto metaStorage1 = + TestUtils.createMetadataStorageReport( + "/metadata1-" + datanodeInfo.getUuidString(), + STORAGE_CAPACITY, 0, 100L, null); datanodeInfo.updateStorageReports( new ArrayList<>(Arrays.asList(storage1))); + datanodeInfo.updateMetaDataStorageReports( + new ArrayList<>(Arrays.asList(metaStorage1))); datanodes.add(datanodeInfo); cluster.add(datanodeInfo); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementCapacity.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementCapacity.java index 5f448d7d09fd..f1ff0d9bf4df 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementCapacity.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementCapacity.java @@ -22,10 +22,11 @@ import java.util.List; import java.util.Map; -import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.conf.StorageUnit; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.MockDatanodeDetails; +import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.MetadataStorageReportProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.StorageReportProto; import org.apache.hadoop.hdds.scm.TestUtils; import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeMetric; @@ -36,6 +37,8 @@ import org.apache.hadoop.hdds.scm.node.NodeStatus; import org.junit.Assert; import org.junit.Test; + +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_DATANODE_RATIS_VOLUME_FREE_SPACE_MIN; import static org.mockito.Matchers.anyObject; import org.mockito.Mockito; import static org.mockito.Mockito.when; @@ -47,7 +50,10 @@ public class TestSCMContainerPlacementCapacity { @Test public void chooseDatanodes() throws SCMException { //given - ConfigurationSource conf = new OzoneConfiguration(); + OzoneConfiguration conf = new OzoneConfiguration(); + // We are using small units here + conf.setStorageSize(OZONE_DATANODE_RATIS_VOLUME_FREE_SPACE_MIN, + 1, StorageUnit.BYTES); List datanodes = new ArrayList<>(); for (int i = 0; i < 7; i++) { @@ -58,8 +64,14 @@ public void chooseDatanodes() throws SCMException { StorageReportProto storage1 = TestUtils.createStorageReport( datanodeInfo.getUuid(), "/data1-" + datanodeInfo.getUuidString(), 100L, 0, 100L, null); + MetadataStorageReportProto metaStorage1 = + TestUtils.createMetadataStorageReport( + "/metadata1-" + datanodeInfo.getUuidString(), + 100L, 0, 100L, null); datanodeInfo.updateStorageReports( new ArrayList<>(Arrays.asList(storage1))); + datanodeInfo.updateMetaDataStorageReports( + new ArrayList<>(Arrays.asList(metaStorage1))); datanodes.add(datanodeInfo); } diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRackAware.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRackAware.java index e1a8d4cc6ec2..013d3ff77dd6 100644 --- 
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRackAware.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRackAware.java @@ -21,10 +21,11 @@ import java.util.Collection; import java.util.List; -import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.conf.StorageUnit; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.MockDatanodeDetails; +import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.MetadataStorageReportProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.StorageReportProto; import org.apache.hadoop.hdds.scm.ContainerPlacementStatus; import org.apache.hadoop.hdds.scm.TestUtils; @@ -45,6 +46,8 @@ import org.mockito.Mockito; import org.apache.commons.lang3.StringUtils; + +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_DATANODE_RATIS_VOLUME_FREE_SPACE_MIN; import static org.apache.hadoop.hdds.scm.net.NetConstants.LEAF_SCHEMA; import static org.apache.hadoop.hdds.scm.net.NetConstants.RACK_SCHEMA; import static org.apache.hadoop.hdds.scm.net.NetConstants.ROOT_SCHEMA; @@ -63,7 +66,7 @@ @RunWith(Parameterized.class) public class TestSCMContainerPlacementRackAware { private NetworkTopology cluster; - private ConfigurationSource conf; + private OzoneConfiguration conf; private NodeManager nodeManager; private final Integer datanodeCount; private final List datanodes = new ArrayList<>(); @@ -90,6 +93,9 @@ public static Collection setupDatanodes() { public void setup() { //initialize network topology instance conf = new OzoneConfiguration(); + // We are using small units here + conf.setStorageSize(OZONE_DATANODE_RATIS_VOLUME_FREE_SPACE_MIN, + 1, StorageUnit.BYTES); NodeSchema[] schemas = new NodeSchema[] {ROOT_SCHEMA, RACK_SCHEMA, LEAF_SCHEMA}; NodeSchemaManager.getInstance().init(schemas, true); @@ -108,8 +114,14 @@ public void setup() { StorageReportProto storage1 = TestUtils.createStorageReport( datanodeInfo.getUuid(), "/data1-" + datanodeInfo.getUuidString(), STORAGE_CAPACITY, 0, 100L, null); + MetadataStorageReportProto metaStorage1 = + TestUtils.createMetadataStorageReport( + "/metadata1-" + datanodeInfo.getUuidString(), + STORAGE_CAPACITY, 0, 100L, null); datanodeInfo.updateStorageReports( new ArrayList<>(Arrays.asList(storage1))); + datanodeInfo.updateMetaDataStorageReports( + new ArrayList<>(Arrays.asList(metaStorage1))); datanodes.add(datanodeInfo); cluster.add(datanodeInfo); @@ -398,8 +410,14 @@ public void testDatanodeWithDefaultNetworkLocation() throws SCMException { StorageReportProto storage1 = TestUtils.createStorageReport( datanodeInfo.getUuid(), "/data1-" + datanodeInfo.getUuidString(), STORAGE_CAPACITY, 0, 100L, null); + MetadataStorageReportProto metaStorage1 = + TestUtils.createMetadataStorageReport( + "/metadata1-" + datanodeInfo.getUuidString(), + STORAGE_CAPACITY, 0, 100L, null); datanodeInfo.updateStorageReports( new ArrayList<>(Arrays.asList(storage1))); + datanodeInfo.updateMetaDataStorageReports( + new ArrayList<>(Arrays.asList(metaStorage1))); dataList.add(datanodeInfo); clusterMap.add(datanodeInfo); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRandom.java 
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRandom.java index 942a55b0d2cf..2654b47b58d2 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRandom.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRandom.java @@ -20,10 +20,11 @@ import java.util.Arrays; import java.util.List; -import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.conf.StorageUnit; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.MockDatanodeDetails; +import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.MetadataStorageReportProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.StorageReportProto; import org.apache.hadoop.hdds.scm.ContainerPlacementStatus; import org.apache.hadoop.hdds.scm.TestUtils; @@ -36,6 +37,7 @@ import org.junit.Test; import static junit.framework.TestCase.assertEquals; import static junit.framework.TestCase.assertTrue; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_DATANODE_RATIS_VOLUME_FREE_SPACE_MIN; import static org.junit.Assert.assertFalse; import org.mockito.Mockito; import static org.mockito.Mockito.when; @@ -48,7 +50,10 @@ public class TestSCMContainerPlacementRandom { @Test public void chooseDatanodes() throws SCMException { //given - ConfigurationSource conf = new OzoneConfiguration(); + OzoneConfiguration conf = new OzoneConfiguration(); + // We are using small units here + conf.setStorageSize(OZONE_DATANODE_RATIS_VOLUME_FREE_SPACE_MIN, + 1, StorageUnit.BYTES); List datanodes = new ArrayList<>(); for (int i = 0; i < 5; i++) { @@ -59,8 +64,14 @@ public void chooseDatanodes() throws SCMException { StorageReportProto storage1 = TestUtils.createStorageReport( datanodeInfo.getUuid(), "/data1-" + datanodeInfo.getUuidString(), 100L, 0, 100L, null); + MetadataStorageReportProto metaStorage1 = + TestUtils.createMetadataStorageReport( + "/metadata1-" + datanodeInfo.getUuidString(), + 100L, 0, 100L, null); datanodeInfo.updateStorageReports( new ArrayList<>(Arrays.asList(storage1))); + datanodeInfo.updateMetaDataStorageReports( + new ArrayList<>(Arrays.asList(metaStorage1))); datanodes.add(datanodeInfo); } @@ -109,7 +120,9 @@ public void chooseDatanodes() throws SCMException { @Test public void testPlacementPolicySatisified() { //given - ConfigurationSource conf = new OzoneConfiguration(); + OzoneConfiguration conf = new OzoneConfiguration(); + conf.setStorageSize(OZONE_DATANODE_RATIS_VOLUME_FREE_SPACE_MIN, + 10, StorageUnit.MB); List datanodes = new ArrayList<>(); for (int i = 0; i < 3; i++) { diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDeadNodeHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDeadNodeHandler.java index ee31ae07259a..2a04af43e08c 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDeadNodeHandler.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDeadNodeHandler.java @@ -20,6 +20,7 @@ import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.THREE; import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType.RATIS; +import static 
org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_DATANODE_RATIS_VOLUME_FREE_SPACE_MIN; import java.io.File; import java.io.IOException; @@ -33,13 +34,14 @@ import org.apache.hadoop.hdds.HddsConfigKeys; import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.conf.StorageUnit; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.MockDatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.protocol.proto .StorageContainerDatanodeProtocolProtos.ContainerReplicaProto; import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.NodeReportProto; + .StorageContainerDatanodeProtocolProtos.MetadataStorageReportProto; import org.apache.hadoop.hdds.protocol.proto .StorageContainerDatanodeProtocolProtos.StorageReportProto; import org.apache.hadoop.hdds.scm.ScmConfigKeys; @@ -55,8 +57,6 @@ import org.apache.hadoop.hdds.scm.pipeline.PipelineProvider; import org.apache.hadoop.hdds.scm.pipeline.PipelineManagerV2Impl; import org.apache.hadoop.hdds.scm.pipeline.MockRatisPipelineProvider; -import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher - .NodeReportFromDatanode; import org.apache.hadoop.hdds.scm.server.StorageContainerManager; import org.apache.hadoop.hdds.server.events.EventPublisher; @@ -93,6 +93,8 @@ public void setup() throws IOException, AuthenticationException { conf.setTimeDuration(HddsConfigKeys.HDDS_SCM_WAIT_TIME_AFTER_SAFE_MODE_EXIT, 0, TimeUnit.SECONDS); conf.setInt(ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT, 2); + conf.setStorageSize(OZONE_DATANODE_RATIS_VOLUME_FREE_SPACE_MIN, + 10, StorageUnit.MB); storageDir = GenericTestUtils.getTempPath( TestDeadNodeHandler.class.getSimpleName() + UUID.randomUUID()); conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, storageDir); @@ -129,11 +131,17 @@ public void testOnMessage() throws Exception { DatanodeDetails datanode3 = MockDatanodeDetails.randomDatanodeDetails(); String storagePath = GenericTestUtils.getRandomizedTempPath() - .concat("/" + datanode1.getUuidString()); + .concat("/data-" + datanode1.getUuidString()); + String metaStoragePath = GenericTestUtils.getRandomizedTempPath() + .concat("/metadata-" + datanode1.getUuidString()); StorageReportProto storageOne = TestUtils.createStorageReport( datanode1.getUuid(), storagePath, 100 * OzoneConsts.TB, 10 * OzoneConsts.TB, 90 * OzoneConsts.TB, null); + MetadataStorageReportProto metaStorageOne = + TestUtils.createMetadataStorageReport(metaStoragePath, + 100 * OzoneConsts.GB, 10 * OzoneConsts.GB, + 90 * OzoneConsts.GB, null); // Exit safemode, as otherwise the safemode precheck will prevent pipelines // from getting created. Due to how this test is wired up, safemode will @@ -144,25 +152,34 @@ public void testOnMessage() throws Exception { // test case happy. 
nodeManager.register(datanode1, - TestUtils.createNodeReport(storageOne), null); + TestUtils.createNodeReport(Arrays.asList(storageOne), + Arrays.asList(metaStorageOne)), null); nodeManager.register(datanode2, - TestUtils.createNodeReport(storageOne), null); + TestUtils.createNodeReport(Arrays.asList(storageOne), + Arrays.asList(metaStorageOne)), null); nodeManager.register(datanode3, - TestUtils.createNodeReport(storageOne), null); + TestUtils.createNodeReport(Arrays.asList(storageOne), + Arrays.asList(metaStorageOne)), null); nodeManager.register(MockDatanodeDetails.randomDatanodeDetails(), - TestUtils.createNodeReport(storageOne), null); + TestUtils.createNodeReport(Arrays.asList(storageOne), + Arrays.asList(metaStorageOne)), null); nodeManager.register(MockDatanodeDetails.randomDatanodeDetails(), - TestUtils.createNodeReport(storageOne), null); + TestUtils.createNodeReport(Arrays.asList(storageOne), + Arrays.asList(metaStorageOne)), null); nodeManager.register(MockDatanodeDetails.randomDatanodeDetails(), - TestUtils.createNodeReport(storageOne), null); + TestUtils.createNodeReport(Arrays.asList(storageOne), + Arrays.asList(metaStorageOne)), null); nodeManager.register(MockDatanodeDetails.randomDatanodeDetails(), - TestUtils.createNodeReport(storageOne), null); + TestUtils.createNodeReport(Arrays.asList(storageOne), + Arrays.asList(metaStorageOne)), null); nodeManager.register(MockDatanodeDetails.randomDatanodeDetails(), - TestUtils.createNodeReport(storageOne), null); + TestUtils.createNodeReport(Arrays.asList(storageOne), + Arrays.asList(metaStorageOne)), null); nodeManager.register(MockDatanodeDetails.randomDatanodeDetails(), - TestUtils.createNodeReport(storageOne), null); + TestUtils.createNodeReport(Arrays.asList(storageOne), + Arrays.asList(metaStorageOne)), null); LambdaTestUtils.await(120000, 1000, () -> { @@ -265,10 +282,4 @@ private void registerContainers(DatanodeDetails datanode, .map(ContainerInfo::containerID) .collect(Collectors.toSet())); } - - private NodeReportFromDatanode getNodeReport(DatanodeDetails dn, - StorageReportProto... 
reports) { - NodeReportProto nodeReportProto = TestUtils.createNodeReport(reports); - return new NodeReportFromDatanode(dn, nodeReportProto); - } } diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeReportHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeReportHandler.java index 292042278983..d91f733d0514 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeReportHandler.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeReportHandler.java @@ -17,10 +17,13 @@ package org.apache.hadoop.hdds.scm.node; import java.io.IOException; +import java.util.Arrays; +import java.util.List; import java.util.UUID; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.MockDatanodeDetails; +import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.MetadataStorageReportProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.NodeReportProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.StorageReportProto; import org.apache.hadoop.hdds.scm.TestUtils; @@ -51,7 +54,9 @@ public class TestNodeReportHandler implements EventPublisher { private NodeReportHandler nodeReportHandler; private SCMNodeManager nodeManager; private String storagePath = GenericTestUtils.getRandomizedTempPath() - .concat("/" + UUID.randomUUID().toString()); + .concat("/data-" + UUID.randomUUID().toString()); + private String metaStoragePath = GenericTestUtils.getRandomizedTempPath() + .concat("/metadata-" + UUID.randomUUID().toString()); @Before public void resetEventCollector() throws IOException { @@ -69,11 +74,14 @@ public void testNodeReport() throws IOException { DatanodeDetails dn = MockDatanodeDetails.randomDatanodeDetails(); StorageReportProto storageOne = TestUtils .createStorageReport(dn.getUuid(), storagePath, 100, 10, 90, null); + MetadataStorageReportProto metaStorageOne = TestUtils + .createMetadataStorageReport(metaStoragePath, 100, 10, 90, null); SCMNodeMetric nodeMetric = nodeManager.getNodeStat(dn); Assert.assertNull(nodeMetric); - nodeManager.register(dn, getNodeReport(dn, storageOne).getReport(), null); + nodeManager.register(dn, getNodeReport(dn, Arrays.asList(storageOne), + Arrays.asList(metaStorageOne)).getReport(), null); nodeMetric = nodeManager.getNodeStat(dn); Assert.assertTrue(nodeMetric.get().getCapacity().get() == 100); @@ -83,7 +91,8 @@ public void testNodeReport() throws IOException { StorageReportProto storageTwo = TestUtils .createStorageReport(dn.getUuid(), storagePath, 100, 10, 90, null); nodeReportHandler.onMessage( - getNodeReport(dn, storageOne, storageTwo), this); + getNodeReport(dn, Arrays.asList(storageOne, storageTwo), + Arrays.asList(metaStorageOne)), this); nodeMetric = nodeManager.getNodeStat(dn); Assert.assertTrue(nodeMetric.get().getCapacity().get() == 200); @@ -93,8 +102,10 @@ public void testNodeReport() throws IOException { } private NodeReportFromDatanode getNodeReport(DatanodeDetails dn, - StorageReportProto... 
reports) { - NodeReportProto nodeReportProto = TestUtils.createNodeReport(reports); + List reports, + List metaReports) { + NodeReportProto nodeReportProto = + TestUtils.createNodeReport(reports, metaReports); return new NodeReportFromDatanode(dn, nodeReportProto); } diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java index 7127edf28a91..17c6a0f94504 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java @@ -21,6 +21,7 @@ import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; +import java.util.Collections; import java.util.List; import java.util.UUID; import java.util.concurrent.ExecutionException; @@ -936,7 +937,8 @@ public void testScmStatsFromNodeReport() String storagePath = testDir.getAbsolutePath() + "/" + dnId; StorageReportProto report = TestUtils .createStorageReport(dnId, storagePath, capacity, used, free, null); - nodeManager.register(dn, TestUtils.createNodeReport(report), null); + nodeManager.register(dn, TestUtils.createNodeReport( + Arrays.asList(report), Collections.emptyList()), null); nodeManager.processHeartbeat(dn); } //TODO: wait for EventQueue to be processed @@ -988,7 +990,8 @@ public void tesVolumeInfoFromNodeReport() used, free, null, failed)); failed = !failed; } - nodeManager.register(dn, TestUtils.createNodeReport(reports), null); + nodeManager.register(dn, TestUtils.createNodeReport(reports, + Collections.emptyList()), null); nodeManager.processHeartbeat(dn); //TODO: wait for EventQueue to be processed eventQueue.processAll(8000L); @@ -1039,7 +1042,8 @@ public void testScmNodeReportUpdate() StorageReportProto report = TestUtils .createStorageReport(dnId, storagePath, capacity, scmUsed, remaining, null); - NodeReportProto nodeReportProto = TestUtils.createNodeReport(report); + NodeReportProto nodeReportProto = TestUtils.createNodeReport( + Arrays.asList(report), Collections.emptyList()); nodeReportHandler.onMessage( new NodeReportFromDatanode(datanodeDetails, nodeReportProto), publisher); @@ -1159,7 +1163,8 @@ public void testHandlingSCMCommandEvent() eq.addHandler(DATANODE_COMMAND, nodemanager); nodemanager - .register(datanodeDetails, TestUtils.createNodeReport(report), + .register(datanodeDetails, TestUtils.createNodeReport( + Arrays.asList(report), Collections.emptyList()), TestUtils.getRandomPipelineReports()); eq.fireEvent(DATANODE_COMMAND, new CommandForDatanode<>(datanodeDetails.getUuid(), @@ -1337,7 +1342,8 @@ public void testGetNodeInfo() .createStorageReport(dnId, storagePath, capacity, used, remaining, null); - nodeManager.register(datanodeDetails, TestUtils.createNodeReport(report), + nodeManager.register(datanodeDetails, TestUtils.createNodeReport( + Arrays.asList(report), Collections.emptyList()), TestUtils.getRandomPipelineReports()); nodeManager.processHeartbeat(datanodeDetails); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeStorageStatMap.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeStorageStatMap.java index ccce3c3d26d7..bceed4210275 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeStorageStatMap.java +++ 
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeStorageStatMap.java @@ -33,6 +33,8 @@ import org.junit.Rule; import org.junit.rules.ExpectedException; +import java.util.Arrays; +import java.util.Collections; import java.util.List; import java.util.Map; import java.util.UUID; @@ -149,21 +151,24 @@ public void testProcessNodeReportCheckOneNode() throws IOException { StorageReportProto storageReport = TestUtils.createStorageReport(storageId, path, reportCapacity, reportScmUsed, reportRemaining, null); StorageReportResult result = - map.processNodeReport(key, TestUtils.createNodeReport(storageReport)); + map.processNodeReport(key, TestUtils.createNodeReport( + Arrays.asList(storageReport), Collections.emptyList())); Assert.assertEquals(SCMNodeStorageStatMap.ReportStatus.ALL_IS_WELL, result.getStatus()); StorageContainerDatanodeProtocolProtos.NodeReportProto.Builder nrb = NodeReportProto.newBuilder(); StorageReportProto srb = reportSet.iterator().next().getProtoBufMessage(); reportList.add(srb); - result = map.processNodeReport(key, TestUtils.createNodeReport(reportList)); + result = map.processNodeReport(key, TestUtils.createNodeReport( + reportList, Collections.emptyList())); Assert.assertEquals(SCMNodeStorageStatMap.ReportStatus.ALL_IS_WELL, result.getStatus()); reportList.add(TestUtils .createStorageReport(UUID.randomUUID(), path, reportCapacity, reportCapacity, 0, null)); - result = map.processNodeReport(key, TestUtils.createNodeReport(reportList)); + result = map.processNodeReport(key, TestUtils.createNodeReport( + reportList, Collections.emptyList())); Assert.assertEquals(SCMNodeStorageStatMap.ReportStatus.STORAGE_OUT_OF_SPACE, result.getStatus()); // Mark a disk failed diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestStatisticsUpdate.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestStatisticsUpdate.java index f3600ec85011..94dd6126d0ca 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestStatisticsUpdate.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestStatisticsUpdate.java @@ -46,6 +46,8 @@ import org.mockito.Mockito; import java.io.IOException; +import java.util.Arrays; +import java.util.Collections; import java.util.UUID; /** @@ -93,12 +95,16 @@ public void testStatisticsUpdate() throws Exception { datanode2.getUuid(), storagePath2, 200, 20, 180, null); nodeManager.register(datanode1, - TestUtils.createNodeReport(storageOne), null); + TestUtils.createNodeReport(Arrays.asList(storageOne), + Collections.emptyList()), null); nodeManager.register(datanode2, - TestUtils.createNodeReport(storageTwo), null); + TestUtils.createNodeReport(Arrays.asList(storageTwo), + Collections.emptyList()), null); - NodeReportProto nodeReportProto1 = TestUtils.createNodeReport(storageOne); - NodeReportProto nodeReportProto2 = TestUtils.createNodeReport(storageTwo); + NodeReportProto nodeReportProto1 = TestUtils.createNodeReport( + Arrays.asList(storageOne), Collections.emptyList()); + NodeReportProto nodeReportProto2 = TestUtils.createNodeReport( + Arrays.asList(storageTwo), Collections.emptyList()); nodeReportHandler.onMessage( new NodeReportFromDatanode(datanode1, nodeReportProto1), diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelinePlacementPolicy.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelinePlacementPolicy.java index deecebf35421..88261f01447a 
100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelinePlacementPolicy.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelinePlacementPolicy.java @@ -27,6 +27,7 @@ import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.conf.StorageUnit; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.MockDatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; @@ -57,6 +58,7 @@ import static junit.framework.TestCase.assertEquals; import static junit.framework.TestCase.assertTrue; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_DATANODE_RATIS_VOLUME_FREE_SPACE_MIN; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE; import static org.apache.hadoop.hdds.scm.net.NetConstants.LEAF_SCHEMA; @@ -90,6 +92,8 @@ public void init() throws Exception { false, PIPELINE_PLACEMENT_MAX_NODES_COUNT); conf = new OzoneConfiguration(); conf.setInt(OZONE_DATANODE_PIPELINE_LIMIT, PIPELINE_LOAD_LIMIT); + conf.setStorageSize(OZONE_DATANODE_RATIS_VOLUME_FREE_SPACE_MIN, + 10, StorageUnit.MB); nodeManager.setNumPipelinePerDatanode(PIPELINE_LOAD_LIMIT); stateManager = new PipelineStateManager(); placementPolicy = new PipelinePlacementPolicy( @@ -195,7 +199,34 @@ public void testChooseNodeNotEnoughSpace() throws SCMException { int nodesRequired = HddsProtos.ReplicationFactor.THREE.getNumber(); thrownExp.expect(SCMException.class); - thrownExp.expectMessage("enough space for even a single container"); + thrownExp.expectMessage("healthy datanodes with enough space"); + localPlacementPolicy.chooseDatanodes(new ArrayList<>(datanodes.size()), + new ArrayList<>(datanodes.size()), nodesRequired, 0); + } + + @Test + public void testChooseNodeNotEnoughMetadataSpace() throws SCMException { + // a huge free space min configured + conf.setStorageSize(OZONE_DATANODE_RATIS_VOLUME_FREE_SPACE_MIN, + 500, StorageUnit.TB); + // a small container size + conf.set(OZONE_SCM_CONTAINER_SIZE, "100MB"); + + // There is only one node on 3 racks altogether. 
+ List datanodes = new ArrayList<>(); + for (Node node : SINGLE_NODE_RACK) { + DatanodeDetails datanode = overwriteLocationInNode( + MockDatanodeDetails.randomDatanodeDetails(), node); + datanodes.add(datanode); + } + MockNodeManager localNodeManager = new MockNodeManager(initTopology(), + datanodes, false, datanodes.size()); + PipelinePlacementPolicy localPlacementPolicy = new PipelinePlacementPolicy( + localNodeManager, new PipelineStateManager(), conf); + int nodesRequired = HddsProtos.ReplicationFactor.THREE.getNumber(); + + thrownExp.expect(SCMException.class); + thrownExp.expectMessage("healthy datanodes with enough space"); localPlacementPolicy.chooseDatanodes(new ArrayList<>(datanodes.size()), new ArrayList<>(datanodes.size()), nodesRequired, 0); } diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java index b25cd388271c..4a915024418e 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java @@ -18,6 +18,7 @@ import java.io.File; import java.net.InetSocketAddress; +import java.util.Arrays; import java.util.List; import java.util.Map; import java.util.UUID; @@ -38,6 +39,7 @@ import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMHeartbeatResponseProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMRegisteredResponseProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMVersionResponseProto; +import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.MetadataStorageReportProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.StorageReportProto; import org.apache.hadoop.hdds.scm.TestUtils; import org.apache.hadoop.hdds.scm.VersionInfo; @@ -272,7 +274,10 @@ public void testRegister() throws Exception { SCMRegisteredResponseProto responseProto = rpcEndPoint.getEndPoint() .register(nodeToRegister.getExtendedProtoBufMessage(), TestUtils .createNodeReport( - getStorageReports(nodeToRegister.getUuid())), + Arrays.asList(getStorageReports( + nodeToRegister.getUuid())), + Arrays.asList(getMetadataStorageReports( + nodeToRegister.getUuid()))), TestUtils.getRandomContainerReports(10), TestUtils.getRandomPipelineReports()); Assert.assertNotNull(responseProto); @@ -286,10 +291,16 @@ public void testRegister() throws Exception { } private StorageReportProto getStorageReports(UUID id) { - String storagePath = testDir.getAbsolutePath() + "/" + id; + String storagePath = testDir.getAbsolutePath() + "/data-" + id; return TestUtils.createStorageReport(id, storagePath, 100, 10, 90, null); } + private MetadataStorageReportProto getMetadataStorageReports(UUID id) { + String storagePath = testDir.getAbsolutePath() + "/metadata-" + id; + return TestUtils.createMetadataStorageReport(storagePath, 100, 10, 90, + null); + } + private EndpointStateMachine registerTaskHelper( InetSocketAddress scmAddress, int rpcTimeout, boolean clearDatanodeDetails @@ -300,8 +311,10 @@ private EndpointStateMachine registerTaskHelper( scmAddress, rpcTimeout); rpcEndPoint.setState(EndpointStateMachine.EndPointStates.REGISTER); OzoneContainer ozoneContainer = mock(OzoneContainer.class); + UUID datanodeID = UUID.randomUUID(); 
when(ozoneContainer.getNodeReport()).thenReturn(TestUtils - .createNodeReport(getStorageReports(UUID.randomUUID()))); + .createNodeReport(Arrays.asList(getStorageReports(datanodeID)), + Arrays.asList(getMetadataStorageReports(datanodeID)))); ContainerController controller = Mockito.mock(ContainerController.class); when(controller.getContainerReport()).thenReturn( TestUtils.getRandomContainerReports(10)); @@ -372,7 +385,8 @@ public void testHeartbeat() throws Exception { SCMHeartbeatRequestProto request = SCMHeartbeatRequestProto.newBuilder() .setDatanodeDetails(dataNode.getProtoBufMessage()) .setNodeReport(TestUtils.createNodeReport( - getStorageReports(UUID.randomUUID()))) + Arrays.asList(getStorageReports(dataNode.getUuid())), + Arrays.asList(getMetadataStorageReports(dataNode.getUuid())))) .build(); SCMHeartbeatResponseProto responseProto = rpcEndPoint.getEndPoint() @@ -394,7 +408,8 @@ public void testHeartbeatWithCommandStatusReport() throws Exception { SCMHeartbeatRequestProto request = SCMHeartbeatRequestProto.newBuilder() .setDatanodeDetails(dataNode.getProtoBufMessage()) .setNodeReport(TestUtils.createNodeReport( - getStorageReports(UUID.randomUUID()))) + Arrays.asList(getStorageReports(dataNode.getUuid())), + Arrays.asList(getMetadataStorageReports(dataNode.getUuid())))) .build(); SCMHeartbeatResponseProto responseProto = rpcEndPoint.getEndPoint() diff --git a/hadoop-ozone/dist/src/main/compose/compatibility/docker-config b/hadoop-ozone/dist/src/main/compose/compatibility/docker-config index 3fd4ca02eced..6ac4b07618bc 100644 --- a/hadoop-ozone/dist/src/main/compose/compatibility/docker-config +++ b/hadoop-ozone/dist/src/main/compose/compatibility/docker-config @@ -17,6 +17,7 @@ OZONE-SITE.XML_ozone.om.address=om OZONE-SITE.XML_ozone.om.http-address=om:9874 OZONE-SITE.XML_ozone.scm.container.size=1GB +OZONE-SITE.XML_ozone.scm.datanode.ratis.volume.free-space.min=10MB OZONE-SITE.XML_ozone.scm.pipeline.creation.interval=30s OZONE-SITE.XML_ozone.scm.pipeline.owner.container.count=1 OZONE-SITE.XML_ozone.scm.names=scm diff --git a/hadoop-ozone/dist/src/main/compose/ozone-csi/docker-config b/hadoop-ozone/dist/src/main/compose/ozone-csi/docker-config index 2488153c0974..e3fbb6a16ee8 100644 --- a/hadoop-ozone/dist/src/main/compose/ozone-csi/docker-config +++ b/hadoop-ozone/dist/src/main/compose/ozone-csi/docker-config @@ -22,6 +22,7 @@ OZONE-SITE.XML_ozone.csi.socket=/tmp/csi.sock OZONE-SITE.XML_ozone.om.address=om OZONE-SITE.XML_ozone.om.http-address=om:9874 OZONE-SITE.XML_ozone.scm.container.size=1GB +OZONE-SITE.XML_ozone.scm.datanode.ratis.volume.free-space.min=10MB OZONE-SITE.XML_ozone.scm.pipeline.creation.interval=30s OZONE-SITE.XML_ozone.scm.pipeline.owner.container.count=1 OZONE-SITE.XML_ozone.scm.names=scm diff --git a/hadoop-ozone/dist/src/main/compose/ozone-ha/docker-config b/hadoop-ozone/dist/src/main/compose/ozone-ha/docker-config index 40f27606faed..270ccfbe3d29 100644 --- a/hadoop-ozone/dist/src/main/compose/ozone-ha/docker-config +++ b/hadoop-ozone/dist/src/main/compose/ozone-ha/docker-config @@ -31,6 +31,7 @@ OZONE-SITE.XML_ozone.scm.address.scmservice.scm3=scm3 OZONE-SITE.XML_ozone.scm.ratis.enable=true OZONE-SITE.XML_ozone.scm.datanode.id.dir=/data OZONE-SITE.XML_ozone.scm.container.size=1GB +OZONE-SITE.XML_ozone.scm.datanode.ratis.volume.free-space.min=10MB OZONE-SITE.XML_ozone.metadata.dirs=/data/metadata OZONE-SITE.XML_hdds.datanode.dir=/data/hdds OZONE-SITE.XML_ozone.datanode.pipeline.limit=1 diff --git a/hadoop-ozone/dist/src/main/compose/ozone-mr/common-config 
b/hadoop-ozone/dist/src/main/compose/ozone-mr/common-config
index 15ac89227eeb..d5a92ee7a942 100644
--- a/hadoop-ozone/dist/src/main/compose/ozone-mr/common-config
+++ b/hadoop-ozone/dist/src/main/compose/ozone-mr/common-config
@@ -17,6 +17,7 @@
 OZONE-SITE.XML_ozone.om.address=om
 OZONE-SITE.XML_ozone.om.http-address=om:9874
 OZONE-SITE.XML_ozone.scm.container.size=1GB
+OZONE-SITE.XML_ozone.scm.datanode.ratis.volume.free-space.min=10MB
 OZONE-SITE.XML_ozone.scm.pipeline.creation.interval=30s
 OZONE-SITE.XML_ozone.scm.pipeline.owner.container.count=1
 OZONE-SITE.XML_ozone.scm.names=scm
diff --git a/hadoop-ozone/dist/src/main/compose/ozone-om-ha/docker-config b/hadoop-ozone/dist/src/main/compose/ozone-om-ha/docker-config
index 34c241cff53e..4b002f2c59af 100644
--- a/hadoop-ozone/dist/src/main/compose/ozone-om-ha/docker-config
+++ b/hadoop-ozone/dist/src/main/compose/ozone-om-ha/docker-config
@@ -26,6 +26,7 @@ OZONE-SITE.XML_ozone.scm.names=scm
 OZONE-SITE.XML_ozone.scm.datanode.id.dir=/data
 OZONE-SITE.XML_ozone.scm.block.client.address=scm
 OZONE-SITE.XML_ozone.scm.container.size=1GB
+OZONE-SITE.XML_ozone.scm.datanode.ratis.volume.free-space.min=10MB
 OZONE-SITE.XML_ozone.metadata.dirs=/data/metadata
 OZONE-SITE.XML_ozone.handler.type=distributed
 OZONE-SITE.XML_ozone.scm.client.address=scm
diff --git a/hadoop-ozone/dist/src/main/compose/ozone-topology/docker-config b/hadoop-ozone/dist/src/main/compose/ozone-topology/docker-config
index e46ca589684f..0f3a23613797 100644
--- a/hadoop-ozone/dist/src/main/compose/ozone-topology/docker-config
+++ b/hadoop-ozone/dist/src/main/compose/ozone-topology/docker-config
@@ -19,6 +19,7 @@ CORE-SITE.XML_fs.defaultFS=ofs://om
 OZONE-SITE.XML_ozone.om.address=om
 OZONE-SITE.XML_ozone.om.http-address=om:9874
 OZONE-SITE.XML_ozone.scm.container.size=256MB
+OZONE-SITE.XML_ozone.scm.datanode.ratis.volume.free-space.min=10MB
 OZONE-SITE.XML_ozone.ozone.scm.block.size=64MB
 OZONE-SITE.XML_ozone.scm.pipeline.creation.interval=30s
 OZONE-SITE.XML_ozone.scm.pipeline.owner.container.count=1
diff --git a/hadoop-ozone/dist/src/main/compose/ozone/docker-config b/hadoop-ozone/dist/src/main/compose/ozone/docker-config
index c9669a1b9fdf..721f1c3217d6 100644
--- a/hadoop-ozone/dist/src/main/compose/ozone/docker-config
+++ b/hadoop-ozone/dist/src/main/compose/ozone/docker-config
@@ -20,6 +20,7 @@ CORE-SITE.XML_fs.trash.interval=1
 OZONE-SITE.XML_ozone.om.address=om
 OZONE-SITE.XML_ozone.om.http-address=om:9874
 OZONE-SITE.XML_ozone.scm.container.size=1GB
+OZONE-SITE.XML_ozone.scm.datanode.ratis.volume.free-space.min=10MB
 OZONE-SITE.XML_ozone.scm.pipeline.creation.interval=30s
 OZONE-SITE.XML_ozone.scm.pipeline.owner.container.count=1
 OZONE-SITE.XML_ozone.scm.names=scm
diff --git a/hadoop-ozone/dist/src/main/compose/ozones3-haproxy/docker-config b/hadoop-ozone/dist/src/main/compose/ozones3-haproxy/docker-config
index 711ac7c1c564..290a163ef8de 100644
--- a/hadoop-ozone/dist/src/main/compose/ozones3-haproxy/docker-config
+++ b/hadoop-ozone/dist/src/main/compose/ozones3-haproxy/docker-config
@@ -18,6 +18,7 @@
 CORE-SITE.XML_fs.defaultFS=ofs://om
 OZONE-SITE.XML_ozone.om.address=om
 OZONE-SITE.XML_ozone.scm.container.size=1GB
+OZONE-SITE.XML_ozone.scm.datanode.ratis.volume.free-space.min=10MB
 OZONE-SITE.XML_ozone.scm.pipeline.creation.interval=30s
 OZONE-SITE.XML_ozone.scm.pipeline.owner.container.count=1
 OZONE-SITE.XML_ozone.scm.names=scm
diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/docker-config b/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/docker-config
index 6718365feb80..1602784d875b 100644
--- a/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/docker-config
+++ b/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/docker-config
@@ -37,6 +37,7 @@
 OZONE-SITE.XML_ozone.scm.ratis.enable=true
 OZONE-SITE.XML_ozone.om.volume.listall.allowed=false
 OZONE-SITE.XML_ozone.scm.container.size=1GB
+OZONE-SITE.XML_ozone.scm.datanode.ratis.volume.free-space.min=10MB
 OZONE-SITE.XML_ozone.scm.pipeline.creation.interval=30s
 OZONE-SITE.XML_ozone.scm.pipeline.owner.container.count=1
 OZONE-SITE.XML_ozone.scm.datanode.id.dir=/data
diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/docker-config b/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/docker-config
index 5bff698f26d7..7fa3ef493f2d 100644
--- a/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/docker-config
+++ b/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/docker-config
@@ -17,6 +17,7 @@
 OZONE-SITE.XML_ozone.om.address=om
 OZONE-SITE.XML_ozone.om.http-address=om:9874
 OZONE-SITE.XML_ozone.scm.container.size=1GB
+OZONE-SITE.XML_ozone.scm.datanode.ratis.volume.free-space.min=10MB
 OZONE-SITE.XML_ozone.scm.pipeline.creation.interval=30s
 OZONE-SITE.XML_ozone.scm.pipeline.owner.container.count=1
 OZONE-SITE.XML_ozone.scm.names=scm
diff --git a/hadoop-ozone/dist/src/main/compose/restart/docker-config b/hadoop-ozone/dist/src/main/compose/restart/docker-config
index f3a46541e99c..cdf0174b178c 100644
--- a/hadoop-ozone/dist/src/main/compose/restart/docker-config
+++ b/hadoop-ozone/dist/src/main/compose/restart/docker-config
@@ -17,6 +17,7 @@
 OZONE-SITE.XML_ozone.om.address=om
 OZONE-SITE.XML_ozone.om.http-address=om:9874
 OZONE-SITE.XML_ozone.scm.container.size=1GB
+OZONE-SITE.XML_ozone.scm.datanode.ratis.volume.free-space.min=10MB
 OZONE-SITE.XML_ozone.scm.pipeline.creation.interval=30s
 OZONE-SITE.XML_ozone.scm.pipeline.owner.container.count=1
 OZONE-SITE.XML_ozone.scm.names=scm
diff --git a/hadoop-ozone/dist/src/main/compose/upgrade/docker-config b/hadoop-ozone/dist/src/main/compose/upgrade/docker-config
index e55477d73644..69e7b911d095 100644
--- a/hadoop-ozone/dist/src/main/compose/upgrade/docker-config
+++ b/hadoop-ozone/dist/src/main/compose/upgrade/docker-config
@@ -19,6 +19,7 @@ CORE-SITE.XML_fs.o3fs.impl=org.apache.hadoop.fs.ozone.OzoneFileSystem
 OZONE-SITE.XML_ozone.om.address=om
 OZONE-SITE.XML_ozone.om.http-address=om:9874
 OZONE-SITE.XML_ozone.scm.container.size=1GB
+OZONE-SITE.XML_ozone.scm.datanode.ratis.volume.free-space.min=10MB
 OZONE-SITE.XML_ozone.scm.pipeline.creation.interval=30s
 OZONE-SITE.XML_ozone.scm.pipeline.owner.container.count=1
 OZONE-SITE.XML_ozone.scm.names=scm
diff --git a/hadoop-ozone/dist/src/main/compose/xcompat/docker-config b/hadoop-ozone/dist/src/main/compose/xcompat/docker-config
index 0599498cf238..ee4c7af6a3ad 100644
--- a/hadoop-ozone/dist/src/main/compose/xcompat/docker-config
+++ b/hadoop-ozone/dist/src/main/compose/xcompat/docker-config
@@ -25,6 +25,7 @@ OZONE-SITE.XML_ozone.replication=3
 OZONE-SITE.XML_ozone.scm.block.client.address=scm
 OZONE-SITE.XML_ozone.scm.client.address=scm
 OZONE-SITE.XML_ozone.scm.container.size=1GB
+OZONE-SITE.XML_ozone.scm.datanode.ratis.volume.free-space.min=10MB
 OZONE-SITE.XML_ozone.scm.datanode.id.dir=/data
 OZONE-SITE.XML_ozone.scm.names=scm
 OZONE-SITE.XML_ozone.scm.pipeline.owner.container.count=1
diff --git a/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/MiniOzoneChaosCluster.java b/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/MiniOzoneChaosCluster.java
index e45506c928ed..1773b6d290ce 100644
--- a/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/MiniOzoneChaosCluster.java
+++ b/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/MiniOzoneChaosCluster.java
@@ -241,6 +241,9 @@ protected void initializeConfiguration() throws IOException {
         32, StorageUnit.KB);
     conf.setStorageSize(ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE, 1,
         StorageUnit.MB);
+    conf.setStorageSize(
+        ScmConfigKeys.OZONE_DATANODE_RATIS_VOLUME_FREE_SPACE_MIN,
+        0, org.apache.hadoop.hdds.conf.StorageUnit.MB);
     conf.setTimeDuration(ScmConfigKeys.HDDS_SCM_WATCHER_TIMEOUT, 1000,
         TimeUnit.MILLISECONDS);
     conf.setTimeDuration(ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL, 10,
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerHandler.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerHandler.java
index 11efaf1d2f64..cb85161eec06 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerHandler.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerHandler.java
@@ -24,6 +24,7 @@
 import org.apache.hadoop.hdds.client.ReplicationType;
 import org.apache.hadoop.hdds.client.StandaloneReplicationConfig;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.conf.StorageUnit;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.scm.container.ContainerID;
 import org.apache.hadoop.hdds.scm.container.ContainerInfo;
@@ -42,6 +43,7 @@
 
 import static java.nio.charset.StandardCharsets.UTF_8;
 import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_DATANODE_RATIS_VOLUME_FREE_SPACE_MIN;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE;
 
 import org.junit.After;
@@ -70,6 +72,8 @@ public void setup() throws Exception {
     //setup a cluster (1G free space is enough for a unit test)
     conf = new OzoneConfiguration();
     conf.set(OZONE_SCM_CONTAINER_SIZE, "1GB");
+    conf.setStorageSize(OZONE_DATANODE_RATIS_VOLUME_FREE_SPACE_MIN,
+        0, StorageUnit.MB);
     conf.setBoolean(HddsConfigKeys.HDDS_SCM_SAFEMODE_PIPELINE_CREATION, false);
     cluster = MiniOzoneCluster.newBuilder(conf)
         .setNumDatanodes(1).build();
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteContainerHandler.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteContainerHandler.java
index b28efd2cdc0a..e3eccb5ff634 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteContainerHandler.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteContainerHandler.java
@@ -22,6 +22,7 @@
 import org.apache.hadoop.hdds.client.ReplicationType;
 import org.apache.hadoop.hdds.client.StandaloneReplicationConfig;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.conf.StorageUnit;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.scm.container.ContainerID;
 import org.apache.hadoop.hdds.scm.container.ContainerInfo;
@@ -55,6 +56,7 @@
 
 import static java.nio.charset.StandardCharsets.UTF_8;
 import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_DATANODE_RATIS_VOLUME_FREE_SPACE_MIN;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE;
 
 /**
@@ -79,6 +81,8 @@ public class TestDeleteContainerHandler {
   public static void setup() throws Exception {
     conf = new OzoneConfiguration();
     conf.set(OZONE_SCM_CONTAINER_SIZE, "1GB");
+    conf.setStorageSize(OZONE_DATANODE_RATIS_VOLUME_FREE_SPACE_MIN,
+        0, StorageUnit.MB);
     conf.setBoolean(HddsConfigKeys.HDDS_SCM_SAFEMODE_PIPELINE_CREATION, false);
     cluster = MiniOzoneCluster.newBuilder(conf)
         .setNumDatanodes(1).build();
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/volume/TestDatanodeHddsVolumeFailureDetection.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/volume/TestDatanodeHddsVolumeFailureDetection.java
index cd403554daab..2a7fbe826de4 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/volume/TestDatanodeHddsVolumeFailureDetection.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/volume/TestDatanodeHddsVolumeFailureDetection.java
@@ -21,6 +21,7 @@
 
 import org.apache.hadoop.hdds.client.ReplicationFactor;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.conf.StorageUnit;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.scm.cli.ContainerOperationClient;
 import org.apache.hadoop.hdds.scm.client.ScmClient;
@@ -62,6 +63,7 @@
 import static java.nio.charset.StandardCharsets.UTF_8;
 import static org.apache.hadoop.hdds.client.ReplicationFactor.ONE;
 import static org.apache.hadoop.hdds.client.ReplicationType.STAND_ALONE;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_DATANODE_RATIS_VOLUME_FREE_SPACE_MIN;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE;
 import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_CONTAINER_CACHE_SIZE;
 import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_REPLICATION;
@@ -89,6 +91,8 @@ public class TestDatanodeHddsVolumeFailureDetection {
   public void init() throws Exception {
     ozoneConfig = new OzoneConfiguration();
     ozoneConfig.set(OZONE_SCM_CONTAINER_SIZE, "1GB");
+    ozoneConfig.setStorageSize(OZONE_DATANODE_RATIS_VOLUME_FREE_SPACE_MIN,
+        0, StorageUnit.MB);
     ozoneConfig.setInt(OZONE_REPLICATION, ReplicationFactor.ONE.getValue());
     // keep the cache size = 1, so we could trigger io exception on
     // reading on-disk db instance
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/volume/TestDatanodeHddsVolumeFailureToleration.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/volume/TestDatanodeHddsVolumeFailureToleration.java
index e2f29e5f71a7..95b324edb52c 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/volume/TestDatanodeHddsVolumeFailureToleration.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/volume/TestDatanodeHddsVolumeFailureToleration.java
@@ -21,6 +21,7 @@
 
 import org.apache.hadoop.hdds.client.ReplicationFactor;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.conf.StorageUnit;
 import org.apache.hadoop.ozone.HddsDatanodeService;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
 import org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration;
@@ -39,6 +40,7 @@
 import java.io.IOException;
 import java.util.List;
 
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_DATANODE_RATIS_VOLUME_FREE_SPACE_MIN;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE;
 import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_REPLICATION;
 
@@ -59,6 +61,8 @@ public class TestDatanodeHddsVolumeFailureToleration {
   public void init() throws Exception {
     ozoneConfig = new OzoneConfiguration();
     ozoneConfig.set(OZONE_SCM_CONTAINER_SIZE, "1GB");
+    ozoneConfig.setStorageSize(OZONE_DATANODE_RATIS_VOLUME_FREE_SPACE_MIN,
+        0, StorageUnit.MB);
     ozoneConfig.setInt(OZONE_REPLICATION, ReplicationFactor.ONE.getValue());
     // set tolerated = 1
     DatanodeConfiguration dnConf =
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/fsck/TestContainerMapper.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/fsck/TestContainerMapper.java
index fd12b710ba64..b1820473ee80 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/fsck/TestContainerMapper.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/fsck/TestContainerMapper.java
@@ -21,6 +21,7 @@
 import org.apache.hadoop.hdds.client.ReplicationFactor;
 import org.apache.hadoop.hdds.client.ReplicationType;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.conf.StorageUnit;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolClientSideTranslatorPB;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
@@ -37,6 +38,8 @@
 import java.util.*;
 import org.junit.Rule;
 import org.junit.rules.Timeout;
+
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_DATANODE_RATIS_VOLUME_FREE_SPACE_MIN;
 import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_DB_DIRS;
 import static org.junit.Assert.*;
 
@@ -72,6 +75,8 @@ public static void init() throws Exception {
     dbPath = GenericTestUtils.getRandomizedTempPath();
     conf.set(OZONE_OM_DB_DIRS, dbPath);
    conf.set(ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE, "100MB");
+    conf.setStorageSize(OZONE_DATANODE_RATIS_VOLUME_FREE_SPACE_MIN,
+        0, StorageUnit.MB);
     // By default, 2 pipelines are created. Setting the value to 6, will ensure
     // each pipleine can have 3 containers open.
     conf.setInt(ScmConfigKeys.OZONE_SCM_PIPELINE_OWNER_CONTAINER_COUNT, 6);
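
The test and compose changes above all follow one pattern: the docker-compose configs lower the new ozone.scm.datanode.ratis.volume.free-space.min floor from its 1GB default to 10MB so containerized datanodes with small disks can still be allocated pipelines, while the MiniOzoneCluster-based tests waive the check entirely by setting it to 0MB. A minimal sketch of reusing that pattern in a new test follows; the class and method names are illustrative and not part of this patch, while the config key and accessors are the ones the patch itself uses.

import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.conf.StorageUnit;
import org.apache.hadoop.hdds.scm.ScmConfigKeys;

/**
 * Illustrative sketch only (not part of the patch): builds a test
 * configuration that relaxes the ratis-volume free-space floor.
 */
public final class FreeSpaceMinExample {

  private FreeSpaceMinExample() {
  }

  public static OzoneConfiguration relaxedConf() {
    OzoneConfiguration conf = new OzoneConfiguration();
    // Test volumes are tiny, so drop the floor to 0; the compose
    // clusters above use 10MB rather than waiving the check.
    conf.setStorageSize(
        ScmConfigKeys.OZONE_DATANODE_RATIS_VOLUME_FREE_SPACE_MIN,
        0, StorageUnit.MB);
    // Storage-size keys are read back in an explicit unit, here bytes,
    // with the 1GB default applying when the key is unset.
    long minFreeBytes = (long) conf.getStorageSize(
        ScmConfigKeys.OZONE_DATANODE_RATIS_VOLUME_FREE_SPACE_MIN,
        ScmConfigKeys.OZONE_DATANODE_RATIS_VOLUME_FREE_SPACE_MIN_DEFAULT,
        StorageUnit.BYTES);
    assert minFreeBytes == 0;
    return conf;
  }
}

Setting the key to 0 effectively disables the minimum, which is why each test setup above pairs it with a small container size: otherwise single-datanode mini clusters with little free disk could not form pipelines at all.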