@@ -372,6 +372,13 @@ public final class ScmConfigKeys {
"ozone.scm.pipeline.per.metadata.disk";

public static final int OZONE_SCM_PIPELINE_PER_METADATA_VOLUME_DEFAULT = 2;

public static final String OZONE_DATANODE_RATIS_VOLUME_FREE_SPACE_MIN =
"ozone.scm.datanode.ratis.volume.free-space.min";

public static final String
OZONE_DATANODE_RATIS_VOLUME_FREE_SPACE_MIN_DEFAULT = "1GB";

// Max timeout for pipeline to stay at ALLOCATED state before scrubbed.
public static final String OZONE_SCM_PIPELINE_ALLOCATED_TIMEOUT =
"ozone.scm.pipeline.allocated.timeout";
11 changes: 11 additions & 0 deletions hadoop-hdds/common/src/main/resources/ozone-default.xml
@@ -2901,4 +2901,15 @@
directory deleting service per time interval.
</description>
</property>

<property>
<name>ozone.scm.datanode.ratis.volume.free-space.min</name>
<value>1GB</value>
<tag>OZONE, DATANODE</tag>
<description>Minimum free space required on each ratis volume
of a datanode for it to be eligible to hold a new pipeline.
Datanodes whose ratis volumes all have less free space than this
value will not be allocated a pipeline or container replica.
</description>
</property>
</configuration>
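
Operators can override this property in ozone-site.xml; tests in this PR lower it programmatically. A hedged sketch of the programmatic override, mirroring the test setup further down in this diff (the 2 GB value is illustrative, not from the PR):

// Require at least 2 GB free on some ratis volume before a datanode
// is eligible for a new pipeline.
OzoneConfiguration conf = new OzoneConfiguration();
conf.setStorageSize(
    ScmConfigKeys.OZONE_DATANODE_RATIS_VOLUME_FREE_SPACE_MIN,
    2, StorageUnit.GB);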
@@ -24,7 +24,9 @@

import com.google.common.base.Preconditions;
import org.apache.hadoop.hdds.conf.ConfigurationSource;
import org.apache.hadoop.hdds.conf.StorageUnit;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.MetadataStorageReportProto;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.StorageReportProto;
import org.apache.hadoop.hdds.scm.container.placement.algorithms.ContainerPlacementStatusDefault;
import org.apache.hadoop.hdds.scm.exceptions.SCMException;
@@ -37,6 +39,9 @@
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_DATANODE_RATIS_VOLUME_FREE_SPACE_MIN;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_DATANODE_RATIS_VOLUME_FREE_SPACE_MIN_DEFAULT;

/**
* This policy implements a set of invariants which are common
* for all basic placement policies, acts as the repository of helper
@@ -169,13 +174,35 @@ public boolean hasEnoughSpace(DatanodeDetails datanodeDetails,
long sizeRequired) {
Preconditions.checkArgument(datanodeDetails instanceof DatanodeInfo);

long metaSizeRequired = (long) conf.getStorageSize(
OZONE_DATANODE_RATIS_VOLUME_FREE_SPACE_MIN,
OZONE_DATANODE_RATIS_VOLUME_FREE_SPACE_MIN_DEFAULT,
StorageUnit.BYTES);

boolean enoughForData = false;
boolean enoughForMeta = false;

DatanodeInfo datanodeInfo = (DatanodeInfo) datanodeDetails;
for (StorageReportProto reportProto : datanodeInfo.getStorageReports()) {
if (reportProto.getRemaining() > sizeRequired) {
enoughForData = true;
break;
}
}

if (!enoughForData) {
return false;
}

for (MetadataStorageReportProto reportProto
: datanodeInfo.getMetadataStorageReports()) {
if (reportProto.getRemaining() > metaSizeRequired) {
enoughForMeta = true;
break;
}
}

return enoughForData && enoughForMeta;
}
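
In short, a node now qualifies only if at least one data volume can hold the requested container size and at least one metadata (ratis) volume clears the configured minimum. A simplified, self-contained restatement of that predicate (hypothetical helper, not part of the PR):

// Hypothetical restatement of hasEnoughSpace over plain remaining-space
// lists: some data volume must fit the container AND some metadata
// volume must have more than the configured minimum free space.
static boolean hasEnoughSpace(java.util.List<Long> dataRemaining,
    java.util.List<Long> metaRemaining,
    long sizeRequired, long metaSizeRequired) {
  boolean enoughForData =
      dataRemaining.stream().anyMatch(r -> r > sizeRequired);
  boolean enoughForMeta =
      metaRemaining.stream().anyMatch(r -> r > metaSizeRequired);
  return enoughForData && enoughForMeta;
}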

@@ -143,6 +143,20 @@ public List<StorageReportProto> getStorageReports() {
}
}

/**
* Returns the metadata storage reports associated with this datanode.
*
* @return list of metadata storage reports
*/
public List<MetadataStorageReportProto> getMetadataStorageReports() {
try {
lock.readLock().lock();
return metadataStorageReports;
} finally {
lock.readLock().unlock();
}
}

/**
* Returns count of healthy volumes reported from datanode.
* @return count of healthy volumes
@@ -41,6 +41,9 @@
import java.util.Set;
import java.util.stream.Collectors;

import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_DATANODE_RATIS_VOLUME_FREE_SPACE_MIN;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_DATANODE_RATIS_VOLUME_FREE_SPACE_MIN_DEFAULT;

/**
* Pipeline placement policy that choose datanodes based on load balancing
* and network topology to supply pipeline creation.
@@ -158,15 +161,21 @@ List<DatanodeDetails> filterViableNodes(
ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE_DEFAULT,
StorageUnit.BYTES);

long metaSizeRequired = (long) conf.getStorageSize(
OZONE_DATANODE_RATIS_VOLUME_FREE_SPACE_MIN,
OZONE_DATANODE_RATIS_VOLUME_FREE_SPACE_MIN_DEFAULT,
StorageUnit.BYTES);

// filter nodes that don't even have space for one container
List<DatanodeDetails> canHoldList = healthyNodes.stream().filter(d ->
hasEnoughSpace(d, sizeRequired)).collect(Collectors.toList());

if (canHoldList.size() < nodesRequired) {
msg = String.format("Pipeline creation failed due to no sufficient" +
" healthy datanodes with enough space for even a single container." +
" Required %d. Found %d. Container size %d.",
nodesRequired, canHoldList.size(), sizeRequired);
" healthy datanodes with enough space for container data and " +
"metadata. Required %d. Found %d. Container data required %d, " +
"metadata required %d.",
nodesRequired, canHoldList.size(), sizeRequired, metaSizeRequired);
LOG.warn(msg);
throw new SCMException(msg,
SCMException.ResultCodes.FAILED_TO_FIND_SUITABLE_NODE);
@@ -60,7 +60,9 @@
import org.apache.hadoop.hdds.protocol
.proto.StorageContainerDatanodeProtocolProtos.CommandStatusReportsProto;
import org.apache.hadoop.hdds.protocol.proto
.StorageContainerDatanodeProtocolProtos.StorageReportProto;
import org.apache.hadoop.hdds.protocol.proto
.StorageContainerDatanodeProtocolProtos.MetadataStorageReportProto;
import org.apache.hadoop.hdds.protocol.proto
.StorageContainerDatanodeProtocolProtos.StorageTypeProto;
import org.apache.hadoop.hdds.scm.node.SCMNodeManager;
@@ -149,20 +151,23 @@ public static List<DatanodeDetails> getListOfRegisteredDatanodeDetails(
* @return NodeReportProto
*/
public static NodeReportProto getRandomNodeReport() {
return getRandomNodeReport(1, 1);
}

/**
* Generates a random NodeReport with the given numbers of storage
* reports and metadata storage reports in it.
*
* @param numberOfStorageReport number of storage reports this node report
* should have
* @param numberOfMetadataStorageReport number of metadata storage reports
* this node report should have
* @return NodeReportProto
*/
public static NodeReportProto getRandomNodeReport(int numberOfStorageReport,
int numberOfMetadataStorageReport) {
UUID nodeId = UUID.randomUUID();
return getRandomNodeReport(nodeId, File.separator + nodeId,
numberOfStorageReport, numberOfMetadataStorageReport);
}

/**
@@ -172,42 +177,41 @@ public static NodeReportProto getRandomNodeReport(int numberOfStorageReport) {
* @param nodeId datanode id
* @param basePath base path of storage directory
* @param numberOfStorageReport number of storage reports
* @param numberOfMetadataStorageReport number of metadata storage reports
*
* @return NodeReportProto
*/
public static NodeReportProto getRandomNodeReport(UUID nodeId,
String basePath, int numberOfStorageReport,
int numberOfMetadataStorageReport) {
List<StorageReportProto> storageReports = new ArrayList<>();
for (int i = 0; i < numberOfStorageReport; i++) {
storageReports.add(getRandomStorageReport(nodeId,
basePath + File.separator + "data-" + i));
}

List<MetadataStorageReportProto> metadataStorageReports =
new ArrayList<>();
for (int i = 0; i < numberOfMetadataStorageReport; i++) {
metadataStorageReports.add(getRandomMetadataStorageReport(
basePath + File.separator + "metadata-" + i));
}
return createNodeReport(storageReports, metadataStorageReports);
}

/**
* Creates a NodeReport with the given storage reports.
*
* @param reports storage reports to be included in the node report.
* @param metaReports metadata storage reports to be included
* in the node report.
* @return NodeReportProto
*/
public static NodeReportProto createNodeReport(
List<StorageReportProto> reports,
List<MetadataStorageReportProto> metaReports) {
NodeReportProto.Builder nodeReport = NodeReportProto.newBuilder();
nodeReport.addAllStorageReport(reports);
nodeReport.addAllMetadataStorageReport(metaReports);
return nodeReport.build();
}
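
A hedged, test-style sketch of how these helpers compose to build a node report carrying both kinds of reports (paths and sizes are illustrative):

UUID nodeId = UUID.randomUUID();
// One data volume report and one metadata volume report; a null storage
// type defaults to DISK in the helpers.
StorageReportProto data = TestUtils.createStorageReport(
    nodeId, "/data1", 100L, 0L, 100L, null);
MetadataStorageReportProto meta = TestUtils.createMetadataStorageReport(
    "/metadata1", 100L, 0L, 100L, null);
NodeReportProto report = TestUtils.createNodeReport(
    Arrays.asList(data), Arrays.asList(meta));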

Expand All @@ -228,6 +232,22 @@ public static StorageReportProto getRandomStorageReport(UUID nodeId,
StorageTypeProto.DISK);
}

/**
* Generates random metadata storage report.
*
* @param path path of the storage
*
* @return MetadataStorageReportProto
*/
public static MetadataStorageReportProto getRandomMetadataStorageReport(
String path) {
return createMetadataStorageReport(path,
random.nextInt(1000),
random.nextInt(500),
random.nextInt(500),
StorageTypeProto.DISK);
}

public static StorageReportProto createStorageReport(UUID nodeId, String path,
long capacity, long used, long remaining, StorageTypeProto type) {
return createStorageReport(nodeId, path, capacity, used, remaining,
@@ -263,6 +283,39 @@ public static StorageReportProto createStorageReport(UUID nodeId, String path,
return srb.build();
}

public static MetadataStorageReportProto createMetadataStorageReport(
String path, long capacity, long used, long remaining,
StorageTypeProto type) {
return createMetadataStorageReport(path, capacity, used, remaining,
type, false);
}

/**
* Creates metadata storage report with the given information.
*
* @param path storage dir
* @param capacity storage size
* @param used space used
* @param remaining space remaining
* @param type type of storage
* @param failed whether the volume is marked failed
*
* @return MetadataStorageReportProto
*/
public static MetadataStorageReportProto createMetadataStorageReport(
String path, long capacity, long used, long remaining,
StorageTypeProto type, boolean failed) {
Preconditions.checkNotNull(path);
MetadataStorageReportProto.Builder srb = MetadataStorageReportProto
.newBuilder();
srb.setStorageLocation(path)
.setCapacity(capacity)
.setScmUsed(used)
.setFailed(failed)
.setRemaining(remaining);
StorageTypeProto storageTypeProto =
type == null ? StorageTypeProto.DISK : type;
srb.setStorageType(storageTypeProto);
return srb.build();
}
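
The failed flag makes it possible to simulate an unusable metadata volume in tests; a hedged example with illustrative values:

// Simulate a full, failed metadata volume.
MetadataStorageReportProto failedMeta =
    TestUtils.createMetadataStorageReport(
        "/metadata-failed", 100L, 100L, 0L, StorageTypeProto.DISK, true);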

/**
* Generates random container reports.
@@ -23,6 +23,8 @@
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState;
import org.apache.hadoop.hdds.protocol.proto
.StorageContainerDatanodeProtocolProtos.StorageReportProto;
import org.apache.hadoop.hdds.protocol.proto
.StorageContainerDatanodeProtocolProtos.MetadataStorageReportProto;
import org.apache.hadoop.hdds.protocol.proto
.StorageContainerDatanodeProtocolProtos.NodeReportProto;
import org.apache.hadoop.hdds.protocol.proto
@@ -243,7 +245,13 @@ public List<DatanodeDetails> getNodes(
StorageReportProto storage1 = TestUtils.createStorageReport(
di.getUuid(), "/data1-" + di.getUuidString(),
capacity, used, remaining, null);
MetadataStorageReportProto metaStorage1 =
TestUtils.createMetadataStorageReport(
"/metadata1-" + di.getUuidString(), capacity, used,
remaining, null);
di.updateStorageReports(new ArrayList<>(Arrays.asList(storage1)));
di.updateMetaDataStorageReports(
new ArrayList<>(Arrays.asList(metaStorage1)));

healthyNodesWithInfo.add(di);
}
@@ -22,8 +22,10 @@
import java.util.List;

import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.conf.StorageUnit;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.MockDatanodeDetails;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.MetadataStorageReportProto;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.StorageReportProto;
import org.apache.hadoop.hdds.scm.ContainerPlacementStatus;
import org.apache.hadoop.hdds.scm.PlacementPolicy;
@@ -42,6 +44,7 @@
import org.junit.Test;
import org.mockito.Mockito;

import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_DATANODE_RATIS_VOLUME_FREE_SPACE_MIN;
import static org.apache.hadoop.hdds.scm.net.NetConstants.LEAF_SCHEMA;
import static org.apache.hadoop.hdds.scm.net.NetConstants.RACK_SCHEMA;
import static org.apache.hadoop.hdds.scm.net.NetConstants.ROOT_SCHEMA;
@@ -73,6 +76,8 @@ public void setup() {
public void testRackAwarePolicy() throws IOException {
conf.set(ScmConfigKeys.OZONE_SCM_CONTAINER_PLACEMENT_IMPL_KEY,
SCMContainerPlacementRackAware.class.getName());
conf.setStorageSize(OZONE_DATANODE_RATIS_VOLUME_FREE_SPACE_MIN,
0, StorageUnit.MB);

NodeSchema[] schemas = new NodeSchema[]
{ROOT_SCHEMA, RACK_SCHEMA, LEAF_SCHEMA};
@@ -91,8 +96,14 @@
StorageReportProto storage1 = TestUtils.createStorageReport(
datanodeInfo.getUuid(), "/data1-" + datanodeInfo.getUuidString(),
STORAGE_CAPACITY, 0, 100L, null);
MetadataStorageReportProto metaStorage1 =
TestUtils.createMetadataStorageReport(
"/metadata1-" + datanodeInfo.getUuidString(),
STORAGE_CAPACITY, 0, 100L, null);
datanodeInfo.updateStorageReports(
new ArrayList<>(Arrays.asList(storage1)));
datanodeInfo.updateMetaDataStorageReports(
new ArrayList<>(Arrays.asList(metaStorage1)));

datanodes.add(datanodeInfo);
cluster.add(datanodeInfo);