@@ -31,6 +31,7 @@
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
+import org.apache.hadoop.hdds.utils.HddsServerUtil;
 import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils;
 import org.apache.hadoop.ozone.container.common.impl.ContainerDataYaml;
 import org.apache.hadoop.ozone.container.common.impl.ContainerSet;

@@ -149,7 +150,7 @@ HddsVolume chooseNextVolume() throws IOException {
     // Choose volume that can hold both container in tmp and dest directory
     return volumeChoosingPolicy.chooseVolume(
         StorageVolumeUtil.getHddsVolumesList(volumeSet.getVolumesList()),
-        containerSize * 2);
+        HddsServerUtil.requiredReplicationSpace(containerSize));
   }

   public static Path getUntarDirectory(HddsVolume hddsVolume)
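
The importer hunk above swaps the inline containerSize * 2 for the shared helper: the volume picked for an incoming container must hold two full copies, the tarball unpacked under the tmp directory plus the final copy in the dest directory. A minimal sketch of the arithmetic, assuming Ozone's default 5 GB container size (the size itself is not part of this diff):

    // Illustration only; 5 GB is the assumed default container size.
    long containerSize = 5L * 1024 * 1024 * 1024;
    long required = HddsServerUtil.requiredReplicationSpace(containerSize);
    // required == 10737418240 bytes (10 GB): tmp copy + dest copy.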
@@ -403,6 +403,11 @@ public static Collection<String> getOzoneDatanodeRatisDirectory(
     return rawLocations;
   }

+  public static long requiredReplicationSpace(long defaultContainerSize) {
+    // Container import needs double the container size: one copy in the tmp directory and one in the dest directory.
+    return 2 * defaultContainerSize;
+  }
+
   public static Collection<String> getDatanodeStorageDirs(ConfigurationSource conf) {
     Collection<String> rawLocations = conf.getTrimmedStringCollection(HDDS_DATANODE_DIR_KEY);
     if (rawLocations.isEmpty()) {
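
requiredReplicationSpace in HddsServerUtil is now the single source of the 2x factor, so the datanode import path and the SCM placement path cannot drift apart. A self-contained sketch mirroring the helper (class and method names here are illustrative, not from the diff):

    // Standalone sketch of the reservation rule introduced above.
    public final class ReplicationSpaceSketch {
      static long requiredReplicationSpace(long containerSize) {
        return 2 * containerSize; // tmp copy + destination copy during import
      }

      public static void main(String[] args) {
        long fiveGb = 5L * 1024 * 1024 * 1024;
        System.out.println(requiredReplicationSpace(fiveGb)); // 10737418240
      }
    }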
@@ -37,6 +37,7 @@
 import org.apache.hadoop.hdds.scm.exceptions.SCMException;
 import org.apache.hadoop.hdds.scm.node.NodeStatus;
 import org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException;
+import org.apache.hadoop.hdds.utils.HddsServerUtil;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;

@@ -78,7 +79,7 @@ public static List<DatanodeDetails> getTargetDatanodes(PlacementPolicy policy,
     // Ensure that target datanodes have enough space to hold a complete
     // container.
     final long dataSizeRequired =
-        Math.max(container.getUsedBytes(), defaultContainerSize);
+        HddsServerUtil.requiredReplicationSpace(Math.max(container.getUsedBytes(), defaultContainerSize));

     int mutableRequiredNodes = requiredNodes;
     while (mutableRequiredNodes > 0) {
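
On the SCM side, getTargetDatanodes previously required only max(usedBytes, defaultContainerSize) of free space on each target; it now doubles that, matching what the datanode actually consumes while importing. A worked example with assumed values:

    // Assumed values for illustration only.
    long usedBytes = 3L * 1024 * 1024 * 1024;            // 3 GB actually used
    long defaultContainerSize = 5L * 1024 * 1024 * 1024; // 5 GB configured size
    long dataSizeRequired = HddsServerUtil.requiredReplicationSpace(
        Math.max(usedBytes, defaultContainerSize));
    // dataSizeRequired == 10 GB; before this change it was 5 GB.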
@@ -20,6 +20,7 @@
 import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState.IN_SERVICE;
 import static org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReplicaProto.State.CLOSED;
 import static org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes.FAILED_TO_FIND_SUITABLE_NODE;
+import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.mockito.Mockito.any;
 import static org.mockito.Mockito.anyBoolean;
 import static org.mockito.Mockito.anyInt;

@@ -36,13 +37,15 @@
 import org.apache.commons.lang3.tuple.Pair;
 import org.apache.hadoop.hdds.client.ReplicationConfig;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.conf.StorageUnit;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.MockDatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReplicaProto;
 import org.apache.hadoop.hdds.scm.ContainerPlacementStatus;
 import org.apache.hadoop.hdds.scm.PlacementPolicy;
 import org.apache.hadoop.hdds.scm.SCMCommonPlacementPolicy;
+import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.hdds.scm.container.ContainerID;
 import org.apache.hadoop.hdds.scm.container.ContainerInfo;
 import org.apache.hadoop.hdds.scm.container.ContainerReplica;

@@ -51,6 +54,7 @@
 import org.apache.hadoop.hdds.scm.exceptions.SCMException;
 import org.apache.hadoop.hdds.scm.net.Node;
 import org.apache.hadoop.hdds.scm.node.NodeManager;
+import org.apache.hadoop.hdds.utils.HddsServerUtil;
 import org.apache.hadoop.ozone.protocol.commands.DeleteContainerCommand;
 import org.apache.hadoop.ozone.protocol.commands.ReconstructECContainersCommand;
 import org.apache.hadoop.ozone.protocol.commands.ReplicateContainerCommand;

@@ -328,6 +332,9 @@ protected List<DatanodeDetails> chooseDatanodesInternal(
         List<DatanodeDetails> favoredNodes, int nodesRequiredToChoose,
         long metadataSizeRequired, long dataSizeRequired)
         throws SCMException {
+      long containerSize = (long) conf.getStorageSize(ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE,
+          ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE_DEFAULT, StorageUnit.BYTES);
+      assertEquals(HddsServerUtil.requiredReplicationSpace(containerSize), dataSizeRequired);
       if (nodesRequiredToChoose > 1) {
         throw new IllegalArgumentException("Only one node is allowed");
       }

@@ -356,6 +363,9 @@ protected List<DatanodeDetails> chooseDatanodesInternal(
         List<DatanodeDetails> favoredNodes, int nodesRequiredToChoose,
         long metadataSizeRequired, long dataSizeRequired)
         throws SCMException {
+      long containerSize = (long) conf.getStorageSize(ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE,
+          ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE_DEFAULT, StorageUnit.BYTES);
+      assertEquals(HddsServerUtil.requiredReplicationSpace(containerSize), dataSizeRequired);
       throw new SCMException("No nodes available",
           FAILED_TO_FIND_SUITABLE_NODE);
     }

@@ -383,6 +393,9 @@ protected List<DatanodeDetails> chooseDatanodesInternal(
         List<DatanodeDetails> favoredNodes, int nodesRequiredToChoose,
         long metadataSizeRequired, long dataSizeRequired)
         throws SCMException {
+      long containerSize = (long) conf.getStorageSize(ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE,
+          ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE_DEFAULT, StorageUnit.BYTES);
+      assertEquals(HddsServerUtil.requiredReplicationSpace(containerSize), dataSizeRequired);
       if (nodesRequiredToChoose >= throwWhenThisOrMoreNodesRequested) {
         throw new SCMException("No nodes available",
             FAILED_TO_FIND_SUITABLE_NODE);
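
Each stubbed policy in the test repeats one invariant: whatever path the placement request takes, the dataSizeRequired it receives must equal the helper's doubled value. Distilled into a standalone check (hypothetical test name; assumes a default OzoneConfiguration, i.e. the 5 GB container size):

    @Test
    void placementReceivesDoubledContainerSize() {
      OzoneConfiguration conf = new OzoneConfiguration();
      long containerSize = (long) conf.getStorageSize(
          ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE,
          ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE_DEFAULT, StorageUnit.BYTES);
      // With the default configuration this expects 10 GB.
      assertEquals(2 * containerSize,
          HddsServerUtil.requiredReplicationSpace(containerSize));
    }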