diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ContainerImporter.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ContainerImporter.java
index 46bbb6662015..90fc50f84ab3 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ContainerImporter.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ContainerImporter.java
@@ -31,6 +31,7 @@
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
+import org.apache.hadoop.hdds.utils.HddsServerUtil;
 import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils;
 import org.apache.hadoop.ozone.container.common.impl.ContainerDataYaml;
 import org.apache.hadoop.ozone.container.common.impl.ContainerSet;
@@ -149,7 +150,7 @@ HddsVolume chooseNextVolume() throws IOException {
     // Choose volume that can hold both container in tmp and dest directory
     return volumeChoosingPolicy.chooseVolume(
         StorageVolumeUtil.getHddsVolumesList(volumeSet.getVolumesList()),
-        containerSize * 2);
+        HddsServerUtil.requiredReplicationSpace(containerSize));
   }
 
   public static Path getUntarDirectory(HddsVolume hddsVolume)
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HddsServerUtil.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HddsServerUtil.java
index 5d04f2060f59..f554990fc941 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HddsServerUtil.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HddsServerUtil.java
@@ -403,6 +403,11 @@ public static Collection<String> getOzoneDatanodeRatisDirectory(
     return rawLocations;
   }
 
+  public static long requiredReplicationSpace(long defaultContainerSize) {
+    // Container import needs double the container size: one copy in the tmp directory and one in the destination
+    return 2 * defaultContainerSize;
+  }
+
   public static Collection<String> getDatanodeStorageDirs(ConfigurationSource conf) {
     Collection<String> rawLocations = conf.getTrimmedStringCollection(HDDS_DATANODE_DIR_KEY);
     if (rawLocations.isEmpty()) {
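Note: the new HddsServerUtil.requiredReplicationSpace helper centralizes the 2x sizing rule that chooseNextVolume previously inlined as containerSize * 2. A minimal standalone sketch of the rule it encodes, illustrative only and not part of the patch (the 5 GB value is the ozone.scm.container.size default):

    // Illustrative sketch: what requiredReplicationSpace reserves for one import.
    import org.apache.hadoop.hdds.utils.HddsServerUtil;

    public class RequiredSpaceExample {
      public static void main(String[] args) {
        long containerSize = 5L * 1024 * 1024 * 1024;  // ozone.scm.container.size default (5 GB)
        long required = HddsServerUtil.requiredReplicationSpace(containerSize);
        // One copy is held in the tmp directory and one in the destination directory.
        System.out.println(required == 2 * containerSize);  // true
      }
    }
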
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationManagerUtil.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationManagerUtil.java
index f0be5b231d99..503126198a0d 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationManagerUtil.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationManagerUtil.java
@@ -37,6 +37,7 @@
 import org.apache.hadoop.hdds.scm.exceptions.SCMException;
 import org.apache.hadoop.hdds.scm.node.NodeStatus;
 import org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException;
+import org.apache.hadoop.hdds.utils.HddsServerUtil;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -78,7 +79,7 @@ public static List<DatanodeDetails> getTargetDatanodes(PlacementPolicy policy,
     // Ensure that target datanodes have enough space to hold a complete
     // container.
     final long dataSizeRequired =
-        Math.max(container.getUsedBytes(), defaultContainerSize);
+        HddsServerUtil.requiredReplicationSpace(Math.max(container.getUsedBytes(), defaultContainerSize));
 
     int mutableRequiredNodes = requiredNodes;
     while (mutableRequiredNodes > 0) {
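Note: target selection previously reserved only max(usedBytes, defaultContainerSize) on candidate datanodes, while the importer on the target actually needs twice that amount (tmp copy plus destination copy); doubling here keeps SCM's placement check consistent with the datanode-side requirement. A hedged worked example, with illustrative values, as the statements would appear inside a method:

    // Illustrative arithmetic only; 3 GB and 5 GB are example values.
    long usedBytes = 3L * 1024 * 1024 * 1024;             // current replica size
    long defaultContainerSize = 5L * 1024 * 1024 * 1024;  // configured container size
    long dataSizeRequired =
        HddsServerUtil.requiredReplicationSpace(Math.max(usedBytes, defaultContainerSize));
    // dataSizeRequired == 10 GB: the larger of the two sizes, doubled.
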
SCMException("No nodes available", FAILED_TO_FIND_SUITABLE_NODE); } @@ -383,6 +393,9 @@ protected List chooseDatanodesInternal( List favoredNodes, int nodesRequiredToChoose, long metadataSizeRequired, long dataSizeRequired) throws SCMException { + long containerSize = (long) conf.getStorageSize(ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE, + ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE_DEFAULT, StorageUnit.BYTES); + assertEquals(HddsServerUtil.requiredReplicationSpace(containerSize), dataSizeRequired); if (nodesRequiredToChoose >= throwWhenThisOrMoreNodesRequested) { throw new SCMException("No nodes available", FAILED_TO_FIND_SUITABLE_NODE);