diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java
index 8d7dc087c67b..abf3040035e9 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java
@@ -20,6 +20,7 @@
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
+import com.google.common.cache.CacheLoader;
 import com.google.protobuf.ServiceException;
 import org.apache.hadoop.hdds.HddsConfigKeys;
 import org.apache.hadoop.hdds.HddsUtils;
@@ -64,6 +65,8 @@
 import org.apache.hadoop.ozone.container.common.volume.VolumeUsage;
 import org.apache.hadoop.util.Time;
 import org.apache.ratis.statemachine.StateMachine;
+import com.google.common.cache.Cache;
+import com.google.common.cache.CacheBuilder;
 import org.apache.ratis.thirdparty.com.google.protobuf.ProtocolMessageEnum;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -74,6 +77,7 @@
 import java.util.Optional;
 import java.util.Set;
 import java.util.TreeMap;
+import java.util.concurrent.ExecutionException;
 import java.util.concurrent.TimeUnit;
 
 import static org.apache.hadoop.ozone.audit.AuditLogger.PerformanceStringBuilder;
@@ -113,6 +117,8 @@ public class HddsDispatcher implements ContainerDispatcher, Auditor {
   private ContainerMetrics metrics;
   private final TokenVerifier tokenVerifier;
   private long slowOpThresholdMs;
+  private final Cache<HddsVolume, SpaceUsageSource> cachedVolumeUsage;
+  private VolumeUsage.MinFreeSpaceCalculator freeSpaceCalculator;
 
   /**
    * Constructs an OzoneContainer that receives calls from
@@ -147,6 +153,19 @@ public HddsDispatcher(ConfigurationSource config, ContainerSet contSet,
         LOG,
         HddsUtils::processForDebug,
         HddsUtils::processForDebug);
+    this.cachedVolumeUsage = CacheBuilder.newBuilder()
+        .maximumSize(1000)
+        .refreshAfterWrite(1, TimeUnit.SECONDS)
+        .expireAfterAccess(1, TimeUnit.MINUTES)
+        .build(
+            new CacheLoader<HddsVolume, SpaceUsageSource>() {
+              @Override
+              public SpaceUsageSource load(HddsVolume volume) throws Exception {
+                return volume.getCurrentUsage();
+              }
+            }
+        );
+    this.freeSpaceCalculator = new VolumeUsage.MinFreeSpaceCalculator(conf);
   }
 
   @Override
@@ -608,11 +627,17 @@ private boolean isVolumeFull(Container container) {
         .orElse(Boolean.FALSE);
     if (isOpen) {
       HddsVolume volume = container.getContainerData().getVolume();
-      SpaceUsageSource precomputedVolumeSpace =
-          volume.getCurrentUsage();
+      SpaceUsageSource precomputedVolumeSpace;
+      try {
+        precomputedVolumeSpace = cachedVolumeUsage.get(volume,
+            volume::getCurrentUsage);
+      } catch (ExecutionException e) {
+        // should not happen: the loader only reads the current volume usage
+        throw new RuntimeException(e);
+      }
       long volumeCapacity = precomputedVolumeSpace.getCapacity();
       long volumeFreeSpaceToSpare =
-          VolumeUsage.getMinVolumeFreeSpace(conf, volumeCapacity);
+          freeSpaceCalculator.get(volumeCapacity);
       long volumeFree = precomputedVolumeSpace.getAvailable();
       long volumeCommitted = volume.getCommittedBytes();
       long volumeAvailable = volumeFree - volumeCommitted;
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeUsage.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeUsage.java
index d18998821b1e..39cc499882da 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeUsage.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeUsage.java
@@ -139,6 +139,51 @@ public long getReservedBytes() {
     return reservedInBytes;
   }
 
+  /**
+   * Convenience class to calculate the minimum free space to keep on a volume.
+   */
+  public static class MinFreeSpaceCalculator {
+    private final boolean minFreeSpaceConfigured;
+    private final boolean minFreeSpacePercentConfigured;
+    private final long freeSpace;
+    private float freeSpacePercent;
+    private long defaultFreeSpace;
+
+    public MinFreeSpaceCalculator(ConfigurationSource conf) {
+      minFreeSpaceConfigured = conf.isConfigured(HDDS_DATANODE_VOLUME_MIN_FREE_SPACE);
+      minFreeSpacePercentConfigured = conf.isConfigured(HDDS_DATANODE_VOLUME_MIN_FREE_SPACE_PERCENT);
+      freeSpace = (long) conf.getStorageSize(HDDS_DATANODE_VOLUME_MIN_FREE_SPACE,
+          HDDS_DATANODE_VOLUME_MIN_FREE_SPACE_DEFAULT, StorageUnit.BYTES);
+      if (minFreeSpacePercentConfigured) {
+        freeSpacePercent = Float.parseFloat(
+            conf.get(HDDS_DATANODE_VOLUME_MIN_FREE_SPACE_PERCENT));
+      }
+      StorageSize measure = StorageSize.parse(HDDS_DATANODE_VOLUME_MIN_FREE_SPACE_DEFAULT);
+      double byteValue = measure.getUnit().toBytes(measure.getValue());
+      defaultFreeSpace = (long) StorageUnit.BYTES.fromBytes(byteValue);
+    }
+
+    public long get(long capacity) {
+      if (minFreeSpaceConfigured && minFreeSpacePercentConfigured) {
+        LOG.error(
+            "Both {} and {} are set. Set only one of them, not both. When both are set,"
+                + " the default value of {} is used as the min free space",
+            HDDS_DATANODE_VOLUME_MIN_FREE_SPACE,
+            HDDS_DATANODE_VOLUME_MIN_FREE_SPACE_PERCENT,
+            HDDS_DATANODE_VOLUME_MIN_FREE_SPACE_DEFAULT);
+        return defaultFreeSpace;
+      }
+
+      if (minFreeSpaceConfigured) {
+        return freeSpace;
+      } else if (minFreeSpacePercentConfigured) {
+        return (long) (capacity * freeSpacePercent);
+      }
+      // neither property is configured, so fall back to
+      // HDDS_DATANODE_VOLUME_MIN_FREE_SPACE_DEFAULT
+      return defaultFreeSpace;
+    }
+  }
+
   /**
    * If 'hdds.datanode.volume.min.free.space' is defined,
    * it will be honored first. If it is not defined and
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestReservedVolumeSpace.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestReservedVolumeSpace.java
index cd9beab4b797..5e2dd0c75c9b 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestReservedVolumeSpace.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestReservedVolumeSpace.java
@@ -18,6 +18,8 @@
 
 package org.apache.hadoop.ozone.container.common.volume;
 
+import org.apache.hadoop.conf.StorageUnit;
+import org.apache.hadoop.hdds.HddsConfigKeys;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.fs.MockSpaceUsageCheckFactory;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
@@ -30,6 +32,8 @@
 import java.nio.file.Path;
 import java.util.UUID;
 
+import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_DATANODE_VOLUME_MIN_FREE_SPACE;
+import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_DATANODE_VOLUME_MIN_FREE_SPACE_PERCENT;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_DU_RESERVED_PERCENT;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_DU_RESERVED_PERCENT_DEFAULT;
 import static org.junit.jupiter.api.Assertions.assertEquals;
@@ -178,6 +182,28 @@ public void testPathsCanonicalized() throws Exception {
     assertEquals(500, reservedFromVolume);
   }
 
+  @Test
+  public void testMinFreeSpaceCalculator() throws Exception {
+    OzoneConfiguration conf = new OzoneConfiguration();
+    double minSpace = 100.0;
+    conf.setStorageSize(HddsConfigKeys.HDDS_DATANODE_VOLUME_MIN_FREE_SPACE,
+        minSpace, StorageUnit.BYTES);
+    VolumeUsage.MinFreeSpaceCalculator calc = new VolumeUsage.MinFreeSpaceCalculator(conf);
+    long capacity = 1000;
+    assertEquals(minSpace, calc.get(capacity));
+
+    conf.setFloat(HDDS_DATANODE_VOLUME_MIN_FREE_SPACE_PERCENT, 0.01f);
+    calc = new VolumeUsage.MinFreeSpaceCalculator(conf);
+    // both properties are now set, so the default of 5GB is used
+    assertEquals(5L * 1024 * 1024 * 1024, calc.get(capacity));
+
+    // only the percent property remains set: capacity * 1% = 10
+    conf.unset(HDDS_DATANODE_VOLUME_MIN_FREE_SPACE);
+    calc = new VolumeUsage.MinFreeSpaceCalculator(conf);
+    assertEquals(10, calc.get(capacity));
+  }
+
+
   private long getExpectedDefaultReserved(HddsVolume volume) {
     long totalCapacity = volume.getVolumeInfo().get().getUsageForTesting().realUsage().getCapacity();
     return (long) Math.ceil(totalCapacity * HDDS_DATANODE_DIR_DU_RESERVED_PERCENT_DEFAULT);