diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java
index 2fcc3a67db4e..370c480315da 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java
@@ -59,14 +59,6 @@ public final class HddsConfigKeys {
public static final String HDDS_DATANODE_VOLUME_CHOOSING_POLICY =
"hdds.datanode.volume.choosing.policy";
- public static final String HDDS_DATANODE_VOLUME_MIN_FREE_SPACE =
- "hdds.datanode.volume.min.free.space";
- public static final String HDDS_DATANODE_VOLUME_MIN_FREE_SPACE_DEFAULT =
- "5GB";
-
- public static final String HDDS_DATANODE_VOLUME_MIN_FREE_SPACE_PERCENT =
- "hdds.datanode.volume.min.free.space.percent";
-
public static final String HDDS_DB_PROFILE = "hdds.db.profile";
// Once a container usage crosses this threshold, it is eligible for
diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml
index a3cdeadcd0ee..155eb48bf7ef 100644
--- a/hadoop-hdds/common/src/main/resources/ozone-default.xml
+++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml
@@ -218,17 +218,6 @@
This volume choosing policy randomly chooses two volumes with remaining space and then picks the one with lower utilization.
-  <property>
-    <name>hdds.datanode.volume.min.free.space</name>
-    <value>5GB</value>
-    <tag>OZONE, CONTAINER, STORAGE, MANAGEMENT</tag>
-    <description>
-      This determines the free space to be used for closing containers
-      When the difference between volume capacity and used reaches this number,
-      containers that reside on this volume will be closed and no new containers
-      would be allocated on this volume.
-    </description>
-  </property>
hdds.container.ratis.enabled
false
diff --git a/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigType.java b/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigType.java
index db8d7f969fbd..d12f659f8c55 100644
--- a/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigType.java
+++ b/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigType.java
@@ -168,6 +168,18 @@ void set(ConfigurationTarget target, String key, Object value,
Config config) {
target.setDouble(key, (double) value);
}
+ },
+ FLOAT {
+ @Override
+ Float parse(String value, Config config, Class<?> type, String key) {
+ return Float.parseFloat(value);
+ }
+
+ @Override
+ void set(ConfigurationTarget target, String key, Object value,
+ Config config) {
+ target.setFloat(key, (float) value);
+ }
};
abstract Object parse(String value, Config config, Class<?> type, String key)
diff --git a/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigurationTarget.java b/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigurationTarget.java
index 863d80f00384..2749be4aa281 100644
--- a/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigurationTarget.java
+++ b/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigurationTarget.java
@@ -39,6 +39,10 @@ default void setDouble(String name, double value) {
set(name, Double.toString(value));
}
+ default void setFloat(String name, float value) {
+ set(name, Float.toString(value));
+ }
+
default void setBoolean(String name, boolean value) {
set(name, Boolean.toString(value));
}
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java
index dc8fcde3fbed..6fdbcb1b3cd8 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java
@@ -112,7 +112,6 @@ public class HddsDispatcher implements ContainerDispatcher, Auditor {
private ContainerMetrics metrics;
private final TokenVerifier tokenVerifier;
private long slowOpThresholdNs;
- private VolumeUsage.MinFreeSpaceCalculator freeSpaceCalculator;
/**
* Constructs an OzoneContainer that receives calls from
@@ -146,7 +145,6 @@ public HddsDispatcher(ConfigurationSource config, ContainerSet contSet,
LOG,
HddsUtils::processForDebug,
HddsUtils::processForDebug);
- this.freeSpaceCalculator = new VolumeUsage.MinFreeSpaceCalculator(conf);
}
@Override
@@ -619,7 +617,7 @@ private boolean isVolumeFull(Container container) {
if (isOpen) {
HddsVolume volume = container.getContainerData().getVolume();
SpaceUsageSource usage = volume.getCurrentUsage();
- long volumeFreeSpaceToSpare = freeSpaceCalculator.get(usage.getCapacity());
+ long volumeFreeSpaceToSpare = volume.getFreeSpaceToSpare(usage.getCapacity());
return !VolumeUsage.hasVolumeEnoughSpace(usage.getAvailable(), volume.getCommittedBytes(), 0,
volumeFreeSpaceToSpare);
}
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/StorageLocationReport.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/StorageLocationReport.java
index e30da44e8b00..301a43320f38 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/StorageLocationReport.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/StorageLocationReport.java
@@ -19,12 +19,10 @@
import java.io.IOException;
import org.apache.hadoop.fs.StorageType;
-import org.apache.hadoop.hdds.conf.ConfigurationSource;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.MetadataStorageReportProto;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.StorageReportProto;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.StorageTypeProto;
import org.apache.hadoop.ozone.container.common.interfaces.StorageLocationReportMXBean;
-import org.apache.hadoop.ozone.container.common.volume.VolumeUsage;
/**
* Storage location stats of datanodes that provide back store for containers.
@@ -168,11 +166,6 @@ private static StorageType getStorageType(StorageTypeProto proto) throws
* @throws IOException In case, the storage type specified is invalid.
*/
public StorageReportProto getProtoBufMessage() throws IOException {
- return getProtoBufMessage(null);
- }
-
- public StorageReportProto getProtoBufMessage(ConfigurationSource conf)
- throws IOException {
StorageReportProto.Builder srb = StorageReportProto.newBuilder();
return srb.setStorageUuid(getId())
.setCapacity(getCapacity())
@@ -182,8 +175,7 @@ public StorageReportProto getProtoBufMessage(ConfigurationSource conf)
.setStorageType(getStorageTypeProto())
.setStorageLocation(getStorageLocation())
.setFailed(isFailed())
- .setFreeSpaceToSpare(conf != null ?
- new VolumeUsage.MinFreeSpaceCalculator(conf).get(getCapacity()) : 0)
+ .setFreeSpaceToSpare(getFreeSpaceToSpare())
.build();
}
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeConfiguration.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeConfiguration.java
index 7731fdf1e273..a486a1eaeeb5 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeConfiguration.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeConfiguration.java
@@ -18,7 +18,11 @@
package org.apache.hadoop.ozone.container.common.statemachine;
import static java.util.concurrent.TimeUnit.MICROSECONDS;
+import static org.apache.hadoop.hdds.conf.ConfigTag.CONTAINER;
import static org.apache.hadoop.hdds.conf.ConfigTag.DATANODE;
+import static org.apache.hadoop.hdds.conf.ConfigTag.MANAGEMENT;
+import static org.apache.hadoop.hdds.conf.ConfigTag.OZONE;
+import static org.apache.hadoop.hdds.conf.ConfigTag.STORAGE;
import static org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration.CONFIG_PREFIX;
import java.time.Duration;
@@ -28,6 +32,7 @@
import org.apache.hadoop.hdds.conf.ConfigType;
import org.apache.hadoop.hdds.conf.PostConstruct;
import org.apache.hadoop.hdds.conf.ReconfigurableConfig;
+import org.apache.hadoop.hdds.conf.StorageSize;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -68,6 +73,13 @@ public class DatanodeConfiguration extends ReconfigurableConfig {
"hdds.datanode.disk.check.min.gap";
public static final String DISK_CHECK_TIMEOUT_KEY =
"hdds.datanode.disk.check.timeout";
+ public static final String HDDS_DATANODE_VOLUME_MIN_FREE_SPACE =
+ "hdds.datanode.volume.min.free.space";
+ public static final String HDDS_DATANODE_VOLUME_MIN_FREE_SPACE_DEFAULT =
+ "5GB";
+ public static final String HDDS_DATANODE_VOLUME_MIN_FREE_SPACE_PERCENT =
+ "hdds.datanode.volume.min.free.space.percent";
+ static final byte MIN_FREE_SPACE_UNSET = -1;
public static final String WAIT_ON_ALL_FOLLOWERS =
"hdds.datanode.wait.on.all.followers";
@@ -319,6 +331,25 @@ public void setBlockDeletionLimit(int limit) {
this.blockLimitPerInterval = limit;
}
+ @Config(key = "hdds.datanode.volume.min.free.space",
+ defaultValue = "-1",
+ type = ConfigType.SIZE,
+ tags = { OZONE, CONTAINER, STORAGE, MANAGEMENT },
+ description = "This determines the free space to be used for closing containers." +
+ " When the difference between volume capacity and used reaches this number," +
+ " containers that reside on this volume will be closed and no new containers" +
+ " would be allocated on this volume."
+ )
+ private long minFreeSpace = MIN_FREE_SPACE_UNSET;
+
+ @Config(key = "hdds.datanode.volume.min.free.space.percent",
+ defaultValue = "-1",
+ type = ConfigType.FLOAT,
+ tags = { OZONE, CONTAINER, STORAGE, MANAGEMENT },
+ description = "Ratio of volume capacity (between 0 and 1) to keep as minimum free space. Alternative to hdds.datanode.volume.min.free.space; only one of the two should be set."
+ )
+ private float minFreeSpaceRatio = MIN_FREE_SPACE_UNSET;
+
@Config(key = "periodic.disk.check.interval.minutes",
defaultValue = "60",
type = ConfigType.LONG,
@@ -719,6 +750,39 @@ public void validate() {
rocksdbDeleteObsoleteFilesPeriod =
ROCKSDB_DELETE_OBSOLETE_FILES_PERIOD_MICRO_SECONDS_DEFAULT;
}
+
+ validateMinFreeSpace();
+ }
+
+ /**
+ * If 'hdds.datanode.volume.min.free.space' is defined,
+ * it will be honored first. If it is not defined and
+ * 'hdds.datanode.volume.min.free.space.percent' is defined, that ratio is used;
+ * otherwise it falls back to the built-in default (5GB).
+ */
+ private void validateMinFreeSpace() {
+ if (minFreeSpaceRatio > 1) {
+ LOG.warn("{} = {} is invalid, should be between 0 and 1",
+ HDDS_DATANODE_VOLUME_MIN_FREE_SPACE_PERCENT, minFreeSpaceRatio);
+ minFreeSpaceRatio = MIN_FREE_SPACE_UNSET;
+ }
+
+ final boolean minFreeSpaceConfigured = minFreeSpace >= 0;
+ final boolean minFreeSpaceRatioConfigured = minFreeSpaceRatio >= 0;
+
+ if (minFreeSpaceConfigured && minFreeSpaceRatioConfigured) {
+ LOG.warn("Only one of {}={} and {}={} should be set. With both set, default value ({}) will be used.",
+ HDDS_DATANODE_VOLUME_MIN_FREE_SPACE,
+ minFreeSpace,
+ HDDS_DATANODE_VOLUME_MIN_FREE_SPACE_PERCENT,
+ minFreeSpaceRatio,
+ HDDS_DATANODE_VOLUME_MIN_FREE_SPACE_DEFAULT);
+ }
+
+ if (minFreeSpaceConfigured == minFreeSpaceRatioConfigured) {
+ minFreeSpaceRatio = MIN_FREE_SPACE_UNSET;
+ minFreeSpace = getDefaultFreeSpace();
+ }
}
public void setContainerDeleteThreads(int containerDeleteThreads) {
@@ -737,6 +801,20 @@ public int getContainerCloseThreads() {
return containerCloseThreads;
}
+ public long getMinFreeSpace(long capacity) {
+ return minFreeSpaceRatio >= 0
+ ? ((long) (capacity * minFreeSpaceRatio))
+ : minFreeSpace;
+ }
+
+ public long getMinFreeSpace() {
+ return minFreeSpace;
+ }
+
+ public float getMinFreeSpaceRatio() {
+ return minFreeSpaceRatio;
+ }
+
public long getPeriodicDiskCheckIntervalMinutes() {
return periodicDiskCheckIntervalMinutes;
}
@@ -966,4 +1044,10 @@ public void setAutoCompactionSmallSstFileThreads(
this.autoCompactionSmallSstFileThreads =
autoCompactionSmallSstFileThreads;
}
+
+ static long getDefaultFreeSpace() {
+ final StorageSize measure = StorageSize.parse(HDDS_DATANODE_VOLUME_MIN_FREE_SPACE_DEFAULT);
+ return Math.round(measure.getUnit().toBytes(measure.getValue()));
+ }
+
}
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/AvailableSpaceFilter.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/AvailableSpaceFilter.java
index 330cc1d9f39a..d584600acf63 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/AvailableSpaceFilter.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/AvailableSpaceFilter.java
@@ -44,8 +44,7 @@ public boolean test(HddsVolume vol) {
long free = usage.getAvailable();
long committed = vol.getCommittedBytes();
long available = free - committed;
- long volumeFreeSpaceToSpare =
- new VolumeUsage.MinFreeSpaceCalculator(vol.getConf()).get(volumeCapacity);
+ long volumeFreeSpaceToSpare = vol.getFreeSpaceToSpare(volumeCapacity);
boolean hasEnoughSpace = VolumeUsage.hasVolumeEnoughSpace(free, committed,
requiredSpace, volumeFreeSpaceToSpare);
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java
index 70f25fa138dc..5513363d2a60 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java
@@ -35,7 +35,6 @@
import org.apache.hadoop.hdds.conf.ConfigurationSource;
import org.apache.hadoop.hdds.upgrade.HDDSLayoutFeature;
import org.apache.hadoop.hdfs.server.datanode.checker.VolumeCheckResult;
-import org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration;
import org.apache.hadoop.ozone.container.common.utils.DatanodeStoreCache;
import org.apache.hadoop.ozone.container.common.utils.HddsVolumeUtil;
import org.apache.hadoop.ozone.container.common.utils.RawDB;
@@ -142,7 +141,6 @@ private HddsVolume(Builder b) throws IOException {
volumeIOStats = null;
volumeInfoMetrics = new VolumeInfoMetrics(b.getVolumeRootStr(), this);
}
-
}
@Override
@@ -264,14 +262,13 @@ public synchronized VolumeCheckResult check(@Nullable Boolean unused)
throws Exception {
VolumeCheckResult result = super.check(unused);
- DatanodeConfiguration df = getConf().getObject(DatanodeConfiguration.class);
if (isDbLoadFailure()) {
LOG.warn("Volume {} failed to access RocksDB: RocksDB parent directory is null, " +
"the volume might not have been loaded properly.", getStorageDir());
return VolumeCheckResult.FAILED;
}
if (result != VolumeCheckResult.HEALTHY ||
- !df.getContainerSchemaV3Enabled() || !isDbLoaded()) {
+ !getDatanodeConfig().getContainerSchemaV3Enabled() || !isDbLoaded()) {
return result;
}
@@ -305,6 +302,10 @@ public long getCommittedBytes() {
return committedBytes.get();
}
+ public long getFreeSpaceToSpare(long volumeCapacity) {
+ return getDatanodeConfig().getMinFreeSpace(volumeCapacity);
+ }
+
public void setDbVolume(DbVolume dbVolume) {
this.dbVolume = dbVolume;
}
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/MutableVolumeSet.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/MutableVolumeSet.java
index 29888b23b4fe..f65d89e4cfe8 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/MutableVolumeSet.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/MutableVolumeSet.java
@@ -469,6 +469,7 @@ public StorageLocationReport[] getStorageReport() {
long remaining = 0;
long capacity = 0;
long committed = 0;
+ long spare = 0;
String rootDir = "";
failed = true;
if (volumeInfo.isPresent()) {
@@ -478,8 +479,9 @@ public StorageLocationReport[] getStorageReport() {
scmUsed = usage.getUsedSpace();
remaining = usage.getAvailable();
capacity = usage.getCapacity();
- committed = (volume instanceof HddsVolume) ?
- ((HddsVolume) volume).getCommittedBytes() : 0;
+ HddsVolume hddsVolume = volume instanceof HddsVolume ? (HddsVolume) volume : null;
+ committed = hddsVolume != null ? hddsVolume.getCommittedBytes() : 0;
+ spare = hddsVolume != null ? hddsVolume.getFreeSpaceToSpare(capacity) : 0;
failed = false;
} catch (UncheckedIOException ex) {
LOG.warn("Failed to get scmUsed and remaining for container " +
@@ -500,6 +502,7 @@ public StorageLocationReport[] getStorageReport() {
.setRemaining(remaining)
.setScmUsed(scmUsed)
.setCommitted(committed)
+ .setFreeSpaceToSpare(spare)
.setStorageType(volume.getStorageType());
StorageLocationReport r = builder.build();
reports[counter++] = r;
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/StorageVolume.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/StorageVolume.java
index 822510512663..6056bd9b37d3 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/StorageVolume.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/StorageVolume.java
@@ -115,7 +115,8 @@ public enum VolumeState {
private long cTime; // creation time of the file system state
private int layoutVersion; // layout version of the storage data
- private ConfigurationSource conf;
+ private final ConfigurationSource conf;
+ private final DatanodeConfiguration dnConf;
private final File storageDir;
private String workingDirName;
@@ -150,10 +151,9 @@ protected StorageVolume(Builder<?> b) throws IOException {
this.state = VolumeState.NOT_INITIALIZED;
this.clusterID = b.clusterID;
this.datanodeUuid = b.datanodeUuid;
- this.conf = b.conf;
- DatanodeConfiguration dnConf =
- conf.getObject(DatanodeConfiguration.class);
+ this.conf = b.conf;
+ this.dnConf = conf.getObject(DatanodeConfiguration.class);
this.ioTestCount = dnConf.getVolumeIOTestCount();
this.ioFailureTolerance = dnConf.getVolumeIOFailureTolerance();
this.ioTestSlidingWindow = new LinkedList<>();
@@ -167,6 +167,8 @@ protected StorageVolume(Builder<?> b) throws IOException {
this.state = VolumeState.FAILED;
this.ioTestCount = 0;
this.ioFailureTolerance = 0;
+ this.conf = null;
+ this.dnConf = null;
}
}
@@ -529,6 +531,10 @@ public ConfigurationSource getConf() {
return conf;
}
+ public DatanodeConfiguration getDatanodeConfig() {
+ return dnConf;
+ }
+
public void failVolume() {
setState(VolumeState.FAILED);
volumeInfo.ifPresent(VolumeInfo::shutdownUsageThread);
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeUsage.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeUsage.java
index 531886c744e9..6c4f7bf35cbe 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeUsage.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeUsage.java
@@ -17,9 +17,6 @@
package org.apache.hadoop.ozone.container.common.volume;
-import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_DATANODE_VOLUME_MIN_FREE_SPACE;
-import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_DATANODE_VOLUME_MIN_FREE_SPACE_DEFAULT;
-import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_DATANODE_VOLUME_MIN_FREE_SPACE_PERCENT;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_DU_RESERVED;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_DU_RESERVED_PERCENT;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_DU_RESERVED_PERCENT_DEFAULT;
@@ -31,7 +28,6 @@
import org.apache.hadoop.hdds.conf.ConfigurationException;
import org.apache.hadoop.hdds.conf.ConfigurationSource;
import org.apache.hadoop.hdds.conf.StorageSize;
-import org.apache.hadoop.hdds.conf.StorageUnit;
import org.apache.hadoop.hdds.fs.CachingSpaceUsageSource;
import org.apache.hadoop.hdds.fs.SpaceUsageCheckParams;
import org.apache.hadoop.hdds.fs.SpaceUsageSource;
@@ -128,58 +124,6 @@ public long getReservedBytes() {
return reservedInBytes;
}
- /**
- * Convenience class to calculate minimum free space.
- */
- public static class MinFreeSpaceCalculator {
- private final boolean minFreeSpaceConfigured;
- private final boolean minFreeSpacePercentConfigured;
- private final long freeSpace;
- private float freeSpacePercent;
- private final long defaultFreeSpace;
- public MinFreeSpaceCalculator(ConfigurationSource conf) {
- // cache these values to avoid repeated lookups
- minFreeSpaceConfigured = conf.isConfigured(HDDS_DATANODE_VOLUME_MIN_FREE_SPACE);
- minFreeSpacePercentConfigured = conf.isConfigured(HDDS_DATANODE_VOLUME_MIN_FREE_SPACE_PERCENT);
- freeSpace = (long)conf.getStorageSize(HDDS_DATANODE_VOLUME_MIN_FREE_SPACE,
- HDDS_DATANODE_VOLUME_MIN_FREE_SPACE_DEFAULT, StorageUnit.BYTES);
- if (minFreeSpacePercentConfigured) {
- freeSpacePercent = Float.parseFloat(
- conf.get(HDDS_DATANODE_VOLUME_MIN_FREE_SPACE_PERCENT));
- }
- StorageSize measure = StorageSize.parse(HDDS_DATANODE_VOLUME_MIN_FREE_SPACE_DEFAULT);
- double byteValue = measure.getUnit().toBytes(measure.getValue());
- defaultFreeSpace = (long)StorageUnit.BYTES.fromBytes(byteValue);
- }
-
- /**
- * If 'hdds.datanode.volume.min.free.space' is defined,
- * it will be honored first. If it is not defined and
- * 'hdds.datanode.volume.min.free.space' is defined, it will honor this
- * else it will fall back to 'hdds.datanode.volume.min.free.space.default'
- */
- public long get(long capacity) {
- if (minFreeSpaceConfigured && minFreeSpacePercentConfigured) {
- LOG.error(
- "Both {} and {} are set. Set either one, not both. If both are set,"
- + "it will use default value which is {} as min free space",
- HDDS_DATANODE_VOLUME_MIN_FREE_SPACE,
- HDDS_DATANODE_VOLUME_MIN_FREE_SPACE_PERCENT,
- HDDS_DATANODE_VOLUME_MIN_FREE_SPACE_DEFAULT);
- return defaultFreeSpace;
- }
-
- if (minFreeSpaceConfigured) {
- return freeSpace;
- } else if (minFreeSpacePercentConfigured) {
- return (long) (capacity * freeSpacePercent);
- }
- // either properties are not configured,then return
- // HDDS_DATANODE_VOLUME_MIN_FREE_SPACE_DEFAULT
- return defaultFreeSpace;
- }
- }
-
public static boolean hasVolumeEnoughSpace(long volumeAvailableSpace,
long volumeCommittedBytesCount,
long requiredSpace,
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java
index 91ff1511bd5c..750bb6e28321 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java
@@ -606,7 +606,7 @@ public StorageContainerDatanodeProtocolProtos.NodeReportProto getNodeReport()
NodeReportProto.newBuilder();
for (StorageLocationReport report : reports) {
- nrb.addStorageReport(report.getProtoBufMessage(config));
+ nrb.addStorageReport(report.getProtoBufMessage());
}
StorageLocationReport[] metaReports = metaVolumeSet.getStorageReport();
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java
index 5aca3bf1a39b..1243b5d46b0b 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java
@@ -50,7 +50,6 @@
import org.apache.commons.codec.digest.DigestUtils;
import org.apache.commons.lang3.RandomUtils;
import org.apache.hadoop.conf.StorageUnit;
-import org.apache.hadoop.hdds.HddsConfigKeys;
import org.apache.hadoop.hdds.client.BlockID;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.fs.MockSpaceUsageCheckFactory;
@@ -245,7 +244,7 @@ public void testContainerCloseActionWhenVolumeFull(
ContainerLayoutVersion layoutVersion) throws Exception {
String testDirPath = testDir.getPath();
OzoneConfiguration conf = new OzoneConfiguration();
- conf.setStorageSize(HddsConfigKeys.HDDS_DATANODE_VOLUME_MIN_FREE_SPACE,
+ conf.setStorageSize(DatanodeConfiguration.HDDS_DATANODE_VOLUME_MIN_FREE_SPACE,
100.0, StorageUnit.BYTES);
DatanodeDetails dd = randomDatanodeDetails();
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/TestDatanodeConfiguration.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/TestDatanodeConfiguration.java
index 3f4b9e4f8d64..8eace9512151 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/TestDatanodeConfiguration.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/TestDatanodeConfiguration.java
@@ -31,6 +31,7 @@
import static org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration.FAILED_VOLUMES_TOLERATED_DEFAULT;
import static org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration.PERIODIC_DISK_CHECK_INTERVAL_MINUTES_DEFAULT;
import static org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration.PERIODIC_DISK_CHECK_INTERVAL_MINUTES_KEY;
+import static org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration.getDefaultFreeSpace;
import static org.junit.jupiter.api.Assertions.assertEquals;
import java.util.concurrent.TimeUnit;
@@ -44,12 +45,16 @@
import org.apache.ratis.server.RaftServerConfigKeys;
import org.apache.ratis.util.TimeDuration;
import org.junit.jupiter.api.Test;
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.ValueSource;
/**
* Test for {@link DatanodeConfiguration}.
*/
public class TestDatanodeConfiguration {
+ private static final long[] CAPACITIES = {100_000, 1_000_000, 10_000_000};
+
@Test
public void acceptsValidValues() {
// GIVEN
@@ -149,6 +154,7 @@ public void overridesInvalidValues() {
public void isCreatedWitDefaultValues() {
// GIVEN
OzoneConfiguration conf = new OzoneConfiguration();
+ conf.unset(DatanodeConfiguration.HDDS_DATANODE_VOLUME_MIN_FREE_SPACE); // set in ozone-site.xml
// WHEN
DatanodeConfiguration subject = conf.getObject(DatanodeConfiguration.class);
@@ -170,6 +176,65 @@ public void isCreatedWitDefaultValues() {
subject.getDiskCheckTimeout());
assertEquals(BLOCK_DELETE_COMMAND_WORKER_INTERVAL_DEFAULT,
subject.getBlockDeleteCommandWorkerInterval());
+ assertEquals(DatanodeConfiguration.getDefaultFreeSpace(), subject.getMinFreeSpace());
+ assertEquals(DatanodeConfiguration.MIN_FREE_SPACE_UNSET, subject.getMinFreeSpaceRatio());
+ }
+
+ @Test
+ void rejectsInvalidMinFreeSpaceRatio() {
+ OzoneConfiguration conf = new OzoneConfiguration();
+ conf.setFloat(DatanodeConfiguration.HDDS_DATANODE_VOLUME_MIN_FREE_SPACE_PERCENT, 1.5f);
+
+ DatanodeConfiguration subject = conf.getObject(DatanodeConfiguration.class);
+
+ assertEquals(DatanodeConfiguration.MIN_FREE_SPACE_UNSET, subject.getMinFreeSpaceRatio());
+ }
+
+ @Test
+ void usesDefaultFreeSpaceIfBothMinFreeSpacePropertiesSet() {
+ OzoneConfiguration conf = new OzoneConfiguration();
+ conf.setLong(DatanodeConfiguration.HDDS_DATANODE_VOLUME_MIN_FREE_SPACE, 10000);
+ conf.setFloat(DatanodeConfiguration.HDDS_DATANODE_VOLUME_MIN_FREE_SPACE_PERCENT, .5f);
+
+ DatanodeConfiguration subject = conf.getObject(DatanodeConfiguration.class);
+
+ assertEquals(DatanodeConfiguration.getDefaultFreeSpace(), subject.getMinFreeSpace());
+ assertEquals(DatanodeConfiguration.MIN_FREE_SPACE_UNSET, subject.getMinFreeSpaceRatio());
+
+ for (long capacity : CAPACITIES) {
+ assertEquals(getDefaultFreeSpace(), subject.getMinFreeSpace(capacity));
+ }
+ }
+
+ @ParameterizedTest
+ @ValueSource(longs = {1_000, 10_000, 100_000})
+ void usesFixedMinFreeSpace(long bytes) {
+ OzoneConfiguration conf = new OzoneConfiguration();
+ conf.setLong(DatanodeConfiguration.HDDS_DATANODE_VOLUME_MIN_FREE_SPACE, bytes);
+
+ DatanodeConfiguration subject = conf.getObject(DatanodeConfiguration.class);
+
+ assertEquals(bytes, subject.getMinFreeSpace());
+ assertEquals(DatanodeConfiguration.MIN_FREE_SPACE_UNSET, subject.getMinFreeSpaceRatio());
+
+ for (long capacity : CAPACITIES) {
+ assertEquals(bytes, subject.getMinFreeSpace(capacity));
+ }
+ }
+
+ @ParameterizedTest
+ @ValueSource(ints = {1, 10, 100})
+ void calculatesMinFreeSpaceRatio(int percent) {
+ OzoneConfiguration conf = new OzoneConfiguration();
+ conf.unset(DatanodeConfiguration.HDDS_DATANODE_VOLUME_MIN_FREE_SPACE); // set in ozone-site.xml
+ conf.setFloat(DatanodeConfiguration.HDDS_DATANODE_VOLUME_MIN_FREE_SPACE_PERCENT, percent / 100.0f);
+
+ DatanodeConfiguration subject = conf.getObject(DatanodeConfiguration.class);
+
+ assertEquals(percent / 100.0f, subject.getMinFreeSpaceRatio());
+ for (long capacity : CAPACITIES) {
+ assertEquals(capacity * percent / 100, subject.getMinFreeSpace(capacity));
+ }
}
@Test
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestReservedVolumeSpace.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestReservedVolumeSpace.java
index 7127c7ad80b0..bcc123520e02 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestReservedVolumeSpace.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestReservedVolumeSpace.java
@@ -17,10 +17,10 @@
package org.apache.hadoop.ozone.container.common.volume;
-import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_DATANODE_VOLUME_MIN_FREE_SPACE;
-import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_DATANODE_VOLUME_MIN_FREE_SPACE_PERCENT;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_DU_RESERVED_PERCENT;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_DU_RESERVED_PERCENT_DEFAULT;
+import static org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration.HDDS_DATANODE_VOLUME_MIN_FREE_SPACE;
+import static org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration.HDDS_DATANODE_VOLUME_MIN_FREE_SPACE_PERCENT;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNotEquals;
import static org.junit.jupiter.api.Assertions.assertThrows;
@@ -30,11 +30,11 @@
import java.nio.file.Path;
import java.util.UUID;
import org.apache.hadoop.conf.StorageUnit;
-import org.apache.hadoop.hdds.HddsConfigKeys;
import org.apache.hadoop.hdds.conf.ConfigurationException;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.fs.MockSpaceUsageCheckFactory;
import org.apache.hadoop.hdds.scm.ScmConfigKeys;
+import org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.io.TempDir;
@@ -196,21 +196,18 @@ public void testPathsCanonicalized() throws Exception {
public void testMinFreeSpaceCalculator() throws Exception {
OzoneConfiguration conf = new OzoneConfiguration();
double minSpace = 100.0;
- conf.setStorageSize(HddsConfigKeys.HDDS_DATANODE_VOLUME_MIN_FREE_SPACE,
+ conf.setStorageSize(DatanodeConfiguration.HDDS_DATANODE_VOLUME_MIN_FREE_SPACE,
minSpace, StorageUnit.BYTES);
- VolumeUsage.MinFreeSpaceCalculator calc = new VolumeUsage.MinFreeSpaceCalculator(conf);
long capacity = 1000;
- assertEquals(minSpace, calc.get(capacity));
+ assertEquals(minSpace, conf.getObject(DatanodeConfiguration.class).getMinFreeSpace(capacity));
conf.setFloat(HDDS_DATANODE_VOLUME_MIN_FREE_SPACE_PERCENT, 0.01f);
- calc = new VolumeUsage.MinFreeSpaceCalculator(conf);
// default is 5GB
- assertEquals(5L * 1024 * 1024 * 1024, calc.get(capacity));
+ assertEquals(5L * 1024 * 1024 * 1024, conf.getObject(DatanodeConfiguration.class).getMinFreeSpace(capacity));
// capacity * 1% = 10
conf.unset(HDDS_DATANODE_VOLUME_MIN_FREE_SPACE);
- calc = new VolumeUsage.MinFreeSpaceCalculator(conf);
- assertEquals(10, calc.get(capacity));
+ assertEquals(10, conf.getObject(DatanodeConfiguration.class).getMinFreeSpace(capacity));
}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOzoneConfigurationFields.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOzoneConfigurationFields.java
index 9c040f9aa006..45dc992980d8 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOzoneConfigurationFields.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOzoneConfigurationFields.java
@@ -25,6 +25,7 @@
import org.apache.hadoop.hdds.scm.ScmConfigKeys;
import org.apache.hadoop.hdds.scm.server.SCMHTTPServerConfig;
import org.apache.hadoop.hdds.server.http.HttpServer2;
+import org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration;
import org.apache.hadoop.ozone.om.OMConfigKeys;
import org.apache.hadoop.ozone.recon.ReconServerConfigKeys;
import org.apache.hadoop.ozone.s3.S3GatewayConfigKeys;
@@ -129,7 +130,7 @@ private void addPropertiesNotInXml() {
ScmConfigKeys.OZONE_SCM_PIPELINE_PLACEMENT_IMPL_KEY,
ScmConfigKeys.OZONE_SCM_HA_PREFIX,
S3GatewayConfigKeys.OZONE_S3G_FSO_DIRECTORY_CREATION_ENABLED,
- HddsConfigKeys.HDDS_DATANODE_VOLUME_MIN_FREE_SPACE_PERCENT,
+ DatanodeConfiguration.HDDS_DATANODE_VOLUME_MIN_FREE_SPACE_PERCENT,
OzoneConfigKeys.HDDS_SCM_CLIENT_RPC_TIME_OUT,
OzoneConfigKeys.HDDS_SCM_CLIENT_MAX_RETRY_TIMEOUT,
OzoneConfigKeys.HDDS_SCM_CLIENT_FAILOVER_MAX_RETRY