From 9b1e532c0b78c18f281b3433ebd96e6e440d54a5 Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" Date: Mon, 24 Mar 2025 10:04:40 +0100 Subject: [PATCH 1/5] move storageType to StorageVolume --- .../container/common/volume/StorageVolume.java | 10 +++++----- .../ozone/container/common/volume/VolumeInfo.java | 15 --------------- 2 files changed, 5 insertions(+), 20 deletions(-) diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/StorageVolume.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/StorageVolume.java index 6056bd9b37d3..0f10eeb5c035 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/StorageVolume.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/StorageVolume.java @@ -118,6 +118,7 @@ public enum VolumeState { private final ConfigurationSource conf; private final DatanodeConfiguration dnConf; + private final StorageType storageType; private final File storageDir; private String workingDirName; private File tmpDir; @@ -139,12 +140,12 @@ public enum VolumeState { private int healthCheckFileSize; protected StorageVolume(Builder b) throws IOException { + storageType = b.storageType; if (!b.failedVolume) { StorageLocation location = StorageLocation.parse(b.volumeRootStr); storageDir = new File(location.getUri().getPath(), b.storageDirStr); this.volumeInfo = Optional.of( new VolumeInfo.Builder(b.volumeRootStr, b.conf) - .storageType(b.storageType) .usageCheckFactory(b.usageCheckFactory) .build()); this.volumeSet = b.volumeSet; @@ -375,7 +376,7 @@ public abstract static class Builder> { private final String volumeRootStr; private String storageDirStr; private ConfigurationSource conf; - private StorageType storageType; + private StorageType storageType = StorageType.DEFAULT; private SpaceUsageCheckFactory usageCheckFactory; private VolumeSet volumeSet; private boolean failedVolume = false; @@ -395,7 +396,7 @@ public T conf(ConfigurationSource config) { } public T storageType(StorageType st) { - this.storageType = st; + this.storageType = Objects.requireNonNull(st, "storageType == null"); return this.getThis(); } @@ -491,8 +492,7 @@ public VolumeSet getVolumeSet() { } public StorageType getStorageType() { - return volumeInfo.map(VolumeInfo::getStorageType) - .orElse(StorageType.DEFAULT); + return storageType; } public String getStorageID() { diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeInfo.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeInfo.java index a25676bf5c73..d0363915297c 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeInfo.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeInfo.java @@ -20,7 +20,6 @@ import com.google.common.annotations.VisibleForTesting; import java.io.File; import java.io.IOException; -import org.apache.hadoop.fs.StorageType; import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.fs.SpaceUsageCheckFactory; import org.apache.hadoop.hdds.fs.SpaceUsageCheckParams; @@ -90,7 +89,6 @@ public final class VolumeInfo { private static final Logger LOG = LoggerFactory.getLogger(VolumeInfo.class); private final String rootDir; - private final StorageType storageType; // Space usage calculator private 
final VolumeUsage usage; @@ -102,18 +100,12 @@ public static class Builder { private final ConfigurationSource conf; private final String rootDir; private SpaceUsageCheckFactory usageCheckFactory; - private StorageType storageType; public Builder(String root, ConfigurationSource config) { this.rootDir = root; this.conf = config; } - public Builder storageType(StorageType st) { - this.storageType = st; - return this; - } - public Builder usageCheckFactory(SpaceUsageCheckFactory factory) { this.usageCheckFactory = factory; return this; @@ -136,9 +128,6 @@ private VolumeInfo(Builder b) throws IOException { throw new IOException("Unable to create the volume root dir at " + root); } - this.storageType = (b.storageType != null ? - b.storageType : StorageType.DEFAULT); - SpaceUsageCheckFactory usageCheckFactory = b.usageCheckFactory; if (usageCheckFactory == null) { usageCheckFactory = SpaceUsageCheckFactory.create(b.conf); @@ -173,10 +162,6 @@ public String getRootDir() { return this.rootDir; } - public StorageType getStorageType() { - return this.storageType; - } - /** * Only for testing. Do not use otherwise. */ From 503d985867b60c34ce2909b7c8eb1f66a63ad7c8 Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" Date: Mon, 24 Mar 2025 10:09:37 +0100 Subject: [PATCH 2/5] move String rootDir to StorageVolume --- .../ozone/container/common/volume/MutableVolumeSet.java | 5 ++--- .../ozone/container/common/volume/StorageVolume.java | 6 ++++-- .../hadoop/ozone/container/common/volume/VolumeInfo.java | 9 +-------- 3 files changed, 7 insertions(+), 13 deletions(-) diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/MutableVolumeSet.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/MutableVolumeSet.java index f65d89e4cfe8..76379fa93f87 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/MutableVolumeSet.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/MutableVolumeSet.java @@ -470,11 +470,10 @@ public StorageLocationReport[] getStorageReport() { long capacity = 0; long committed = 0; long spare = 0; - String rootDir = ""; + String rootDir = volume.getVolumeRootDir(); failed = true; if (volumeInfo.isPresent()) { try { - rootDir = volumeInfo.get().getRootDir(); SpaceUsageSource usage = volumeInfo.get().getCurrentUsage(); scmUsed = usage.getUsedSpace(); remaining = usage.getAvailable(); @@ -485,7 +484,7 @@ public StorageLocationReport[] getStorageReport() { failed = false; } catch (UncheckedIOException ex) { LOG.warn("Failed to get scmUsed and remaining for container " + - "storage location {}", volumeInfo.get().getRootDir(), ex); + "storage location {}", rootDir, ex); // reset scmUsed and remaining if df/du failed. 
scmUsed = 0; remaining = 0; diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/StorageVolume.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/StorageVolume.java index 0f10eeb5c035..65810138eba8 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/StorageVolume.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/StorageVolume.java @@ -119,6 +119,7 @@ public enum VolumeState { private final DatanodeConfiguration dnConf; private final StorageType storageType; + private final String volumeRoot; private final File storageDir; private String workingDirName; private File tmpDir; @@ -141,8 +142,9 @@ public enum VolumeState { protected StorageVolume(Builder b) throws IOException { storageType = b.storageType; + volumeRoot = b.volumeRootStr; if (!b.failedVolume) { - StorageLocation location = StorageLocation.parse(b.volumeRootStr); + StorageLocation location = StorageLocation.parse(volumeRoot); storageDir = new File(location.getUri().getPath(), b.storageDirStr); this.volumeInfo = Optional.of( new VolumeInfo.Builder(b.volumeRootStr, b.conf) @@ -444,7 +446,7 @@ public StorageType getStorageType() { } public String getVolumeRootDir() { - return volumeInfo.map(VolumeInfo::getRootDir).orElse(null); + return volumeRoot; } public SpaceUsageSource getCurrentUsage() { diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeInfo.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeInfo.java index d0363915297c..e0c01f38d0c6 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeInfo.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeInfo.java @@ -88,8 +88,6 @@ public final class VolumeInfo { private static final Logger LOG = LoggerFactory.getLogger(VolumeInfo.class); - private final String rootDir; - // Space usage calculator private final VolumeUsage usage; @@ -118,8 +116,7 @@ public VolumeInfo build() throws IOException { private VolumeInfo(Builder b) throws IOException { - this.rootDir = b.rootDir; - File root = new File(this.rootDir); + File root = new File(b.rootDir); boolean succeeded = root.isDirectory() || root.mkdirs(); @@ -158,10 +155,6 @@ void shutdownUsageThread() { usage.shutdown(); } - public String getRootDir() { - return this.rootDir; - } - /** * Only for testing. Do not use otherwise. 
*/ From 0350a99a5b9012bb4c26be0628d396addbba7428 Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" Date: Mon, 24 Mar 2025 10:30:39 +0100 Subject: [PATCH 3/5] remove VolumeInfo --- .../container/common/volume/DbVolume.java | 2 +- .../container/common/volume/HddsVolume.java | 2 +- .../common/volume/MutableVolumeSet.java | 4 +- .../common/volume/StorageVolume.java | 49 +++-- .../container/common/volume/VolumeInfo.java | 169 ------------------ .../container/common/volume/VolumeUsage.java | 59 +++++- .../volume/TestReservedVolumeSpace.java | 14 +- .../common/volume/TestVolumeSet.java | 4 +- .../hadoop/ozone/UniformDatanodesFactory.java | 2 +- 9 files changed, 103 insertions(+), 202 deletions(-) delete mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeInfo.java diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/DbVolume.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/DbVolume.java index f7a99b9b1ee9..0f188e53b137 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/DbVolume.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/DbVolume.java @@ -55,7 +55,7 @@ protected DbVolume(Builder b) throws IOException { super(b); this.hddsDbStorePathMap = new HashMap<>(); - if (!b.getFailedVolume() && getVolumeInfo().isPresent()) { + if (!b.getFailedVolume()) { LOG.info("Creating DbVolume: {} of storage type: {}, {}", getStorageDir(), b.getStorageType(), getCurrentUsage()); diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java index 5513363d2a60..d355908e5e91 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java @@ -119,7 +119,7 @@ public HddsVolume build() throws IOException { private HddsVolume(Builder b) throws IOException { super(b); - if (!b.getFailedVolume() && getVolumeInfo().isPresent()) { + if (!b.getFailedVolume()) { this.setState(VolumeState.NOT_INITIALIZED); ConfigurationSource conf = getConf(); int[] intervals = conf.getInts(OZONE_DATANODE_IO_METRICS_PERCENTILES_INTERVALS_SECONDS_KEY); diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/MutableVolumeSet.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/MutableVolumeSet.java index 76379fa93f87..29cc05fcee70 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/MutableVolumeSet.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/MutableVolumeSet.java @@ -277,7 +277,7 @@ public void checkVolumeAsync(StorageVolume volume) { } public void refreshAllVolumeUsage() { - volumeMap.forEach((k, v) -> v.refreshVolumeInfo()); + volumeMap.forEach((k, v) -> v.refreshVolumeUsage()); } /** @@ -464,7 +464,7 @@ public StorageLocationReport[] getStorageReport() { StorageVolume volume; for (Map.Entry entry : volumeMap.entrySet()) { volume = entry.getValue(); - Optional volumeInfo = volume.getVolumeInfo(); + Optional volumeInfo = volume.getVolumeInfo(); 
long scmUsed = 0; long remaining = 0; long capacity = 0; diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/StorageVolume.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/StorageVolume.java index 65810138eba8..28c062e43252 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/StorageVolume.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/StorageVolume.java @@ -37,6 +37,7 @@ import org.apache.hadoop.fs.StorageType; import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.fs.SpaceUsageCheckFactory; +import org.apache.hadoop.hdds.fs.SpaceUsageCheckParams; import org.apache.hadoop.hdds.fs.SpaceUsageSource; import org.apache.hadoop.hdfs.server.datanode.StorageLocation; import org.apache.hadoop.hdfs.server.datanode.checker.Checkable; @@ -125,7 +126,7 @@ public enum VolumeState { private File tmpDir; private File diskCheckDir; - private final Optional volumeInfo; + private final Optional volumeUsage; private final VolumeSet volumeSet; @@ -146,10 +147,8 @@ protected StorageVolume(Builder b) throws IOException { if (!b.failedVolume) { StorageLocation location = StorageLocation.parse(volumeRoot); storageDir = new File(location.getUri().getPath(), b.storageDirStr); - this.volumeInfo = Optional.of( - new VolumeInfo.Builder(b.volumeRootStr, b.conf) - .usageCheckFactory(b.usageCheckFactory) - .build()); + SpaceUsageCheckParams checkParams = getSpaceUsageCheckParams(b); + volumeUsage = Optional.of(new VolumeUsage(checkParams, b.conf)); this.volumeSet = b.volumeSet; this.state = VolumeState.NOT_INITIALIZED; this.clusterID = b.clusterID; @@ -164,7 +163,7 @@ protected StorageVolume(Builder b) throws IOException { this.healthCheckFileSize = dnConf.getVolumeHealthCheckFileSize(); } else { storageDir = new File(b.volumeRootStr); - this.volumeInfo = Optional.empty(); + volumeUsage = Optional.empty(); this.volumeSet = null; this.storageID = UUID.randomUUID().toString(); this.state = VolumeState.FAILED; @@ -449,8 +448,9 @@ public String getVolumeRootDir() { return volumeRoot; } + /** Get current usage of the volume. 
*/ public SpaceUsageSource getCurrentUsage() { - return volumeInfo.map(VolumeInfo::getCurrentUsage) + return volumeUsage.map(VolumeUsage::getCurrentUsage) .orElse(SpaceUsageSource.UNKNOWN); } @@ -471,21 +471,22 @@ public File getDiskCheckDir() { return this.diskCheckDir; } - public void refreshVolumeInfo() { - volumeInfo.ifPresent(VolumeInfo::refreshNow); + public void refreshVolumeUsage() { + volumeUsage.ifPresent(VolumeUsage::refreshNow); } - public Optional getVolumeInfo() { - return this.volumeInfo; + /** @see #getCurrentUsage() */ + public Optional getVolumeInfo() { + return volumeUsage; } public void incrementUsedSpace(long usedSpace) { - volumeInfo.ifPresent(volInfo -> volInfo + volumeUsage.ifPresent(usage -> usage .incrementUsedSpace(usedSpace)); } public void decrementUsedSpace(long reclaimedSpace) { - volumeInfo.ifPresent(volInfo -> volInfo + volumeUsage.ifPresent(usage -> usage .decrementUsedSpace(reclaimedSpace)); } @@ -539,12 +540,12 @@ public DatanodeConfiguration getDatanodeConfig() { public void failVolume() { setState(VolumeState.FAILED); - volumeInfo.ifPresent(VolumeInfo::shutdownUsageThread); + volumeUsage.ifPresent(VolumeUsage::shutdown); } public void shutdown() { setState(VolumeState.NON_EXISTENT); - volumeInfo.ifPresent(VolumeInfo::shutdownUsageThread); + volumeUsage.ifPresent(VolumeUsage::shutdown); cleanTmpDiskCheckDir(); } @@ -681,4 +682,22 @@ public boolean equals(Object other) { public String toString() { return getStorageDir().toString(); } + + private static SpaceUsageCheckParams getSpaceUsageCheckParams(Builder b) throws IOException { + File root = new File(b.volumeRootStr); + + boolean succeeded = root.isDirectory() || root.mkdirs(); + + if (!succeeded) { + LOG.error("Unable to create the volume root dir at : {}", root); + throw new IOException("Unable to create the volume root dir at " + root); + } + + SpaceUsageCheckFactory usageCheckFactory = b.usageCheckFactory; + if (usageCheckFactory == null) { + usageCheckFactory = SpaceUsageCheckFactory.create(b.conf); + } + + return usageCheckFactory.paramsFor(root); + } } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeInfo.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeInfo.java deleted file mode 100644 index e0c01f38d0c6..000000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeInfo.java +++ /dev/null @@ -1,169 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.ozone.container.common.volume; - -import com.google.common.annotations.VisibleForTesting; -import java.io.File; -import java.io.IOException; -import org.apache.hadoop.hdds.conf.ConfigurationSource; -import org.apache.hadoop.hdds.fs.SpaceUsageCheckFactory; -import org.apache.hadoop.hdds.fs.SpaceUsageCheckParams; -import org.apache.hadoop.hdds.fs.SpaceUsageSource; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Stores information about a disk/volume. - * - * Since we have a reserved space for each volume for other usage, - * let's clarify the space values a bit here: - * - used: hdds actual usage. - * - avail: remaining space for hdds usage. - * - reserved: total space for other usage. - * - capacity: total space for hdds usage. - * - other: space used by other service consuming the same volume. - * - fsAvail: reported remaining space from local fs. - * - fsUsed: reported total used space from local fs. - * - fsCapacity: reported total capacity from local fs. - * - minVolumeFreeSpace (mvfs) : determines the free space for closing - containers.This is like adding a few reserved bytes to reserved space. - Dn's will send close container action to SCM at this limit, and it is - configurable. - - * - *
- * {@code
- * |----used----|   (avail)   |++mvfs++|++++reserved+++++++|
- * |<-     capacity                  ->|
- *              |     fsAvail      |-------other-----------|
- * |<-                   fsCapacity                      ->|
- * }
- *
- * What we could directly get from local fs:
- *     fsCapacity, fsAvail, (fsUsed = fsCapacity - fsAvail)
- * We could get from config:
- *     reserved
- * Get from cmd line:
- *     used: from cmd 'du' (by default)
- * Get from calculation:
- *     capacity = fsCapacity - reserved
- *     other = fsUsed - used
- *
- * The avail is the result we want from calculation.
- * So, it could be:
- * A) avail = capacity - used
- * B) avail = fsAvail - Max(reserved - other, 0);
- *
- * To be Conservative, we could get min
- *     avail = Max(Min(A, B), 0);
- *
- * If we have a dedicated disk for hdds and are not using the reserved space,
- * then we should use DedicatedDiskSpaceUsage for
- * `hdds.datanode.du.factory.classname`,
- * Then it is much simpler, since we don't care about other usage:
- * {@code
- *  |----used----|             (avail)/fsAvail              |
- *  |<-              capacity/fsCapacity                  ->|
- * }
- *
- *  We have avail == fsAvail.
- *
- */ -public final class VolumeInfo { - - private static final Logger LOG = LoggerFactory.getLogger(VolumeInfo.class); - - // Space usage calculator - private final VolumeUsage usage; - - /** - * Builder for VolumeInfo. - */ - public static class Builder { - private final ConfigurationSource conf; - private final String rootDir; - private SpaceUsageCheckFactory usageCheckFactory; - - public Builder(String root, ConfigurationSource config) { - this.rootDir = root; - this.conf = config; - } - - public Builder usageCheckFactory(SpaceUsageCheckFactory factory) { - this.usageCheckFactory = factory; - return this; - } - - public VolumeInfo build() throws IOException { - return new VolumeInfo(this); - } - } - - private VolumeInfo(Builder b) throws IOException { - - File root = new File(b.rootDir); - - boolean succeeded = root.isDirectory() || root.mkdirs(); - - if (!succeeded) { - LOG.error("Unable to create the volume root dir at : {}", root); - throw new IOException("Unable to create the volume root dir at " + root); - } - - SpaceUsageCheckFactory usageCheckFactory = b.usageCheckFactory; - if (usageCheckFactory == null) { - usageCheckFactory = SpaceUsageCheckFactory.create(b.conf); - } - SpaceUsageCheckParams checkParams = - usageCheckFactory.paramsFor(root); - - usage = new VolumeUsage(checkParams, b.conf); - } - - public SpaceUsageSource getCurrentUsage() { - return usage.getCurrentUsage(); - } - - public void incrementUsedSpace(long usedSpace) { - usage.incrementUsedSpace(usedSpace); - } - - public void decrementUsedSpace(long reclaimedSpace) { - usage.decrementUsedSpace(reclaimedSpace); - } - - public void refreshNow() { - usage.refreshNow(); - } - - void shutdownUsageThread() { - usage.shutdown(); - } - - /** - * Only for testing. Do not use otherwise. - */ - @VisibleForTesting - public VolumeUsage getUsageForTesting() { - return usage; - } - - public long getReservedInBytes() { - return usage.getReservedBytes(); - } -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeUsage.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeUsage.java index 6c4f7bf35cbe..0fcab327a4d7 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeUsage.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeUsage.java @@ -36,8 +36,61 @@ import org.slf4j.LoggerFactory; /** - * Class that wraps the space df of the Datanode Volumes used by SCM - * containers. + * Stores information about a disk/volume. + * + * Since we have a reserved space for each volume for other usage, + * let's clarify the space values a bit here: + * - used: hdds actual usage. + * - avail: remaining space for hdds usage. + * - reserved: total space for other usage. + * - capacity: total space for hdds usage. + * - other: space used by other service consuming the same volume. + * - fsAvail: reported remaining space from local fs. + * - fsUsed: reported total used space from local fs. + * - fsCapacity: reported total capacity from local fs. + * - minVolumeFreeSpace (mvfs) : determines the free space for closing + containers.This is like adding a few reserved bytes to reserved space. + Dn's will send close container action to SCM at this limit, and it is + configurable. + + * + *
+ * {@code
+ * |----used----|   (avail)   |++mvfs++|++++reserved+++++++|
+ * |<-     capacity                  ->|
+ *              |     fsAvail      |-------other-----------|
+ * |<-                   fsCapacity                      ->|
+ * }
+ *
+ * What we could directly get from local fs:
+ *     fsCapacity, fsAvail, (fsUsed = fsCapacity - fsAvail)
+ * We could get from config:
+ *     reserved
+ * Get from cmd line:
+ *     used: from cmd 'du' (by default)
+ * Get from calculation:
+ *     capacity = fsCapacity - reserved
+ *     other = fsUsed - used
+ *
+ * The avail is the result we want from calculation.
+ * So, it could be:
+ * A) avail = capacity - used
+ * B) avail = fsAvail - Max(reserved - other, 0);
+ *
+ * To be Conservative, we could get min
+ *     avail = Max(Min(A, B), 0);
+ *
+ * If we have a dedicated disk for hdds and are not using the reserved space,
+ * then we should use DedicatedDiskSpaceUsage for
+ * `hdds.datanode.du.factory.classname`,
+ * Then it is much simpler, since we don't care about other usage:
+ * {@code
+ *  |----used----|             (avail)/fsAvail              |
+ *  |<-              capacity/fsCapacity                  ->|
+ * }
+ *
+ *  We have avail == fsAvail.
+ *
*/ public class VolumeUsage { @@ -120,7 +173,7 @@ public void refreshNow() { source.refreshNow(); } - public long getReservedBytes() { + public long getReservedInBytes() { return reservedInBytes; } diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestReservedVolumeSpace.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestReservedVolumeSpace.java index bcc123520e02..b8690a784ef3 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestReservedVolumeSpace.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestReservedVolumeSpace.java @@ -69,11 +69,11 @@ public void testDefaultConfig() throws Exception { // Gets the total capacity reported by Ozone, which may be limited to less than the volume's real capacity by the // DU reserved configurations. long volumeCapacity = hddsVolume.getCurrentUsage().getCapacity(); - VolumeUsage usage = hddsVolume.getVolumeInfo().get().getUsageForTesting(); + VolumeUsage usage = hddsVolume.getVolumeInfo().get(); // Gets the actual total capacity without accounting for DU reserved space configurations. long totalCapacity = usage.realUsage().getCapacity(); - long reservedCapacity = usage.getReservedBytes(); + long reservedCapacity = usage.getReservedInBytes(); assertEquals(getExpectedDefaultReserved(hddsVolume), reservedCapacity); assertEquals(totalCapacity - reservedCapacity, volumeCapacity); @@ -93,11 +93,11 @@ public void testVolumeCapacityAfterReserve() throws Exception { HDDS_DATANODE_DIR_DU_RESERVED_PERCENT_DEFAULT); long volumeCapacity = hddsVolume.getCurrentUsage().getCapacity(); - VolumeUsage usage = hddsVolume.getVolumeInfo().get().getUsageForTesting(); + VolumeUsage usage = hddsVolume.getVolumeInfo().get(); //Gets the actual total capacity long totalCapacity = usage.realUsage().getCapacity(); - long reservedCapacity = usage.getReservedBytes(); + long reservedCapacity = usage.getReservedInBytes(); long reservedCalculated = (long) Math.ceil(totalCapacity * percentage); assertEquals(reservedCalculated, reservedCapacity); @@ -131,8 +131,8 @@ public void testFallbackToPercentConfig() throws Exception { temp.toString() + ":500B"); HddsVolume hddsVolume = volumeBuilder.conf(conf).build(); - VolumeUsage usage = hddsVolume.getVolumeInfo().get().getUsageForTesting(); - long reservedFromVolume = usage.getReservedBytes(); + VolumeUsage usage = hddsVolume.getVolumeInfo().get(); + long reservedFromVolume = usage.getReservedInBytes(); assertNotEquals(0, reservedFromVolume); long totalCapacity = usage.realUsage().getCapacity(); @@ -212,7 +212,7 @@ public void testMinFreeSpaceCalculator() throws Exception { private long getExpectedDefaultReserved(HddsVolume volume) { - long totalCapacity = volume.getVolumeInfo().get().getUsageForTesting().realUsage().getCapacity(); + long totalCapacity = volume.getVolumeInfo().get().realUsage().getCapacity(); return (long) Math.ceil(totalCapacity * HDDS_DATANODE_DIR_DU_RESERVED_PERCENT_DEFAULT); } } diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java index d3fc67d053e7..521a9335b825 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java +++ 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java @@ -23,7 +23,6 @@ import static org.assertj.core.api.Assumptions.assumeThat; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; -import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertTrue; import java.io.File; @@ -213,8 +212,7 @@ public void testShutdown() throws Exception { // Verify that volume usage can be queried during shutdown. for (StorageVolume volume : volumesList) { - assertNotNull(volume.getVolumeInfo().get() - .getUsageForTesting()); + assertThat(volume.getVolumeInfo()).isPresent(); volume.getCurrentUsage(); } } diff --git a/hadoop-ozone/mini-cluster/src/main/java/org/apache/hadoop/ozone/UniformDatanodesFactory.java b/hadoop-ozone/mini-cluster/src/main/java/org/apache/hadoop/ozone/UniformDatanodesFactory.java index 1029860375dd..328e8a9692c7 100644 --- a/hadoop-ozone/mini-cluster/src/main/java/org/apache/hadoop/ozone/UniformDatanodesFactory.java +++ b/hadoop-ozone/mini-cluster/src/main/java/org/apache/hadoop/ozone/UniformDatanodesFactory.java @@ -159,7 +159,7 @@ public Builder setNumDataVolumes(int n) { * for each volume in each datanode. * @param reservedSpace String that contains the numeric size value and ends with a * {@link org.apache.hadoop.hdds.conf.StorageUnit} suffix. For example, "50GB". - * @see org.apache.hadoop.ozone.container.common.volume.VolumeInfo + * @see org.apache.hadoop.ozone.container.common.volume.VolumeUsage */ public Builder setReservedSpace(String reservedSpace) { this.reservedSpace = reservedSpace; From 35fe5381b1cc036b62a393388c06c78264d64ac4 Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" Date: Mon, 24 Mar 2025 10:58:38 +0100 Subject: [PATCH 4/5] rename getVolumeInfo --- .../common/volume/MutableVolumeSet.java | 6 +++--- .../container/common/volume/StorageVolume.java | 2 +- .../common/volume/VolumeInfoMetrics.java | 6 +++--- .../common/impl/TestHddsDispatcher.java | 4 ++-- .../common/volume/TestReservedVolumeSpace.java | 16 ++++++++-------- .../container/common/volume/TestVolumeSet.java | 2 +- .../hadoop/ozone/TestMiniOzoneCluster.java | 2 +- 7 files changed, 19 insertions(+), 19 deletions(-) diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/MutableVolumeSet.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/MutableVolumeSet.java index 29cc05fcee70..2b41c4872de2 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/MutableVolumeSet.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/MutableVolumeSet.java @@ -464,7 +464,7 @@ public StorageLocationReport[] getStorageReport() { StorageVolume volume; for (Map.Entry entry : volumeMap.entrySet()) { volume = entry.getValue(); - Optional volumeInfo = volume.getVolumeInfo(); + Optional volumeUsage = volume.getVolumeUsage(); long scmUsed = 0; long remaining = 0; long capacity = 0; @@ -472,9 +472,9 @@ public StorageLocationReport[] getStorageReport() { long spare = 0; String rootDir = volume.getVolumeRootDir(); failed = true; - if (volumeInfo.isPresent()) { + if (volumeUsage.isPresent()) { try { - SpaceUsageSource usage = volumeInfo.get().getCurrentUsage(); + SpaceUsageSource usage = volumeUsage.get().getCurrentUsage(); scmUsed = usage.getUsedSpace(); remaining = 
usage.getAvailable(); capacity = usage.getCapacity(); diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/StorageVolume.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/StorageVolume.java index 28c062e43252..318fa0ab9df0 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/StorageVolume.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/StorageVolume.java @@ -476,7 +476,7 @@ public void refreshVolumeUsage() { } /** @see #getCurrentUsage() */ - public Optional getVolumeInfo() { + public Optional getVolumeUsage() { return volumeUsage; } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeInfoMetrics.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeInfoMetrics.java index 7a9b465a68e9..c864f416cdb4 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeInfoMetrics.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeInfoMetrics.java @@ -131,9 +131,9 @@ public long getContainers() { public void getMetrics(MetricsCollector collector, boolean all) { MetricsRecordBuilder builder = collector.addRecord(metricsSourceName); registry.snapshot(builder, all); - volume.getVolumeInfo().ifPresent(volumeInfo -> { - SpaceUsageSource usage = volumeInfo.getCurrentUsage(); - long reserved = volumeInfo.getReservedInBytes(); + volume.getVolumeUsage().ifPresent(volumeUsage -> { + SpaceUsageSource usage = volumeUsage.getCurrentUsage(); + long reserved = volumeUsage.getReservedInBytes(); builder .addGauge(CAPACITY, usage.getCapacity()) .addGauge(AVAILABLE, usage.getAvailable()) diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java index 1243b5d46b0b..152789751157 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java @@ -289,8 +289,8 @@ public void testContainerCloseActionWhenVolumeFull( HddsDispatcher hddsDispatcher = new HddsDispatcher( conf, containerSet, volumeSet, handlers, context, metrics, null); hddsDispatcher.setClusterId(scmId.toString()); - containerData.getVolume().getVolumeInfo() - .ifPresent(volumeInfo -> volumeInfo.incrementUsedSpace(50)); + containerData.getVolume().getVolumeUsage() + .ifPresent(usage -> usage.incrementUsedSpace(50)); usedSpace.addAndGet(50); ContainerCommandResponseProto response = hddsDispatcher .dispatch(getWriteChunkRequest(dd.getUuidString(), 1L, 1L), null); diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestReservedVolumeSpace.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestReservedVolumeSpace.java index b8690a784ef3..55fc68f4f56b 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestReservedVolumeSpace.java +++ 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestReservedVolumeSpace.java @@ -69,7 +69,7 @@ public void testDefaultConfig() throws Exception { // Gets the total capacity reported by Ozone, which may be limited to less than the volume's real capacity by the // DU reserved configurations. long volumeCapacity = hddsVolume.getCurrentUsage().getCapacity(); - VolumeUsage usage = hddsVolume.getVolumeInfo().get(); + VolumeUsage usage = hddsVolume.getVolumeUsage().get(); // Gets the actual total capacity without accounting for DU reserved space configurations. long totalCapacity = usage.realUsage().getCapacity(); @@ -93,7 +93,7 @@ public void testVolumeCapacityAfterReserve() throws Exception { HDDS_DATANODE_DIR_DU_RESERVED_PERCENT_DEFAULT); long volumeCapacity = hddsVolume.getCurrentUsage().getCapacity(); - VolumeUsage usage = hddsVolume.getVolumeInfo().get(); + VolumeUsage usage = hddsVolume.getVolumeUsage().get(); //Gets the actual total capacity long totalCapacity = usage.realUsage().getCapacity(); @@ -117,7 +117,7 @@ public void testReservedWhenBothConfigSet() throws Exception { folder.toString() + ":500B"); HddsVolume hddsVolume = volumeBuilder.conf(conf).build(); - long reservedFromVolume = hddsVolume.getVolumeInfo().get() + long reservedFromVolume = hddsVolume.getVolumeUsage().get() .getReservedInBytes(); assertEquals(500, reservedFromVolume); } @@ -131,7 +131,7 @@ public void testFallbackToPercentConfig() throws Exception { temp.toString() + ":500B"); HddsVolume hddsVolume = volumeBuilder.conf(conf).build(); - VolumeUsage usage = hddsVolume.getVolumeInfo().get(); + VolumeUsage usage = hddsVolume.getVolumeUsage().get(); long reservedFromVolume = usage.getReservedInBytes(); assertNotEquals(0, reservedFromVolume); @@ -151,7 +151,7 @@ public void testInvalidConfig() throws Exception { folder.toString() + ":500C"); HddsVolume hddsVolume1 = volumeBuilder.conf(conf1).build(); - long reservedFromVolume1 = hddsVolume1.getVolumeInfo().get() + long reservedFromVolume1 = hddsVolume1.getVolumeUsage().get() .getReservedInBytes(); assertEquals(getExpectedDefaultReserved(hddsVolume1), reservedFromVolume1); @@ -161,7 +161,7 @@ public void testInvalidConfig() throws Exception { conf2.set(HDDS_DATANODE_DIR_DU_RESERVED_PERCENT, "20"); HddsVolume hddsVolume2 = volumeBuilder.conf(conf2).build(); - long reservedFromVolume2 = hddsVolume2.getVolumeInfo().get() + long reservedFromVolume2 = hddsVolume2.getVolumeUsage().get() .getReservedInBytes(); assertEquals(getExpectedDefaultReserved(hddsVolume2), reservedFromVolume2); } @@ -188,7 +188,7 @@ public void testPathsCanonicalized() throws Exception { conf.set(ScmConfigKeys.HDDS_DATANODE_DIR_DU_RESERVED, symlink + ":500B"); HddsVolume hddsVolume = volumeBuilder.conf(conf).build(); - long reservedFromVolume = hddsVolume.getVolumeInfo().get().getReservedInBytes(); + long reservedFromVolume = hddsVolume.getVolumeUsage().get().getReservedInBytes(); assertEquals(500, reservedFromVolume); } @@ -212,7 +212,7 @@ public void testMinFreeSpaceCalculator() throws Exception { private long getExpectedDefaultReserved(HddsVolume volume) { - long totalCapacity = volume.getVolumeInfo().get().realUsage().getCapacity(); + long totalCapacity = volume.getVolumeUsage().get().realUsage().getCapacity(); return (long) Math.ceil(totalCapacity * HDDS_DATANODE_DIR_DU_RESERVED_PERCENT_DEFAULT); } } diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java index 521a9335b825..2f27b5363f4b 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java @@ -212,7 +212,7 @@ public void testShutdown() throws Exception { // Verify that volume usage can be queried during shutdown. for (StorageVolume volume : volumesList) { - assertThat(volume.getVolumeInfo()).isPresent(); + assertThat(volume.getVolumeUsage()).isPresent(); volume.getCurrentUsage(); } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneCluster.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneCluster.java index 1ab32f738458..6e26a3ae7ac6 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneCluster.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneCluster.java @@ -279,7 +279,7 @@ public void testMultipleDataDirs() throws Exception { volumeList.forEach(storageVolume -> assertEquals( (long) StorageSize.parse(reservedSpace).getValue(), - storageVolume.getVolumeInfo().get().getReservedInBytes())); + storageVolume.getVolumeUsage().get().getReservedInBytes())); } } From 872eb18e4be61e2a7f809473b5513f17fa4ccaa2 Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" Date: Mon, 24 Mar 2025 11:46:23 +0100 Subject: [PATCH 5/5] fix javadoc --- .../apache/hadoop/ozone/container/common/volume/HddsVolume.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java index d355908e5e91..b88961e04ae4 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java @@ -49,7 +49,7 @@ /** * HddsVolume represents volume in a datanode. {@link MutableVolumeSet} * maintains a list of HddsVolumes, one for each volume in the Datanode. - * {@link VolumeInfo} in encompassed by this class. + * {@link VolumeUsage} in encompassed by this class. *

* The disk layout per volume is as follows: *

../hdds/VERSION
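
Note: the VolumeUsage javadoc moved in PATCH 3/5 documents how a datanode derives available space from the local filesystem figures and the configured reserved space. The following self-contained Java sketch only restates that documented formula for reference; the class and method names, and the example numbers, are invented for illustration and are not part of these patches or of Ozone.

public final class AvailSketch {

  /** Conservative estimate from the javadoc: avail = Max(Min(A, B), 0). */
  static long avail(long fsCapacity, long fsAvail, long used, long reserved) {
    long fsUsed = fsCapacity - fsAvail;                     // reported by the local fs
    long capacity = fsCapacity - reserved;                  // space usable by HDDS
    long other = fsUsed - used;                             // used by services other than HDDS
    long availA = capacity - used;                          // A) avail = capacity - used
    long availB = fsAvail - Math.max(reserved - other, 0);  // B) avail = fsAvail - Max(reserved - other, 0)
    return Math.max(Math.min(availA, availB), 0);
  }

  public static void main(String[] args) {
    // Hypothetical volume: 100 GB filesystem, 10 GB reserved, 30 GB used by HDDS, 55 GB free on the fs.
    long gb = 1L << 30;
    System.out.println(avail(100 * gb, 55 * gb, 30 * gb, 10 * gb) / gb + " GB available");
  }
}

With these example numbers the estimate is limited by B (55 GB from fsAvail) rather than A (60 GB), which is the conservative behaviour the javadoc describes; on a dedicated disk configured with DedicatedDiskSpaceUsage the calculation collapses to avail == fsAvail.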