 * {@literal ../hdds/<<clusterUuid>>/current/<<containerDir>>/<<containerID>>/metadata}
- * Each hdds volume has its own VERSION file. The hdds volume will have one
- * clusterUuid directory for each SCM it is a part of (currently only one SCM is
- * supported).
- *
- * During DN startup, if the VERSION file exists, we verify that the
- * clusterID in the version file matches the clusterID from SCM.
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
@@ -65,23 +58,22 @@ public class HddsVolume extends StorageVolume {
public static final String HDDS_VOLUME_DIR = "hdds";
- private VolumeState state;
private final VolumeIOStats volumeIOStats;
- // VERSION file properties
- private String storageID; // id of the file system
- private String clusterID; // id of the cluster
- private String datanodeUuid; // id of the DataNode
- private long cTime; // creation time of the file system state
- private int layoutVersion; // layout version of the storage data
private final AtomicLong committedBytes; // till Open containers become full
+  // The dedicated DbVolume on which the db instance of this HddsVolume
+  // resides. This is optional; if null, the db instance resides on this
+  // HddsVolume.
+ private DbVolume dbVolume;
+  // The subdirectory named after the storageID, used to build the
+  // container db path. It is initialized once, together with dbVolume,
+  // and cached as a member to avoid repeatedly creating File objects.
+ private File dbParentDir;
+
/**
* Builder for HddsVolume.
*/
  public static class Builder extends StorageVolume.Builder<Builder> {
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestDbVolume.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestDbVolume.java
new file mode 100644
--- /dev/null
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestDbVolume.java
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.ozone.container.common.volume;
+
+import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.fs.MockSpaceUsageCheckFactory;
+import org.apache.hadoop.hdds.scm.ScmConfigKeys;
+import org.apache.hadoop.ozone.container.common.ContainerTestUtils;
+import org.apache.hadoop.ozone.container.common.utils.DatanodeStoreCache;
+import org.apache.hadoop.ozone.container.common.utils.StorageVolumeUtil;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TemporaryFolder;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.Collections;
+import java.util.UUID;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+import static org.mockito.Mockito.mock;
+import static org.powermock.api.mockito.PowerMockito.when;
+
+/**
+ * Unit tests for {@link DbVolume}.
+ */
+public class TestDbVolume {
+
+ private static final String DATANODE_UUID = UUID.randomUUID().toString();
+ private static final String CLUSTER_ID = UUID.randomUUID().toString();
+ private static final OzoneConfiguration CONF = new OzoneConfiguration();
+
+ private DbVolume.Builder volumeBuilder;
+ private File versionFile;
+
+ @Rule
+ public TemporaryFolder folder = new TemporaryFolder();
+
+ @Before
+ public void setup() throws Exception {
+ File rootDir = new File(folder.getRoot(), DbVolume.DB_VOLUME_DIR);
+ volumeBuilder = new DbVolume.Builder(folder.getRoot().getPath())
+ .datanodeUuid(DATANODE_UUID)
+ .conf(CONF)
+ .usageCheckFactory(MockSpaceUsageCheckFactory.NONE);
+ versionFile = StorageVolumeUtil.getVersionFile(rootDir);
+ }
+
+ @Test
+ public void testInitializeEmptyDbVolume() throws IOException {
+ DbVolume volume = volumeBuilder.build();
+
+    // The initial state of the DbVolume should be "NOT_FORMATTED" when
+    // clusterID is not specified, and the version file should not be
+    // written to disk.
+ assertNull(volume.getClusterID());
+ assertEquals(StorageType.DEFAULT, volume.getStorageType());
+ assertEquals(HddsVolume.VolumeState.NOT_FORMATTED,
+ volume.getStorageState());
+ assertFalse("Version file should not be created when clusterID is not " +
+ "known.", versionFile.exists());
+
+ // Format the volume with clusterID.
+ volume.format(CLUSTER_ID);
+
+    // After formatting with clusterID, the volume state should be
+    // NORMAL and the version file should exist.
+ assertTrue("Volume format should create Version file",
+ versionFile.exists());
+ assertEquals(CLUSTER_ID, volume.getClusterID());
+ assertEquals(HddsVolume.VolumeState.NORMAL, volume.getStorageState());
+ assertEquals(0, volume.getHddsVolumeIDs().size());
+ }
+
+ @Test
+ public void testInitializeNonEmptyDbVolume() throws IOException {
+ DbVolume volume = volumeBuilder.build();
+
+    // The initial state of the DbVolume should be "NOT_FORMATTED" when
+    // clusterID is not specified, and the version file should not be
+    // written to disk.
+ assertNull(volume.getClusterID());
+ assertEquals(StorageType.DEFAULT, volume.getStorageType());
+ assertEquals(HddsVolume.VolumeState.NOT_FORMATTED,
+ volume.getStorageState());
+ assertFalse("Version file should not be created when clusterID is not " +
+ "known.", versionFile.exists());
+
+ // Format the volume with clusterID.
+ volume.format(CLUSTER_ID);
+ volume.createWorkingDir(CLUSTER_ID, null);
+
+ // The clusterIdDir should be created
+ File clusterIdDir = new File(volume.getStorageDir(), CLUSTER_ID);
+ assertTrue(clusterIdDir.exists());
+
+ // Create some subdirectories to mock db instances under this volume.
+ int numSubDirs = 5;
+ File[] subdirs = new File[numSubDirs];
+ for (int i = 0; i < numSubDirs; i++) {
+ subdirs[i] = new File(clusterIdDir, UUID.randomUUID().toString());
+ boolean res = subdirs[i].mkdir();
+ assertTrue(res);
+ }
+
+ // Rebuild the same volume to simulate DN restart.
+ volume = volumeBuilder.build();
+ assertEquals(numSubDirs, volume.getHddsVolumeIDs().size());
+ }
+
+ @Test
+ public void testDbStoreClosedOnBadDbVolume() throws IOException {
+ ContainerTestUtils.enableSchemaV3(CONF);
+
+ DbVolume dbVolume = volumeBuilder.build();
+ dbVolume.format(CLUSTER_ID);
+ dbVolume.createWorkingDir(CLUSTER_ID, null);
+
+ MutableVolumeSet dbVolumeSet = mock(MutableVolumeSet.class);
+ when(dbVolumeSet.getVolumesList())
+ .thenReturn(Collections.singletonList(dbVolume));
+
+ MutableVolumeSet hddsVolumeSet = createHddsVolumeSet(3);
+ for (HddsVolume hddsVolume : StorageVolumeUtil.getHddsVolumesList(
+ hddsVolumeSet.getVolumesList())) {
+ hddsVolume.format(CLUSTER_ID);
+ hddsVolume.createWorkingDir(CLUSTER_ID, dbVolumeSet);
+ }
+
+ // The db handlers should be in the cache
+ assertEquals(3, DatanodeStoreCache.getInstance().size());
+
+ // Make the dbVolume a bad volume
+ dbVolume.failVolume();
+
+ // The db handlers should be removed from the cache
+ assertEquals(0, DatanodeStoreCache.getInstance().size());
+ }
+
+ private MutableVolumeSet createHddsVolumeSet(int volumeNum)
+ throws IOException {
+ File[] hddsVolumeDirs = new File[volumeNum];
+ StringBuilder hddsDirs = new StringBuilder();
+ for (int i = 0; i < volumeNum; i++) {
+ hddsVolumeDirs[i] = folder.newFolder();
+ hddsDirs.append(hddsVolumeDirs[i]).append(",");
+ }
+ CONF.set(ScmConfigKeys.HDDS_DATANODE_DIR_KEY, hddsDirs.toString());
+ MutableVolumeSet hddsVolumeSet = new MutableVolumeSet(DATANODE_UUID,
+ CLUSTER_ID, CONF, null, StorageVolume.VolumeType.DATA_VOLUME, null);
+ return hddsVolumeSet;
+ }
+}
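
A minimal sketch (not Ozone code) of the path resolution the tests above assert: the container db parent is <storageDir>/<clusterID>/<storageID>, where the storage dir belongs to the dedicated DbVolume when one was chosen and to the HddsVolume itself otherwise. Class and parameter names below are hypothetical.

import java.io.File;

final class DbPathSketch {
  // dbVolumeStorageDir may be null; the db then lives on the HddsVolume.
  static File dbParentDir(File hddsStorageDir, File dbVolumeStorageDir,
      String clusterId, String storageId) {
    File base = (dbVolumeStorageDir != null)
        ? dbVolumeStorageDir   // a dedicated DbVolume was chosen
        : hddsStorageDir;      // fall back to the HddsVolume itself
    // <base>/<clusterID>/<storageID> is the parent dir of container.db
    return new File(new File(base, clusterId), storageId);
  }
}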
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestHddsVolume.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestHddsVolume.java
index 3f664b48c888..9f26a0b06103 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestHddsVolume.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestHddsVolume.java
@@ -20,7 +20,6 @@
import java.io.File;
import java.io.IOException;
import java.time.Duration;
-import java.util.Properties;
import java.util.UUID;
import java.util.concurrent.atomic.AtomicLong;
@@ -32,15 +31,20 @@
import org.apache.hadoop.hdds.fs.SpaceUsagePersistence;
import org.apache.hadoop.hdds.fs.SpaceUsageSource;
import org.apache.hadoop.hdds.scm.ScmConfigKeys;
-import org.apache.hadoop.ozone.container.common.helpers.DatanodeVersionFile;
-import org.apache.hadoop.ozone.container.common.utils.HddsVolumeUtil;
+import org.apache.hadoop.ozone.OzoneConfigKeys;
import static org.apache.hadoop.hdds.fs.MockSpaceUsagePersistence.inMemory;
import static org.apache.hadoop.hdds.fs.MockSpaceUsageSource.fixed;
+import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_DB_NAME;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
+
+import org.apache.hadoop.ozone.container.common.ContainerTestUtils;
+import org.apache.hadoop.ozone.container.common.utils.DatanodeStoreCache;
+import org.apache.hadoop.ozone.container.common.utils.StorageVolumeUtil;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
@@ -71,7 +75,7 @@ public void setup() throws Exception {
.datanodeUuid(DATANODE_UUID)
.conf(CONF)
.usageCheckFactory(MockSpaceUsageCheckFactory.NONE);
- versionFile = HddsVolumeUtil.getVersionFile(rootDir);
+ versionFile = StorageVolumeUtil.getVersionFile(rootDir);
}
@Test
@@ -100,31 +104,6 @@ public void testHddsVolumeInitialization() throws Exception {
assertEquals(HddsVolume.VolumeState.NORMAL, volume.getStorageState());
}
- @Test
- public void testReadPropertiesFromVersionFile() throws Exception {
- HddsVolume volume = volumeBuilder.build();
-
- volume.format(CLUSTER_ID);
-
- Properties properties = DatanodeVersionFile.readFrom(versionFile);
-
- String storageID = HddsVolumeUtil.getStorageID(properties, versionFile);
- String clusterID = HddsVolumeUtil.getClusterID(
- properties, versionFile, CLUSTER_ID);
- String datanodeUuid = HddsVolumeUtil.getDatanodeUUID(
- properties, versionFile, DATANODE_UUID);
- long cTime = HddsVolumeUtil.getCreationTime(
- properties, versionFile);
- int layoutVersion = HddsVolumeUtil.getLayOutVersion(
- properties, versionFile);
-
- assertEquals(volume.getStorageID(), storageID);
- assertEquals(volume.getClusterID(), clusterID);
- assertEquals(volume.getDatanodeUuid(), datanodeUuid);
- assertEquals(volume.getCTime(), cTime);
- assertEquals(volume.getLayoutVersion(), layoutVersion);
- }
-
@Test
public void testShutdown() throws Exception {
long initialUsedSpace = 250;
@@ -276,4 +255,131 @@ public void testOverUsedHddsSpace() throws IOException {
// Shutdown the volume.
volume.shutdown();
}
+
+ @Test
+ public void testDbStoreCreatedWithoutDbVolumes() throws IOException {
+ ContainerTestUtils.enableSchemaV3(CONF);
+
+ HddsVolume volume = volumeBuilder.build();
+ volume.format(CLUSTER_ID);
+ volume.createWorkingDir(CLUSTER_ID, null);
+
+    // No DbVolume is chosen, so the HddsVolume itself holds
+    // the db instance.
+ assertNull(volume.getDbVolume());
+ File storageIdDir = new File(new File(volume.getStorageDir(),
+ CLUSTER_ID), volume.getStorageID());
+ assertEquals(volume.getDbParentDir(), storageIdDir);
+
+ // The db directory should exist
+ File containerDBFile = new File(volume.getDbParentDir(),
+ CONTAINER_DB_NAME);
+ assertTrue(containerDBFile.exists());
+
+ volume.shutdown();
+ }
+
+ @Test
+ public void testDbStoreCreatedWithDbVolumes() throws IOException {
+ ContainerTestUtils.enableSchemaV3(CONF);
+
+ // create the DbVolumeSet
+ MutableVolumeSet dbVolumeSet = createDbVolumeSet();
+
+ HddsVolume volume = volumeBuilder.build();
+ volume.format(CLUSTER_ID);
+ volume.createWorkingDir(CLUSTER_ID, dbVolumeSet);
+
+ // DbVolume chosen.
+ assertNotNull(volume.getDbVolume());
+
+ File storageIdDir = new File(new File(volume.getDbVolume()
+ .getStorageDir(), CLUSTER_ID), volume.getStorageID());
+ // Db parent dir should be set to a subdir under the dbVolume.
+ assertEquals(volume.getDbParentDir(), storageIdDir);
+
+ // The db directory should exist
+ File containerDBFile = new File(volume.getDbParentDir(),
+ CONTAINER_DB_NAME);
+ assertTrue(containerDBFile.exists());
+
+ volume.shutdown();
+ }
+
+ @Test
+ public void testDbStoreClosedOnBadVolumeWithoutDbVolumes()
+ throws IOException {
+ ContainerTestUtils.enableSchemaV3(CONF);
+
+ HddsVolume volume = volumeBuilder.build();
+ volume.format(CLUSTER_ID);
+ volume.createWorkingDir(CLUSTER_ID, null);
+
+    // No DbVolume is chosen, so the HddsVolume itself holds
+    // the db instance.
+ assertNull(volume.getDbVolume());
+ File storageIdDir = new File(new File(volume.getStorageDir(),
+ CLUSTER_ID), volume.getStorageID());
+ assertEquals(volume.getDbParentDir(), storageIdDir);
+
+ // The db directory should exist
+ File containerDBFile = new File(volume.getDbParentDir(),
+ CONTAINER_DB_NAME);
+ assertTrue(containerDBFile.exists());
+ assertNotNull(DatanodeStoreCache.getInstance().getDB(
+ containerDBFile.getAbsolutePath()));
+
+ // Make it a bad volume
+ volume.failVolume();
+
+ // The db should be removed from cache
+ assertNull(DatanodeStoreCache.getInstance().getDB(
+ containerDBFile.getAbsolutePath()));
+ }
+
+ @Test
+ public void testDbStoreClosedOnBadVolumeWithDbVolumes() throws IOException {
+ ContainerTestUtils.enableSchemaV3(CONF);
+
+ // create the DbVolumeSet
+ MutableVolumeSet dbVolumeSet = createDbVolumeSet();
+
+ HddsVolume volume = volumeBuilder.build();
+ volume.format(CLUSTER_ID);
+ volume.createWorkingDir(CLUSTER_ID, dbVolumeSet);
+
+ // DbVolume chosen.
+ assertNotNull(volume.getDbVolume());
+
+ File storageIdDir = new File(new File(volume.getDbVolume()
+ .getStorageDir(), CLUSTER_ID), volume.getStorageID());
+ // Db parent dir should be set to a subdir under the dbVolume.
+ assertEquals(volume.getDbParentDir(), storageIdDir);
+
+ // The db directory should exist
+ File containerDBFile = new File(volume.getDbParentDir(),
+ CONTAINER_DB_NAME);
+ assertTrue(containerDBFile.exists());
+ assertNotNull(DatanodeStoreCache.getInstance().getDB(
+ containerDBFile.getAbsolutePath()));
+
+ // Make it a bad volume
+ volume.failVolume();
+
+ // The db should be removed from cache
+ assertNull(DatanodeStoreCache.getInstance().getDB(
+ containerDBFile.getAbsolutePath()));
+ }
+
+ private MutableVolumeSet createDbVolumeSet() throws IOException {
+ File dbVolumeDir = folder.newFolder();
+ CONF.set(OzoneConfigKeys.HDDS_DATANODE_CONTAINER_DB_DIR,
+ dbVolumeDir.getAbsolutePath());
+ MutableVolumeSet dbVolumeSet = new MutableVolumeSet(DATANODE_UUID,
+ CLUSTER_ID, CONF, null, StorageVolume.VolumeType.DB_VOLUME,
+ null);
+ dbVolumeSet.getVolumesList().get(0).format(CLUSTER_ID);
+ dbVolumeSet.getVolumesList().get(0).createWorkingDir(CLUSTER_ID, null);
+ return dbVolumeSet;
+ }
}
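
The bad-volume tests above rest on one invariant: DatanodeStoreCache keys each db handle by the absolute path of its container.db, and failVolume() closes and evicts every handle on the failing volume, so later getDB() calls return null. A hedged sketch of that behavior under those assumptions; the class below is illustrative, not the real DatanodeStoreCache:

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

final class StoreCacheSketch {
  private final Map<String, AutoCloseable> cache = new ConcurrentHashMap<>();

  void addDB(String containerDbPath, AutoCloseable store) {
    cache.put(containerDbPath, store);
  }

  AutoCloseable getDB(String containerDbPath) {
    return cache.get(containerDbPath); // null after the volume has failed
  }

  // Invoked when a volume fails: close the store and forget it.
  void removeDB(String containerDbPath) throws Exception {
    AutoCloseable store = cache.remove(containerDbPath);
    if (store != null) {
      store.close();
    }
  }

  int size() {
    return cache.size();
  }
}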
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestStorageVolume.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestStorageVolume.java
new file mode 100644
index 000000000000..5f015204fa8b
--- /dev/null
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestStorageVolume.java
@@ -0,0 +1,83 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.ozone.container.common.volume;
+
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.fs.MockSpaceUsageCheckFactory;
+import org.apache.hadoop.ozone.container.common.helpers.DatanodeVersionFile;
+import org.apache.hadoop.ozone.container.common.utils.StorageVolumeUtil;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TemporaryFolder;
+
+import java.io.File;
+import java.util.Properties;
+import java.util.UUID;
+
+import static org.junit.Assert.assertEquals;
+
+/**
+ * Test for StorageVolume.
+ */
+public class TestStorageVolume {
+
+ private static final String DATANODE_UUID = UUID.randomUUID().toString();
+ private static final String CLUSTER_ID = UUID.randomUUID().toString();
+ private static final OzoneConfiguration CONF = new OzoneConfiguration();
+
+ @Rule
+ public TemporaryFolder folder = new TemporaryFolder();
+
+ private HddsVolume.Builder volumeBuilder;
+ private File versionFile;
+
+ @Before
+ public void setup() throws Exception {
+ File rootDir = new File(folder.getRoot(), HddsVolume.HDDS_VOLUME_DIR);
+ volumeBuilder = new HddsVolume.Builder(folder.getRoot().getPath())
+ .datanodeUuid(DATANODE_UUID)
+ .conf(CONF)
+ .usageCheckFactory(MockSpaceUsageCheckFactory.NONE);
+ versionFile = StorageVolumeUtil.getVersionFile(rootDir);
+ }
+
+ @Test
+ public void testReadPropertiesFromVersionFile() throws Exception {
+ HddsVolume volume = volumeBuilder.build();
+
+ volume.format(CLUSTER_ID);
+
+ Properties properties = DatanodeVersionFile.readFrom(versionFile);
+
+ String storageID = StorageVolumeUtil.getStorageID(properties, versionFile);
+ String clusterID = StorageVolumeUtil.getClusterID(
+ properties, versionFile, CLUSTER_ID);
+ String datanodeUuid = StorageVolumeUtil.getDatanodeUUID(
+ properties, versionFile, DATANODE_UUID);
+ long cTime = StorageVolumeUtil.getCreationTime(
+ properties, versionFile);
+ int layoutVersion = StorageVolumeUtil.getLayOutVersion(
+ properties, versionFile);
+
+ assertEquals(volume.getStorageID(), storageID);
+ assertEquals(volume.getClusterID(), clusterID);
+ assertEquals(volume.getDatanodeUuid(), datanodeUuid);
+ assertEquals(volume.getCTime(), cTime);
+ assertEquals(volume.getLayoutVersion(), layoutVersion);
+ }
+}
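
For context on what testReadPropertiesFromVersionFile exercises: the VERSION file is a plain java.util.Properties file recording the volume's identity (storageID, clusterID, datanodeUuid, cTime, layOutVersion), and the StorageVolumeUtil getters parse those entries back out. A minimal reader sketch, assuming the standard Properties on-disk format; the helper name is hypothetical:

import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.util.Properties;

final class VersionFileSketch {
  // Load the VERSION file; entries such as storageID and clusterID are
  // then read back via the StorageVolumeUtil getters used in the test.
  static Properties read(File versionFile) throws IOException {
    Properties props = new Properties();
    try (InputStream in = new FileInputStream(versionFile)) {
      props.load(in);
    }
    return props;
  }
}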
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSetDiskChecks.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSetDiskChecks.java
index 76e771ddcd7d..84263de93cff 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSetDiskChecks.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSetDiskChecks.java
@@ -30,6 +30,7 @@
import org.apache.hadoop.hdds.conf.ConfigurationSource;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.OzoneConfigKeys;
+import org.apache.hadoop.ozone.container.common.ContainerTestUtils;
import org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration;
import org.apache.ozone.test.GenericTestUtils;
import org.apache.hadoop.util.DiskChecker.DiskErrorException;
@@ -117,6 +118,7 @@ public void testBadDirectoryDetection() throws IOException {
final int numBadVolumes = 2;
conf = getConfWithDataNodeDirs(numVolumes);
+ ContainerTestUtils.enableSchemaV3(conf);
StorageVolumeChecker dummyChecker =
new DummyChecker(conf, new Timer(), numBadVolumes);
final MutableVolumeSet volumeSet = new MutableVolumeSet(
@@ -127,6 +129,10 @@ public void testBadDirectoryDetection() throws IOException {
UUID.randomUUID().toString(), conf, null,
StorageVolume.VolumeType.META_VOLUME,
dummyChecker);
+ final MutableVolumeSet dbVolumeSet = new MutableVolumeSet(
+ UUID.randomUUID().toString(), conf, null,
+ StorageVolume.VolumeType.DB_VOLUME,
+ dummyChecker);
Assert.assertEquals(volumeSet.getFailedVolumesList().size(),
numBadVolumes);
@@ -136,8 +142,14 @@ public void testBadDirectoryDetection() throws IOException {
numBadVolumes);
Assert.assertEquals(metaVolumeSet.getVolumesList().size(),
numVolumes - numBadVolumes);
+ Assert.assertEquals(dbVolumeSet.getFailedVolumesList().size(),
+ numBadVolumes);
+ Assert.assertEquals(dbVolumeSet.getVolumesList().size(),
+ numVolumes - numBadVolumes);
+
volumeSet.shutdown();
metaVolumeSet.shutdown();
+ dbVolumeSet.shutdown();
}
/**
@@ -148,6 +160,7 @@ public void testAllVolumesAreBad() throws IOException {
final int numVolumes = 5;
conf = getConfWithDataNodeDirs(numVolumes);
+ ContainerTestUtils.enableSchemaV3(conf);
StorageVolumeChecker dummyChecker =
new DummyChecker(conf, new Timer(), numVolumes);
@@ -159,13 +172,21 @@ public void testAllVolumesAreBad() throws IOException {
UUID.randomUUID().toString(), conf, null,
StorageVolume.VolumeType.META_VOLUME,
dummyChecker);
+ final MutableVolumeSet dbVolumeSet = new MutableVolumeSet(
+ UUID.randomUUID().toString(), conf, null,
+ StorageVolume.VolumeType.DB_VOLUME,
+ dummyChecker);
assertEquals(volumeSet.getFailedVolumesList().size(), numVolumes);
assertEquals(volumeSet.getVolumesList().size(), 0);
assertEquals(metaVolumeSet.getFailedVolumesList().size(), numVolumes);
assertEquals(metaVolumeSet.getVolumesList().size(), 0);
+ assertEquals(dbVolumeSet.getFailedVolumesList().size(), numVolumes);
+ assertEquals(dbVolumeSet.getVolumesList().size(), 0);
+
volumeSet.shutdown();
metaVolumeSet.shutdown();
+ dbVolumeSet.shutdown();
}
/**
@@ -188,10 +209,19 @@ private OzoneConfiguration getConfWithDataNodeDirs(int numDirs) {
}
ozoneConf.set(OzoneConfigKeys.DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR,
String.join(",", metaDirs));
+
+    final List<String> dbDirs = new ArrayList<>();
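
The hunk above mirrors the existing metaDirs setup for db volumes: pointing OzoneConfigKeys.HDDS_DATANODE_CONTAINER_DB_DIR at a comma-separated list of directories is what lets MutableVolumeSet build a DB_VOLUME set alongside the data and meta volume sets. A small sketch of that wiring, assuming the Ozone test classpath; the helper class is hypothetical:

import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.OzoneConfigKeys;

final class DbVolumeConfSketch {
  // Register dedicated db directories so a DB_VOLUME MutableVolumeSet
  // can be constructed from this configuration.
  static OzoneConfiguration withDbDirs(String... dbDirs) {
    OzoneConfiguration conf = new OzoneConfiguration();
    conf.set(OzoneConfigKeys.HDDS_DATANODE_CONTAINER_DB_DIR,
        String.join(",", dbDirs));
    return conf;
  }
}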