diff --git a/hadoop-ozone/integration-test-recon/src/test/java/org/apache/hadoop/ozone/recon/TestReconAndAdminContainerCLI.java b/hadoop-ozone/integration-test-recon/src/test/java/org/apache/hadoop/ozone/recon/TestReconAndAdminContainerCLI.java
index 70be91b78624..cf742076ca9d 100644
--- a/hadoop-ozone/integration-test-recon/src/test/java/org/apache/hadoop/ozone/recon/TestReconAndAdminContainerCLI.java
+++ b/hadoop-ozone/integration-test-recon/src/test/java/org/apache/hadoop/ozone/recon/TestReconAndAdminContainerCLI.java
@@ -167,7 +167,7 @@ static void init() throws Exception {
     String bucketName = "bucket1";
 
     ozoneBucket = TestDataUtil.createVolumeAndBucket(
-        client, volumeName, bucketName, BucketLayout.FILE_SYSTEM_OPTIMIZED);
+        client, volumeName, bucketName, BucketLayout.FILE_SYSTEM_OPTIMIZED, null);
 
     String keyNameR3 = "key1";
     containerIdR3 = setupRatisKey(recon, keyNameR3,
diff --git a/hadoop-ozone/integration-test-recon/src/test/java/org/apache/hadoop/ozone/recon/TestReconInsightsForDeletedDirectories.java b/hadoop-ozone/integration-test-recon/src/test/java/org/apache/hadoop/ozone/recon/TestReconInsightsForDeletedDirectories.java
index 487bd116d9a4..487e3e864c8e 100644
--- a/hadoop-ozone/integration-test-recon/src/test/java/org/apache/hadoop/ozone/recon/TestReconInsightsForDeletedDirectories.java
+++ b/hadoop-ozone/integration-test-recon/src/test/java/org/apache/hadoop/ozone/recon/TestReconInsightsForDeletedDirectories.java
@@ -17,6 +17,8 @@
 
 package org.apache.hadoop.ozone.recon;
 
+import static org.apache.hadoop.hdds.client.ReplicationFactor.THREE;
+import static org.apache.hadoop.hdds.client.ReplicationType.RATIS;
 import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ACL_ENABLED;
 import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL;
 import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_ITERATE_BATCH_SIZE;
@@ -39,6 +41,8 @@
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdds.client.DefaultReplicationConfig;
+import org.apache.hadoop.hdds.client.ReplicationConfig;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.scm.server.OzoneStorageContainerManager;
 import org.apache.hadoop.hdds.utils.IOUtils;
@@ -52,6 +56,7 @@
 import org.apache.hadoop.ozone.om.helpers.BucketLayout;
 import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.QuotaUtil;
 import org.apache.hadoop.ozone.recon.api.OMDBInsightEndpoint;
 import org.apache.hadoop.ozone.recon.api.types.KeyInsightInfoResponse;
 import org.apache.hadoop.ozone.recon.api.types.NSSummary;
@@ -80,9 +85,66 @@ public class TestReconInsightsForDeletedDirectories {
   private static FileSystem fs;
   private static String volumeName;
   private static String bucketName;
+  private static ReplicationConfig replicationConfig;
   private static OzoneClient client;
   private static ReconService recon;
 
+  protected static MiniOzoneCluster getCluster() {
+    return cluster;
+  }
+
+  protected static void setCluster(MiniOzoneCluster cluster) {
+    TestReconInsightsForDeletedDirectories.cluster = cluster;
+  }
+
+  protected static FileSystem getFs() {
+    return fs;
+  }
+
+  protected static void setFs(FileSystem fs) {
+    TestReconInsightsForDeletedDirectories.fs = fs;
+  }
+
+  protected static String getVolumeName() {
+    return volumeName;
+  }
+
+  protected static void setVolumeName(String volumeName) {
+    TestReconInsightsForDeletedDirectories.volumeName = volumeName;
+  }
+
+  protected static String getBucketName() {
+    return bucketName;
+  }
+
+  protected static void setBucketName(String bucketName) {
+    TestReconInsightsForDeletedDirectories.bucketName = bucketName;
+  }
+
+  protected static ReplicationConfig getReplicationConfig() {
+    return replicationConfig;
+  }
+
+  protected static void setReplicationConfig(ReplicationConfig replicationConfig) {
+    TestReconInsightsForDeletedDirectories.replicationConfig = replicationConfig;
+  }
+
+  protected static OzoneClient getClient() {
+    return client;
+  }
+
+  protected static void setClient(OzoneClient client) {
+    TestReconInsightsForDeletedDirectories.client = client;
+  }
+
+  protected static ReconService getRecon() {
+    return recon;
+  }
+
+  protected static void setRecon(ReconService recon) {
+    TestReconInsightsForDeletedDirectories.recon = recon;
+  }
+
   @BeforeAll
   public static void init() throws Exception {
     OzoneConfiguration conf = new OzoneConfiguration();
@@ -99,8 +161,9 @@ public static void init() throws Exception {
     client = cluster.newClient();
 
     // create a volume and a bucket to be used by OzoneFileSystem
-    OzoneBucket bucket = TestDataUtil.createVolumeAndBucket(client,
-        BucketLayout.FILE_SYSTEM_OPTIMIZED);
+    replicationConfig = ReplicationConfig.fromTypeAndFactor(RATIS, THREE);
+    OzoneBucket bucket = TestDataUtil.createVolumeAndBucket(client, BucketLayout.FILE_SYSTEM_OPTIMIZED,
+        new DefaultReplicationConfig(replicationConfig));
     volumeName = bucket.getVolumeName();
     bucketName = bucket.getName();
 
@@ -147,7 +210,6 @@ public void cleanup() throws IOException {
   @Test
   public void testGetDeletedDirectoryInfo()
       throws Exception {
-
     // Create a directory structure with 10 files in dir1.
     Path dir1 = new Path("/dir1");
     fs.mkdirs(dir1);
@@ -210,6 +272,7 @@ public void testGetDeletedDirectoryInfo()
       // Assert that the directory dir1 has 10 sub-files and size of 1000 bytes.
       assertEquals(10, summary.getNumOfFiles());
       assertEquals(10, summary.getSizeOfFiles());
+      assertEquals(QuotaUtil.getReplicatedSize(10, replicationConfig), summary.getReplicatedSizeOfFiles());
     }
 
     // Delete the entire directory dir1.
@@ -237,6 +300,7 @@ public void testGetDeletedDirectoryInfo()
         (KeyInsightInfoResponse) deletedDirInfo.getEntity();
     // Assert the size of deleted directory is 10.
     assertEquals(10, entity.getUnreplicatedDataSize());
+    assertEquals(QuotaUtil.getReplicatedSize(10, replicationConfig), entity.getReplicatedDataSize());
 
     // Cleanup the tables.
     cleanupTables();
@@ -257,7 +321,6 @@
   @Test
   public void testGetDeletedDirectoryInfoForNestedDirectories()
       throws Exception {
-
     // Create a directory structure with 10 files and 3 nested directories.
    Path path = new Path("/dir1/dir2/dir3");
    fs.mkdirs(path);
@@ -326,6 +389,7 @@ public void testGetDeletedDirectoryInfoForNestedDirectories()
         (KeyInsightInfoResponse) deletedDirInfo.getEntity();
     // Assert the size of deleted directory is 3.
     assertEquals(3, entity.getUnreplicatedDataSize());
+    assertEquals(QuotaUtil.getReplicatedSize(3, replicationConfig), entity.getReplicatedDataSize());
 
     // Cleanup the tables.
     cleanupTables();
@@ -388,6 +452,7 @@ public void testGetDeletedDirectoryInfoWithMultipleSubdirectories()
         (KeyInsightInfoResponse) deletedDirInfo.getEntity();
     // Assert the size of deleted directory is 100.
     assertEquals(100, entity.getUnreplicatedDataSize());
+    assertEquals(QuotaUtil.getReplicatedSize(100, replicationConfig), entity.getReplicatedDataSize());
 
     // Cleanup the tables.
     cleanupTables();
diff --git a/hadoop-ozone/integration-test-recon/src/test/java/org/apache/hadoop/ozone/recon/TestReconInsightsForDeletedDirectoriesEC.java b/hadoop-ozone/integration-test-recon/src/test/java/org/apache/hadoop/ozone/recon/TestReconInsightsForDeletedDirectoriesEC.java
new file mode 100644
index 000000000000..7b95dd6874ed
--- /dev/null
+++ b/hadoop-ozone/integration-test-recon/src/test/java/org/apache/hadoop/ozone/recon/TestReconInsightsForDeletedDirectoriesEC.java
@@ -0,0 +1,73 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.recon;
+
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ACL_ENABLED;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_ITERATE_BATCH_SIZE;
+import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_DIR_DELETING_SERVICE_INTERVAL;
+
+import java.util.concurrent.TimeUnit;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.hdds.client.DefaultReplicationConfig;
+import org.apache.hadoop.hdds.client.ECReplicationConfig;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.ozone.MiniOzoneCluster;
+import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.TestDataUtil;
+import org.apache.hadoop.ozone.client.OzoneBucket;
+import org.apache.hadoop.ozone.om.helpers.BucketLayout;
+import org.junit.jupiter.api.BeforeAll;
+
+/**
+ * Test class to verify the correctness of the insights generated by Recon
+ * for deleted directories when the bucket uses an EC replication config.
+ */
+public class TestReconInsightsForDeletedDirectoriesEC
+    extends TestReconInsightsForDeletedDirectories {
+
+  @BeforeAll
+  public static void init() throws Exception {
+    OzoneConfiguration conf = new OzoneConfiguration();
+    conf.setInt(OZONE_DIR_DELETING_SERVICE_INTERVAL, 1000000);
+    conf.setTimeDuration(OZONE_BLOCK_DELETING_SERVICE_INTERVAL, 10000000,
+        TimeUnit.MILLISECONDS);
+    conf.setBoolean(OZONE_ACL_ENABLED, true);
+    setRecon(new ReconService(conf));
+    setCluster(MiniOzoneCluster.newBuilder(conf)
+        .setNumDatanodes(5)
+        .addService(getRecon())
+        .build());
+    getCluster().waitForClusterToBeReady();
+    setClient(getCluster().newClient());
+    setReplicationConfig(new ECReplicationConfig("RS-3-2-1024k"));
+    OzoneBucket bucket = TestDataUtil.createVolumeAndBucket(getClient(), BucketLayout.FILE_SYSTEM_OPTIMIZED,
+        new DefaultReplicationConfig(getReplicationConfig()));
+    setVolumeName(bucket.getVolumeName());
+    setBucketName(bucket.getName());
+
+    String rootPath = String.format("%s://%s.%s/",
+        OzoneConsts.OZONE_URI_SCHEME, getBucketName(), getVolumeName());
+
+    conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, rootPath);
+    conf.setInt(OZONE_FS_ITERATE_BATCH_SIZE, 5);
+
+    setFs(FileSystem.get(conf));
+  }
+}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestLeaseRecovery.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestLeaseRecovery.java
index eaf98317c789..230b39d5e40d 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestLeaseRecovery.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestLeaseRecovery.java
@@ -265,7 +265,7 @@ public void testRecoveryWithoutHsyncHflushOnLastBlock() throws Exception {
   @Test
   public void testOBSRecoveryShouldFail() throws Exception {
     OzoneBucket obsBucket = TestDataUtil.createVolumeAndBucket(client,
-        "vol2", "obs", BucketLayout.OBJECT_STORE);
+        "vol2", "obs", BucketLayout.OBJECT_STORE, null);
     String obsDir = OZONE_ROOT + obsBucket.getVolumeName() +
         OZONE_URI_DELIMITER + obsBucket.getName();
     Path obsFile = new Path(obsDir, "file" + getTestName() + FILE_COUNTER.incrementAndGet());
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSWithObjectStoreCreate.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSWithObjectStoreCreate.java
index 012c7a600722..2cea4fce55b6 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSWithObjectStoreCreate.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSWithObjectStoreCreate.java
@@ -104,7 +104,7 @@ public void init() throws Exception {
     bucketName = RandomStringUtils.secure().nextAlphabetic(10).toLowerCase();
 
     // create a volume and a bucket to be used by OzoneFileSystem
-    TestDataUtil.createVolumeAndBucket(client, volumeName, bucketName, BucketLayout.LEGACY);
+    TestDataUtil.createVolumeAndBucket(client, volumeName, bucketName, BucketLayout.LEGACY, null);
 
     String rootPath = String.format("%s://%s.%s/",
         OZONE_URI_SCHEME, bucketName, volumeName);
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemPrefixParser.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemPrefixParser.java
index facea4409650..cd022dd99b4d 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemPrefixParser.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemPrefixParser.java
@@ -71,7 +71,7 @@ public static void init() throws Exception {
     // create a volume and a bucket to be used by OzoneFileSystem
     try (OzoneClient client = cluster.newClient()) {
       TestDataUtil.createVolumeAndBucket(client, volumeName, bucketName,
-          BucketLayout.FILE_SYSTEM_OPTIMIZED);
+          BucketLayout.FILE_SYSTEM_OPTIMIZED, null);
     }
 
     String rootPath = String
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestDataUtil.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestDataUtil.java
index a30fc356057d..90fcfa8e2d6b 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestDataUtil.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestDataUtil.java
@@ -33,6 +33,7 @@
 import java.util.Scanner;
 import org.apache.commons.io.IOUtils;
 import org.apache.commons.lang3.RandomStringUtils;
+import org.apache.hadoop.hdds.client.DefaultReplicationConfig;
 import org.apache.hadoop.hdds.client.ReplicationConfig;
 import org.apache.hadoop.hdds.client.ReplicationFactor;
 import org.apache.hadoop.hdds.client.ReplicationType;
@@ -61,11 +62,11 @@ private TestDataUtil() {
 
   public static OzoneBucket createVolumeAndBucket(OzoneClient client,
       String volumeName, String bucketName) throws IOException {
-    return createVolumeAndBucket(client, volumeName, bucketName, getDefaultBucketLayout(client));
+    return createVolumeAndBucket(client, volumeName, bucketName, getDefaultBucketLayout(client), null);
   }
 
   public static OzoneBucket createVolumeAndBucket(OzoneClient client,
-      String volumeName, String bucketName, BucketLayout bucketLayout)
+      String volumeName, String bucketName, BucketLayout bucketLayout, DefaultReplicationConfig replicationConfig)
       throws IOException {
     BucketArgs omBucketArgs;
     BucketArgs.Builder builder = BucketArgs.newBuilder();
@@ -73,6 +74,10 @@ public static OzoneBucket createVolumeAndBucket(OzoneClient client,
     if (bucketLayout != null) {
       builder.setBucketLayout(bucketLayout);
     }
+
+    if (replicationConfig != null) {
+      builder.setDefaultReplicationConfig(replicationConfig);
+    }
     omBucketArgs = builder.build();
 
     return createVolumeAndBucket(client, volumeName, bucketName,
@@ -197,18 +202,26 @@ public static OzoneBucket createLinkedBucket(OzoneClient client, String vol, Str
 
   public static OzoneBucket createVolumeAndBucket(OzoneClient client,
       BucketLayout bucketLayout) throws IOException {
-    return createVolumeAndBucket(client, bucketLayout, false);
+    return createVolumeAndBucket(client, bucketLayout, null, false);
   }
 
-  public static OzoneBucket createVolumeAndBucket(OzoneClient client,
-      BucketLayout bucketLayout, boolean createLinkedBucket) throws IOException {
+  public static OzoneBucket createVolumeAndBucket(OzoneClient client, BucketLayout bucketLayout,
+      DefaultReplicationConfig replicationConfig)
+      throws IOException {
+    return createVolumeAndBucket(client, bucketLayout, replicationConfig, false);
+  }
+
+  public static OzoneBucket createVolumeAndBucket(OzoneClient client, BucketLayout bucketLayout,
+      DefaultReplicationConfig replicationConfig,
+      boolean createLinkedBucket)
+      throws IOException {
     final int attempts = 5;
     for (int i = 0; i < attempts; i++) {
       try {
         String volumeName = "volume" + RandomStringUtils.secure().nextNumeric(5);
         String bucketName = "bucket" +
            RandomStringUtils.secure().nextNumeric(5);
         OzoneBucket ozoneBucket = createVolumeAndBucket(client, volumeName, bucketName,
-            bucketLayout);
+            bucketLayout, replicationConfig);
         if (createLinkedBucket) {
           String targetBucketName = ozoneBucket.getName() + RandomStringUtils.secure().nextNumeric(5);
           ozoneBucket = createLinkedBucket(client, volumeName, bucketName, targetBucketName);
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStoreWithLegacyFS.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStoreWithLegacyFS.java
index 71a3ac2af7b7..cc787ca36e94 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStoreWithLegacyFS.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStoreWithLegacyFS.java
@@ -101,7 +101,7 @@ public void init() throws Exception {
 
     // create a volume and a bucket to be used by OzoneFileSystem
     TestDataUtil.createVolumeAndBucket(client, volumeName, bucketName,
-        BucketLayout.OBJECT_STORE);
+        BucketLayout.OBJECT_STORE, null);
 
     volume = client.getObjectStore().getVolume(volumeName);
   }
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmMetrics.java
index 8edece39908d..b5a7576fdf1a 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmMetrics.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmMetrics.java
@@ -357,7 +357,7 @@ public void testKeyOps() throws Exception {
     long initialNumDeleteObjectTaggingFails = getLongCounter("NumDeleteObjectTaggingFails", omMetrics);
 
     // see HDDS-10078 for making this work with FILE_SYSTEM_OPTIMIZED layout
-    TestDataUtil.createVolumeAndBucket(client, volumeName, bucketName, BucketLayout.LEGACY);
+    TestDataUtil.createVolumeAndBucket(client, volumeName, bucketName, BucketLayout.LEGACY, null);
     OmKeyArgs keyArgs = createKeyArgs(volumeName, bucketName,
         RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.THREE));
     doKeyOps(keyArgs); // This will perform 7 different operations on the key
@@ -487,7 +487,7 @@ public void testDirectoryOps(BucketLayout bucketLayout) throws Exception {
     String bucketName = UUID.randomUUID().toString();
 
     // create bucket with different layout in each ParameterizedTest
-    TestDataUtil.createVolumeAndBucket(client, volumeName, bucketName, bucketLayout);
+    TestDataUtil.createVolumeAndBucket(client, volumeName, bucketName, bucketLayout, null);
 
     // Create bucket with 2 nested directories.
     String rootPath = String.format("%s://%s/",
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/service/TestSnapshotDeletingServiceIntegrationTest.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/service/TestSnapshotDeletingServiceIntegrationTest.java
index f4c83fc08a5f..8db42e8bea2c 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/service/TestSnapshotDeletingServiceIntegrationTest.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/service/TestSnapshotDeletingServiceIntegrationTest.java
@@ -147,7 +147,7 @@ public void setup() throws Exception {
     client = cluster.newClient();
     om = cluster.getOzoneManager();
     bucket1 = TestDataUtil.createVolumeAndBucket(
-        client, VOLUME_NAME, BUCKET_NAME_ONE, BucketLayout.DEFAULT);
+        client, VOLUME_NAME, BUCKET_NAME_ONE, BucketLayout.DEFAULT, null);
   }
 
   @AfterAll
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshot.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshot.java
index 366f61990f4c..69441f580d75 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshot.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshot.java
@@ -238,7 +238,7 @@ private void init() throws Exception {
     cluster.waitForClusterToBeReady();
     client = cluster.newClient();
     // create a volume and a bucket to be used by OzoneFileSystem
-    ozoneBucket = TestDataUtil.createVolumeAndBucket(client, bucketLayout, createLinkedBucket);
+    ozoneBucket = TestDataUtil.createVolumeAndBucket(client, bucketLayout, null, createLinkedBucket);
     if (createLinkedBucket) {
       this.linkedBuckets.put(ozoneBucket.getName(), ozoneBucket.getSourceBucket());
     }
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneDebugShell.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneDebugShell.java
index 14753394cfe3..274f83123f89 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneDebugShell.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneDebugShell.java
@@ -188,7 +188,7 @@ private void writeKey(String volumeName, String bucketName,
           ReplicationFactor.THREE);
     }
     TestDataUtil.createVolumeAndBucket(client, volumeName, bucketName,
-        layout);
+        layout, null);
     TestDataUtil.createKey(
         client.getObjectStore().getVolume(volumeName).getBucket(bucketName),
         keyName, repConfig, "test".getBytes(StandardCharsets.UTF_8));
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java
index 4c7841b757c9..354526b1fbac 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java
@@ -57,6 +57,7 @@
 import javax.ws.rs.core.MediaType;
 import javax.ws.rs.core.Response;
 import org.apache.commons.lang3.StringUtils;
+import org.apache.commons.lang3.tuple.Pair;
 import org.apache.hadoop.hdds.scm.server.OzoneStorageContainerManager;
 import org.apache.hadoop.hdds.utils.db.Table;
 import org.apache.hadoop.hdds.utils.db.TableIterator;
@@ -600,9 +601,9 @@ private void getPendingForDeletionDirInfo(
       keyEntityInfo.setKey(omKeyInfo.getFileName());
       keyEntityInfo.setPath(createPath(omKeyInfo));
       keyEntityInfo.setInStateSince(omKeyInfo.getCreationTime());
-      keyEntityInfo.setSize(
-          fetchSizeForDeletedDirectory(omKeyInfo.getObjectID()));
-      keyEntityInfo.setReplicatedSize(omKeyInfo.getReplicatedSize());
+      Pair<Long, Long> sizeInfo = fetchSizeForDeletedDirectory(omKeyInfo.getObjectID());
+      keyEntityInfo.setSize(sizeInfo.getLeft());
+      keyEntityInfo.setReplicatedSize(sizeInfo.getRight());
       keyEntityInfo.setReplicationConfig(omKeyInfo.getReplicationConfig());
       pendingForDeletionKeyInfo.setUnreplicatedDataSize(
           pendingForDeletionKeyInfo.getUnreplicatedDataSize() +
@@ -628,24 +629,28 @@ private void getPendingForDeletionDirInfo(
   }
 
   /**
-   * Given an object ID, return total data size (no replication)
+   * Given an object ID, return total data size as a pair of (total size, total replicated size)
    * under this object. Note:- This method is RECURSIVE.
    *
    * @param objectId the object's ID
-   * @return total used data size in bytes
+   * @return total used data size and total replicated data size, in bytes
   * @throws IOException ioEx
    */
-  protected long fetchSizeForDeletedDirectory(long objectId)
+  protected Pair<Long, Long> fetchSizeForDeletedDirectory(long objectId)
       throws IOException {
     NSSummary nsSummary = reconNamespaceSummaryManager.getNSSummary(objectId);
     if (nsSummary == null) {
-      return 0L;
+      return Pair.of(0L, 0L);
     }
     long totalSize = nsSummary.getSizeOfFiles();
+    long totalReplicatedSize = nsSummary.getReplicatedSizeOfFiles();
+
     for (long childId : nsSummary.getChildDir()) {
-      totalSize += fetchSizeForDeletedDirectory(childId);
+      Pair<Long, Long> childSize = fetchSizeForDeletedDirectory(childId);
+      totalSize += childSize.getLeft();
+      totalReplicatedSize += childSize.getRight();
     }
-    return totalSize;
+    return Pair.of(totalSize, totalReplicatedSize);
   }
 
   /** This method retrieves set of directories pending for deletion.
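
A note on the endpoint change above: fetchSizeForDeletedDirectory() now accumulates the unreplicated and replicated totals in one recursive walk over the NSSummary tree, instead of reading the replicated size off the deleted directory's own OmKeyInfo (the removed line), which reflected a single key's replication config and ignored the subtree. The following is a minimal, self-contained sketch of the same pair-wise aggregation; the Node type, DeletedDirSizeSketch class, and sample values are hypothetical stand-ins, not Recon code:

import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.commons.lang3.tuple.Pair;

public final class DeletedDirSizeSketch {

  // Hypothetical stand-in for an NSSummary table row: per-directory totals
  // plus the object IDs of its child directories.
  static final class Node {
    final long sizeOfFiles;            // unreplicated bytes directly under this dir
    final long replicatedSizeOfFiles;  // replicated bytes directly under this dir
    final List<Long> childDirs;        // object IDs of child directories

    Node(long size, long replicated, List<Long> children) {
      this.sizeOfFiles = size;
      this.replicatedSizeOfFiles = replicated;
      this.childDirs = children;
    }
  }

  // Mirrors the recursion in fetchSizeForDeletedDirectory: each level adds its
  // own (size, replicatedSize) and folds in the totals of its child dirs.
  static Pair<Long, Long> fetchSize(Map<Long, Node> table, long objectId) {
    Node node = table.get(objectId);
    if (node == null) {
      return Pair.of(0L, 0L); // unknown directory contributes nothing
    }
    long total = node.sizeOfFiles;
    long replicated = node.replicatedSizeOfFiles;
    for (long childId : node.childDirs) {
      Pair<Long, Long> child = fetchSize(table, childId);
      total += child.getLeft();
      replicated += child.getRight();
    }
    return Pair.of(total, replicated);
  }

  public static void main(String[] args) {
    Map<Long, Node> table = new HashMap<>();
    // dir1 holds 10 bytes (RATIS/THREE => 30 replicated) and contains dir2 with 5 bytes.
    table.put(1L, new Node(10L, 30L, List.of(2L)));
    table.put(2L, new Node(5L, 15L, List.of()));
    System.out.println(fetchSize(table, 1L)); // prints (15,45)
  }
}

Carrying both totals in one Pair keeps the walk O(number of directories) and avoids a second recursive pass just for the replicated figure.
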
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/NSSummary.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/NSSummary.java
index f20fdc764af5..24b43716a93e 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/NSSummary.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/NSSummary.java
@@ -31,24 +31,27 @@ public class NSSummary {
   private int numOfFiles;
   private long sizeOfFiles;
+  private long replicatedSizeOfFiles;
   private int[] fileSizeBucket;
   private Set<Long> childDir;
   private String dirName;
   private long parentId = 0;
 
   public NSSummary() {
-    this(0, 0L, new int[ReconConstants.NUM_OF_FILE_SIZE_BINS],
+    this(0, 0L, 0L, new int[ReconConstants.NUM_OF_FILE_SIZE_BINS],
         new HashSet<>(), "", 0);
   }
 
   public NSSummary(int numOfFiles,
                    long sizeOfFiles,
+                   long replicatedSizeOfFiles,
                    int[] bucket,
                    Set<Long> childDir,
                    String dirName,
                    long parentId) {
     this.numOfFiles = numOfFiles;
     this.sizeOfFiles = sizeOfFiles;
+    this.replicatedSizeOfFiles = replicatedSizeOfFiles;
     setFileSizeBucket(bucket);
     this.childDir = childDir;
     this.dirName = dirName;
@@ -63,6 +66,10 @@ public long getSizeOfFiles() {
     return sizeOfFiles;
   }
 
+  public long getReplicatedSizeOfFiles() {
+    return replicatedSizeOfFiles;
+  }
+
   public int[] getFileSizeBucket() {
     return Arrays.copyOf(fileSizeBucket, ReconConstants.NUM_OF_FILE_SIZE_BINS);
   }
@@ -83,6 +90,10 @@ public void setSizeOfFiles(long sizeOfFiles) {
     this.sizeOfFiles = sizeOfFiles;
   }
 
+  public void setReplicatedSizeOfFiles(long replicatedSizeOfFiles) {
+    this.replicatedSizeOfFiles = replicatedSizeOfFiles;
+  }
+
   public void setFileSizeBucket(int[] fileSizeBucket) {
     this.fileSizeBucket = Arrays.copyOf(fileSizeBucket,
         ReconConstants.NUM_OF_FILE_SIZE_BINS);
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/codec/NSSummaryCodec.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/codec/NSSummaryCodec.java
index 92068988d76e..d1967a35f771 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/codec/NSSummaryCodec.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/codec/NSSummaryCodec.java
@@ -67,11 +67,12 @@ public byte[] toPersistedFormatImpl(NSSummary object) throws IOException {
         + (numOfChildDirs + 1) * Long.BYTES // 1 long field for parentId + list size
         + Short.BYTES // 2 dummy shorts to track length
         + dirName.length // directory name length
-        + Long.BYTES; // Added space for parentId serialization
+        + 2 * Long.BYTES; // Added space for parentId serialization and replicated size of files
     ByteArrayOutputStream out = new ByteArrayOutputStream(resSize);
     out.write(integerCodec.toPersistedFormat(object.getNumOfFiles()));
     out.write(longCodec.toPersistedFormat(object.getSizeOfFiles()));
+    out.write(longCodec.toPersistedFormat(object.getReplicatedSizeOfFiles()));
     out.write(shortCodec.toPersistedFormat(
         (short) ReconConstants.NUM_OF_FILE_SIZE_BINS));
     int[] fileSizeBucket = object.getFileSizeBucket();
@@ -95,6 +96,7 @@ public NSSummary fromPersistedFormatImpl(byte[] rawData) throws IOException {
     NSSummary res = new NSSummary();
     res.setNumOfFiles(in.readInt());
     res.setSizeOfFiles(in.readLong());
+    res.setReplicatedSizeOfFiles(in.readLong());
     short len = in.readShort();
     assert (len == (short) ReconConstants.NUM_OF_FILE_SIZE_BINS);
     int[] fileSizeBucket = new int[len];
@@ -136,6 +138,7 @@ public NSSummary copyObject(NSSummary object) {
     NSSummary copy = new NSSummary();
     copy.setNumOfFiles(object.getNumOfFiles());
     copy.setSizeOfFiles(object.getSizeOfFiles());
+    copy.setReplicatedSizeOfFiles(object.getReplicatedSizeOfFiles());
     copy.setFileSizeBucket(object.getFileSizeBucket());
     copy.setChildDir(object.getChildDir());
     copy.setDirName(object.getDirName());
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskDbEventHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskDbEventHandler.java
index 755d966b8328..85a926df4a18 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskDbEventHandler.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskDbEventHandler.java
@@ -91,6 +91,7 @@ protected void handlePutKeyEvent(OmKeyInfo keyInfo, Map<Long, NSSummary>
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOmDBInsightEndPoint.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOmDBInsightEndPoint.java
   private final Set<Long> generatedIds = new HashSet<>();
-  private static final String VOLUME_ONE = "volume1";
-  private static final String OBS_BUCKET = "obs-bucket";
   private static final String FSO_BUCKET = "fso-bucket";
   private static final String EMPTY_OBS_BUCKET = "empty-obs-bucket";
@@ -256,6 +261,30 @@ public TestOmDBInsightEndPoint() {
     super();
   }
 
+  public static Collection<Object[]> replicationConfigValues() {
+    return Arrays.asList(new Object[][]{
+        {ReplicationConfig.fromProtoTypeAndFactor(HddsProtos.ReplicationType.RATIS,
+            HddsProtos.ReplicationFactor.THREE)},
+        {ReplicationConfig.fromProtoTypeAndFactor(HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.ONE)},
+        {ReplicationConfig.fromProto(HddsProtos.ReplicationType.EC, null,
+            toProto(3, 2, ECReplicationConfig.EcCodec.RS, 1024))},
+        {ReplicationConfig.fromProto(HddsProtos.ReplicationType.EC, null,
+            toProto(6, 3, ECReplicationConfig.EcCodec.RS, 1024))},
+        {ReplicationConfig.fromProto(HddsProtos.ReplicationType.EC, null,
+            toProto(10, 4, ECReplicationConfig.EcCodec.XOR, 4096))}
+    });
+  }
+
+  public static HddsProtos.ECReplicationConfig toProto(int data, int parity, ECReplicationConfig.EcCodec codec,
+      int ecChunkSize) {
+    return HddsProtos.ECReplicationConfig.newBuilder()
+        .setData(data)
+        .setParity(parity)
+        .setCodec(codec.toString())
+        .setEcChunkSize(ecChunkSize)
+        .build();
+  }
+
   private long generateUniqueRandomLong() {
     long newValue;
     do {
@@ -318,6 +347,26 @@ public void setUp() throws Exception {
     nsSummaryTaskWithFSO.reprocessWithFSO(reconOMMetadataManager);
   }
 
+  /**
+   * Releases resources (network sockets, database files) after each test run.
+   * This is critical to prevent resource leaks between tests, which would
+   * otherwise cause "Too many open files" errors.
+   */
+  @AfterEach
+  public void tearDown() throws Exception {
+
+    if (ozoneStorageContainerManager != null) {
+      ozoneStorageContainerManager.stop();
+    }
+
+    if (reconOMMetadataManager != null) {
+      reconOMMetadataManager.stop();
+    }
+
+    if (omMetadataManager != null) {
+      omMetadataManager.stop();
+    }
+  }
+
   @SuppressWarnings("methodlength")
   private void setUpOmData() throws Exception {
     List<OmKeyLocationInfo> omKeyLocationInfoList = new ArrayList<>();
@@ -1391,14 +1440,24 @@ public void testGetDeletedKeysWithBothPrevKeyAndStartPrefixProvided()
 
   private OmKeyInfo getOmKeyInfo(String volumeName, String bucketName,
       String keyName, boolean isFile) {
+    return buildOmKeyInfo(volumeName, bucketName, keyName, isFile,
+        StandaloneReplicationConfig.getInstance(HddsProtos.ReplicationFactor.ONE));
+  }
+
+  private OmKeyInfo getOmKeyInfo(String volumeName, String bucketName,
+      String keyName, boolean isFile, ReplicationConfig replicationConfig) {
+    return buildOmKeyInfo(volumeName, bucketName, keyName, isFile, replicationConfig);
+  }
+
+  private OmKeyInfo buildOmKeyInfo(String volumeName, String bucketName,
+      String keyName, boolean isFile, ReplicationConfig replicationConfig) {
     return new OmKeyInfo.Builder()
         .setVolumeName(volumeName)
         .setBucketName(bucketName)
         .setKeyName(keyName)
         .setFile(isFile)
         .setObjectID(generateUniqueRandomLong())
-        .setReplicationConfig(StandaloneReplicationConfig
-            .getInstance(HddsProtos.ReplicationFactor.ONE))
+        .setReplicationConfig(replicationConfig)
         .setDataSize(random.nextLong())
         .build();
   }
@@ -1503,15 +1562,17 @@ public void testGetDeletedDirInfo() throws Exception {
         keyInsightInfoResp.getLastKey());
   }
 
-  @Test
-  public void testGetDirectorySizeInfo() throws Exception {
+  @ParameterizedTest
+  @MethodSource("replicationConfigValues")
+  public void testGetDirectorySizeInfo(ReplicationConfig replicationConfig) throws Exception {
 
     OmKeyInfo omKeyInfo1 =
-        getOmKeyInfo("sampleVol", "bucketOne", "dir1", false);
+        getOmKeyInfo("sampleVol", "bucketOne", "dir1", false, replicationConfig);
     OmKeyInfo omKeyInfo2 =
-        getOmKeyInfo("sampleVol", "bucketTwo", "dir2", false);
+        getOmKeyInfo("sampleVol", "bucketTwo", "dir2", false, replicationConfig);
     OmKeyInfo omKeyInfo3 =
-        getOmKeyInfo("sampleVol", "bucketThree", "dir3", false);
+        getOmKeyInfo("sampleVol", "bucketThree", "dir3", false,
+            replicationConfig);
 
     // Add 3 entries to deleted dir table for directory dir1, dir2 and dir3
     // having object id 1, 2 and 3 respectively
@@ -1525,11 +1586,11 @@ public void testGetDirectorySizeInfo() throws Exception {
     // Prepare NS summary data and populate the table
     Table<Long, NSSummary> table = omdbInsightEndpoint.getNsSummaryTable();
     // Set size of files to 5 for directory object id 1
-    table.put(omKeyInfo1.getObjectID(), getNsSummary(5L));
+    table.put(omKeyInfo1.getObjectID(), getNsSummary(5L, replicationConfig));
     // Set size of files to 6 for directory object id 2
-    table.put(omKeyInfo2.getObjectID(), getNsSummary(6L));
+    table.put(omKeyInfo2.getObjectID(), getNsSummary(6L, replicationConfig));
     // Set size of files to 7 for directory object id 3
-    table.put(omKeyInfo3.getObjectID(), getNsSummary(7L));
+    table.put(omKeyInfo3.getObjectID(), getNsSummary(7L, replicationConfig));
 
     Response deletedDirInfo = omdbInsightEndpoint.getDeletedDirInfo(-1, "");
     KeyInsightInfoResponse keyInsightInfoResp =
@@ -1540,15 +1601,23 @@ public void testGetDirectorySizeInfo() throws Exception {
     // Assert the total size under directory dir1 is 5L
     assertEquals(5L,
         keyInsightInfoResp.getDeletedDirInfoList().get(0).getSize());
+    assertEquals(QuotaUtil.getReplicatedSize(5L, replicationConfig),
+        keyInsightInfoResp.getDeletedDirInfoList().get(0).getReplicatedSize());
     // Assert the total size under directory dir2 is 6L
     assertEquals(6L,
         keyInsightInfoResp.getDeletedDirInfoList().get(1).getSize());
+    assertEquals(QuotaUtil.getReplicatedSize(6L, replicationConfig),
+        keyInsightInfoResp.getDeletedDirInfoList().get(1).getReplicatedSize());
     // Assert the total size under directory dir3 is 7L
     assertEquals(7L,
         keyInsightInfoResp.getDeletedDirInfoList().get(2).getSize());
+    assertEquals(QuotaUtil.getReplicatedSize(7L, replicationConfig),
+        keyInsightInfoResp.getDeletedDirInfoList().get(2).getReplicatedSize());
 
     // Assert the total of all the deleted directories is 18L
     assertEquals(18L, keyInsightInfoResp.getUnreplicatedDataSize());
+    assertEquals(QuotaUtil.getReplicatedSize(18L, replicationConfig),
+        keyInsightInfoResp.getReplicatedDataSize());
   }
 
   @Test
@@ -2014,9 +2083,10 @@ public void testListKeysLegacyBucketWithFSEnabledAndPagination() {
     assertEquals("", listKeysResponse.getLastKey());
   }
 
-  private NSSummary getNsSummary(long size) {
+  private NSSummary getNsSummary(long size, ReplicationConfig replicationConfig) {
     NSSummary summary = new NSSummary();
     summary.setSizeOfFiles(size);
+    summary.setReplicatedSizeOfFiles(QuotaUtil.getReplicatedSize(size, replicationConfig));
     return summary;
   }
 }
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/TestReconNamespaceSummaryManagerImpl.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/TestReconNamespaceSummaryManagerImpl.java
index c0931ba6d35d..e33bee042560 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/TestReconNamespaceSummaryManagerImpl.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/TestReconNamespaceSummaryManagerImpl.java
@@ -112,9 +112,9 @@ public void testInitNSSummaryTable() throws IOException {
 
   private void putThreeNSMetadata() throws IOException {
     HashMap<Long, NSSummary> hmap = new HashMap<>();
-    hmap.put(1L, new NSSummary(1, 2, testBucket, TEST_CHILD_DIR, "dir1", -1));
-    hmap.put(2L, new NSSummary(3, 4, testBucket, TEST_CHILD_DIR, "dir2", -1));
-    hmap.put(3L, new NSSummary(5, 6, testBucket, TEST_CHILD_DIR, "dir3", -1));
+    hmap.put(1L, new NSSummary(1, 2, 2 * 3, testBucket, TEST_CHILD_DIR, "dir1", -1));
+    hmap.put(2L, new NSSummary(3, 4, 4 * 3, testBucket, TEST_CHILD_DIR, "dir2", -1));
+    hmap.put(3L, new NSSummary(5, 6, 6 * 3, testBucket, TEST_CHILD_DIR, "dir3", -1));
     RDBBatchOperation rdbBatchOperation = new RDBBatchOperation();
     for (Map.Entry<Long, NSSummary> entry: hmap.entrySet()) {
       reconNamespaceSummaryManager.batchStoreNSSummaries(rdbBatchOperation,
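
The expected values in the new assertions come from QuotaUtil.getReplicatedSize(size, replicationConfig). As a rough model of that arithmetic (a sketch for intuition only, not the authoritative Ozone implementation — exact partial-stripe handling may differ): RATIS stores every byte factor times, while EC stripes data across d data chunks and adds p parity chunks per stripe:

public final class ReplicatedSizeModel {

  // RATIS/<factor>: every byte is stored <factor> times.
  static long ratisReplicatedSize(long dataSize, int factor) {
    return dataSize * factor;
  }

  // EC RS-d-p with a given chunk size: data is striped across d chunks and each
  // stripe carries p parity chunks. A trailing partial stripe is modeled here as
  // contributing parity proportional to its first (possibly short) chunk.
  // Assumption: approximate model; the real QuotaUtil may round differently.
  static long ecReplicatedSize(long dataSize, int data, int parity, int chunkSize) {
    long stripeDataSize = (long) data * chunkSize;
    long fullStripes = dataSize / stripeDataSize;
    long remainder = dataSize % stripeDataSize;
    long replicated = dataSize + fullStripes * parity * (long) chunkSize;
    if (remainder > 0) {
      replicated += Math.min(remainder, chunkSize) * (long) parity;
    }
    return replicated;
  }

  public static void main(String[] args) {
    // RATIS/THREE over 18 bytes: 54 bytes on disk.
    System.out.println(ratisReplicatedSize(18L, 3));
    // EC RS-3-2-1024k over 18 bytes: one short stripe, 18 + 2 * 18 = 54 bytes.
    System.out.println(ecReplicatedSize(18L, 3, 2, 1024 * 1024));
  }
}

Under this model, a directory smaller than one EC chunk pays parity on every byte, so RS-3-2 and RATIS/THREE happen to coincide at 3x for tiny test directories; the parameterized tests above exercise several RATIS and EC configs precisely so the assertions do not depend on such coincidences.
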