From becb84a1068c93f851acbb6750936bf102352621 Mon Sep 17 00:00:00 2001 From: tanvipenumudy <46785609+tanvipenumudy@users.noreply.github.com> Date: Tue, 22 Jul 2025 20:24:04 +0530 Subject: [PATCH] HDDS-13758. Add replicatedSizeOfFiles to NSSummary to Calculate DiskUsage --- ...estReconInsightsForDeletedDirectories.java | 84 +++++++++++----- .../org/apache/hadoop/ozone/TestDataUtil.java | 37 ++++++- .../ozone/om/snapshot/TestOmSnapshot.java | 2 +- .../om/snapshot/TestOmSnapshotFileSystem.java | 2 +- .../ReconGuiceServletContextListener.java | 4 + .../ozone/recon/api/OMDBInsightEndpoint.java | 19 ++-- .../recon/api/handlers/FSOBucketHandler.java | 44 +-------- .../ozone/recon/api/types/NSSummary.java | 13 ++- .../ozone/recon/codec/NSSummaryCodec.java | 5 +- .../tasks/NSSummaryTaskDbEventHandler.java | 22 +++-- .../recon/upgrade/ReconLayoutFeature.java | 3 +- .../ReplicatedSizeOfFilesUpgradeAction.java | 67 +++++++++++++ .../api/TestNSSummaryEndpointWithFSO.java | 27 +++-- .../recon/api/TestOmDBInsightEndPoint.java | 87 +++++++++++++--- .../ozone/recon/common/CommonUtils.java | 8 +- .../TestReconNamespaceSummaryManagerImpl.java | 6 +- .../recon/tasks/TestNSSummaryTaskWithFSO.java | 10 +- ...estReplicatedSizeOfFilesUpgradeAction.java | 99 +++++++++++++++++++ 18 files changed, 416 insertions(+), 123 deletions(-) create mode 100644 hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/upgrade/ReplicatedSizeOfFilesUpgradeAction.java create mode 100644 hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/upgrade/TestReplicatedSizeOfFilesUpgradeAction.java diff --git a/hadoop-ozone/integration-test-recon/src/test/java/org/apache/hadoop/ozone/recon/TestReconInsightsForDeletedDirectories.java b/hadoop-ozone/integration-test-recon/src/test/java/org/apache/hadoop/ozone/recon/TestReconInsightsForDeletedDirectories.java index 5e6fe7e42695..793fe07ea528 100644 --- a/hadoop-ozone/integration-test-recon/src/test/java/org/apache/hadoop/ozone/recon/TestReconInsightsForDeletedDirectories.java +++ b/hadoop-ozone/integration-test-recon/src/test/java/org/apache/hadoop/ozone/recon/TestReconInsightsForDeletedDirectories.java @@ -17,6 +17,8 @@ package org.apache.hadoop.ozone.recon; +import static org.apache.hadoop.hdds.client.ReplicationFactor.THREE; +import static org.apache.hadoop.hdds.client.ReplicationType.RATIS; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ACL_ENABLED; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_ITERATE_BATCH_SIZE; @@ -29,6 +31,7 @@ import java.io.IOException; import java.util.ArrayList; +import java.util.Arrays; import java.util.List; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; @@ -40,6 +43,9 @@ import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hdds.client.DefaultReplicationConfig; +import org.apache.hadoop.hdds.client.ECReplicationConfig; +import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.scm.server.OzoneStorageContainerManager; import org.apache.hadoop.hdds.utils.IOUtils; @@ -53,6 +59,7 @@ import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.QuotaUtil; import 
org.apache.hadoop.ozone.recon.api.OMDBInsightEndpoint; import org.apache.hadoop.ozone.recon.api.types.KeyInsightInfoResponse; import org.apache.hadoop.ozone.recon.api.types.NSSummary; @@ -64,7 +71,9 @@ import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeAll; -import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.Arguments; +import org.junit.jupiter.params.provider.MethodSource; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -78,40 +87,41 @@ public class TestReconInsightsForDeletedDirectories { LoggerFactory.getLogger(TestReconInsightsForDeletedDirectories.class); private static MiniOzoneCluster cluster; - private static FileSystem fs; + private FileSystem fs; private static OzoneClient client; private static ReconService recon; + private static OzoneConfiguration conf; @BeforeAll public static void init() throws Exception { - OzoneConfiguration conf = new OzoneConfiguration(); + conf = new OzoneConfiguration(); conf.setInt(OZONE_DIR_DELETING_SERVICE_INTERVAL, 1000000); conf.setTimeDuration(OZONE_BLOCK_DELETING_SERVICE_INTERVAL, 10000000, TimeUnit.MILLISECONDS); conf.setBoolean(OZONE_ACL_ENABLED, true); recon = new ReconService(conf); cluster = MiniOzoneCluster.newBuilder(conf) - .setNumDatanodes(3) + .setNumDatanodes(5) .addService(recon) .build(); cluster.waitForClusterToBeReady(); client = cluster.newClient(); - // create a volume and a bucket to be used by OzoneFileSystem - OzoneBucket bucket = TestDataUtil.createVolumeAndBucket(client, - BucketLayout.FILE_SYSTEM_OPTIMIZED); - String volumeName = bucket.getVolumeName(); - String bucketName = bucket.getName(); - - String rootPath = String.format("%s://%s.%s/", - OzoneConsts.OZONE_URI_SCHEME, bucketName, volumeName); - - // Set the fs.defaultFS and start the filesystem - conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, rootPath); // Set the number of keys to be processed during batch operate. conf.setInt(OZONE_FS_ITERATE_BATCH_SIZE, 5); + } - fs = FileSystem.get(conf); + /** + * Provides a list of replication configurations (RATIS and EC) + * to be used for parameterized tests. + * + * @return List of replication configurations as Arguments. + */ + static List replicationConfigs() { + return Arrays.asList( + Arguments.of(ReplicationConfig.fromTypeAndFactor(RATIS, THREE)), + Arguments.of(new ECReplicationConfig("RS-3-2-1024k")) + ); } @AfterAll @@ -120,7 +130,6 @@ public static void teardown() { if (cluster != null) { cluster.shutdown(); } - IOUtils.closeQuietly(fs); } @AfterEach @@ -132,6 +141,8 @@ public void cleanup() throws IOException { fs.delete(fileStatus.getPath(), true); } }); + + IOUtils.closeQuietly(fs); } /** @@ -143,9 +154,16 @@ public void cleanup() throws IOException { * ├── ... * └── file10 */ - @Test - public void testGetDeletedDirectoryInfo() + @ParameterizedTest + @MethodSource("replicationConfigs") + public void testGetDeletedDirectoryInfo(ReplicationConfig replicationConfig) throws Exception { + OzoneBucket bucket = TestDataUtil.createVolumeAndBucket(client, BucketLayout.FILE_SYSTEM_OPTIMIZED, + new DefaultReplicationConfig(replicationConfig)); + String rootPath = String.format("%s://%s.%s/", OzoneConsts.OZONE_URI_SCHEME, bucket.getName(), + bucket.getVolumeName()); + conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, rootPath); + fs = FileSystem.get(conf); // Create a directory structure with 10 files in dir1. 
Path dir1 = new Path("/dir1"); @@ -209,6 +227,7 @@ public void testGetDeletedDirectoryInfo() // Assert that the directory dir1 has 10 sub-files and size of 10 bytes. assertEquals(10, summary.getNumOfFiles()); assertEquals(10, summary.getSizeOfFiles()); + assertEquals(QuotaUtil.getReplicatedSize(10, replicationConfig), summary.getReplicatedSizeOfFiles()); } // Delete the entire directory dir1. @@ -236,6 +255,7 @@ public void testGetDeletedDirectoryInfo() (KeyInsightInfoResponse) deletedDirInfo.getEntity(); // Assert the size of deleted directory is 10. assertEquals(10, entity.getUnreplicatedDataSize()); + assertEquals(QuotaUtil.getReplicatedSize(10, replicationConfig), entity.getReplicatedDataSize()); // Cleanup the tables. cleanupTables(); @@ -253,9 +273,16 @@ public void testGetDeletedDirectoryInfo() * │ │ └── file3 * */ - @Test - public void testGetDeletedDirectoryInfoForNestedDirectories() + @ParameterizedTest + @MethodSource("replicationConfigs") + public void testGetDeletedDirectoryInfoForNestedDirectories(ReplicationConfig replicationConfig) throws Exception { + OzoneBucket bucket = TestDataUtil.createVolumeAndBucket(client, BucketLayout.FILE_SYSTEM_OPTIMIZED, + new DefaultReplicationConfig(replicationConfig)); + String rootPath = String.format("%s://%s.%s/", OzoneConsts.OZONE_URI_SCHEME, bucket.getName(), + bucket.getVolumeName()); + conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, rootPath); + fs = FileSystem.get(conf); // Create a directory structure with 10 files and 3 nested directories. Path path = new Path("/dir1/dir2/dir3"); @@ -325,6 +352,7 @@ public void testGetDeletedDirectoryInfoForNestedDirectories() (KeyInsightInfoResponse) deletedDirInfo.getEntity(); // Assert the size of deleted directory is 3. assertEquals(3, entity.getUnreplicatedDataSize()); + assertEquals(QuotaUtil.getReplicatedSize(3, replicationConfig), entity.getReplicatedDataSize()); // Cleanup the tables. cleanupTables(); @@ -351,9 +379,18 @@ public void testGetDeletedDirectoryInfoForNestedDirectories() * ├── ... * └── file10 */ - @Test - public void testGetDeletedDirectoryInfoWithMultipleSubdirectories() + @ParameterizedTest + @MethodSource("replicationConfigs") + public void testGetDeletedDirectoryInfoWithMultipleSubdirectories(ReplicationConfig replicationConfig) throws Exception { + OzoneBucket bucket = TestDataUtil.createVolumeAndBucket(client, BucketLayout.FILE_SYSTEM_OPTIMIZED, + new DefaultReplicationConfig(replicationConfig)); + String rootPath = String.format("%s://%s.%s/", OzoneConsts.OZONE_URI_SCHEME, bucket.getName(), + bucket.getVolumeName()); + // Set the fs.defaultFS and start the filesystem + conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, rootPath); + fs = FileSystem.get(conf); + int numSubdirectories = 10; int filesPerSubdirectory = 10; @@ -387,6 +424,7 @@ public void testGetDeletedDirectoryInfoWithMultipleSubdirectories() (KeyInsightInfoResponse) deletedDirInfo.getEntity(); // Assert the size of deleted directory is 100. assertEquals(100, entity.getUnreplicatedDataSize()); + assertEquals(QuotaUtil.getReplicatedSize(100, replicationConfig), entity.getReplicatedDataSize()); // Cleanup the tables.
cleanupTables(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestDataUtil.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestDataUtil.java index a30fc356057d..7ac80ef40584 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestDataUtil.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestDataUtil.java @@ -33,6 +33,7 @@ import java.util.Scanner; import org.apache.commons.io.IOUtils; import org.apache.commons.lang3.RandomStringUtils; +import org.apache.hadoop.hdds.client.DefaultReplicationConfig; import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.client.ReplicationFactor; import org.apache.hadoop.hdds.client.ReplicationType; @@ -65,7 +66,21 @@ public static OzoneBucket createVolumeAndBucket(OzoneClient client, } public static OzoneBucket createVolumeAndBucket(OzoneClient client, - String volumeName, String bucketName, BucketLayout bucketLayout) + String volumeName, String bucketName, BucketLayout bucketLayout) throws IOException { + BucketArgs omBucketArgs; + BucketArgs.Builder builder = BucketArgs.newBuilder(); + builder.setStorageType(StorageType.DISK); + if (bucketLayout != null) { + builder.setBucketLayout(bucketLayout); + } + omBucketArgs = builder.build(); + + return createVolumeAndBucket(client, volumeName, bucketName, + omBucketArgs); + } + + public static OzoneBucket createVolumeAndBucket(OzoneClient client, + String volumeName, String bucketName, BucketLayout bucketLayout, DefaultReplicationConfig replicationConfig) throws IOException { BucketArgs omBucketArgs; BucketArgs.Builder builder = BucketArgs.newBuilder(); @@ -73,6 +88,10 @@ public static OzoneBucket createVolumeAndBucket(OzoneClient client, if (bucketLayout != null) { builder.setBucketLayout(bucketLayout); } + + if (replicationConfig != null) { + builder.setDefaultReplicationConfig(replicationConfig); + } omBucketArgs = builder.build(); return createVolumeAndBucket(client, volumeName, bucketName, @@ -197,18 +216,26 @@ public static OzoneBucket createLinkedBucket(OzoneClient client, String vol, Str public static OzoneBucket createVolumeAndBucket(OzoneClient client, BucketLayout bucketLayout) throws IOException { - return createVolumeAndBucket(client, bucketLayout, false); + return createVolumeAndBucket(client, bucketLayout, null, false); } - public static OzoneBucket createVolumeAndBucket(OzoneClient client, - BucketLayout bucketLayout, boolean createLinkedBucket) throws IOException { + public static OzoneBucket createVolumeAndBucket(OzoneClient client, BucketLayout bucketLayout, + DefaultReplicationConfig replicationConfig) + throws IOException { + return createVolumeAndBucket(client, bucketLayout, replicationConfig, false); + } + + public static OzoneBucket createVolumeAndBucket(OzoneClient client, BucketLayout bucketLayout, + DefaultReplicationConfig replicationConfig, + boolean createLinkedBucket) + throws IOException { final int attempts = 5; for (int i = 0; i < attempts; i++) { try { String volumeName = "volume" + RandomStringUtils.secure().nextNumeric(5); String bucketName = "bucket" + RandomStringUtils.secure().nextNumeric(5); OzoneBucket ozoneBucket = createVolumeAndBucket(client, volumeName, bucketName, - bucketLayout); + bucketLayout, replicationConfig); if (createLinkedBucket) { String targetBucketName = ozoneBucket.getName() + RandomStringUtils.secure().nextNumeric(5); ozoneBucket = createLinkedBucket(client, volumeName, bucketName, targetBucketName); 
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshot.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshot.java index aac18d5d36d1..93dba945d46d 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshot.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshot.java @@ -244,7 +244,7 @@ private void init() throws Exception { cluster.waitForClusterToBeReady(); client = cluster.newClient(); // create a volume and a bucket to be used by OzoneFileSystem - ozoneBucket = TestDataUtil.createVolumeAndBucket(client, bucketLayout, createLinkedBucket); + ozoneBucket = TestDataUtil.createVolumeAndBucket(client, bucketLayout, null, createLinkedBucket); if (createLinkedBucket) { this.linkedBuckets.put(ozoneBucket.getName(), ozoneBucket.getSourceBucket()); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotFileSystem.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotFileSystem.java index 7db6c8d41db6..fca8b137b720 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotFileSystem.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotFileSystem.java @@ -132,7 +132,7 @@ public void setupFsClient() throws IOException { writeClient = objectStore.getClientProxy().getOzoneManagerClient(); ozoneManager = cluster().getOzoneManager(); - OzoneBucket bucket = TestDataUtil.createVolumeAndBucket(client, bucketLayout, createLinkedBuckets); + OzoneBucket bucket = TestDataUtil.createVolumeAndBucket(client, bucketLayout, null, createLinkedBuckets); if (createLinkedBuckets) { linkedBucketMaps.put(bucket.getName(), bucket.getSourceBucket()); } diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconGuiceServletContextListener.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconGuiceServletContextListener.java index cbc78a0139a7..3a4e684cc7dc 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconGuiceServletContextListener.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconGuiceServletContextListener.java @@ -33,6 +33,10 @@ public Injector getInjector() { return injector; } + public static Injector getStaticInjector() { + return injector; + } + static void setInjector(Injector inj) { injector = inj; } diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java index 2a4ed2bd5fa3..5e23369bdbc9 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java @@ -57,6 +57,7 @@ import javax.ws.rs.core.MediaType; import javax.ws.rs.core.Response; import org.apache.commons.lang3.StringUtils; +import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.hdds.scm.server.OzoneStorageContainerManager; import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.hdds.utils.db.TableIterator; @@ -600,9 +601,9 @@ private void getPendingForDeletionDirInfo( keyEntityInfo.setKey(omKeyInfo.getFileName()); 
keyEntityInfo.setPath(createPath(omKeyInfo)); keyEntityInfo.setInStateSince(omKeyInfo.getCreationTime()); - keyEntityInfo.setSize( - fetchSizeForDeletedDirectory(omKeyInfo.getObjectID())); - keyEntityInfo.setReplicatedSize(omKeyInfo.getReplicatedSize()); + Pair<Long, Long> sizeInfo = fetchSizeForDeletedDirectory(omKeyInfo.getObjectID()); + keyEntityInfo.setSize(sizeInfo.getLeft()); + keyEntityInfo.setReplicatedSize(sizeInfo.getRight()); keyEntityInfo.setReplicationConfig(omKeyInfo.getReplicationConfig()); pendingForDeletionKeyInfo.setUnreplicatedDataSize( pendingForDeletionKeyInfo.getUnreplicatedDataSize() + @@ -628,20 +629,20 @@ private void getPendingForDeletionDirInfo( } /** - * Given an object ID, return total data size (no replication) - * under this object. Note:- This method is RECURSIVE. + * Given an object ID, return the total data size under this object + * as a pair of (total size, total replicated size). This is now a + * direct NSSummary lookup and is no longer recursive. * * @param objectId the object's ID - * @return total used data size in bytes + * @return pair of total used data size and total replicated data size, in bytes */ - protected long fetchSizeForDeletedDirectory(long objectId) + protected Pair<Long, Long> fetchSizeForDeletedDirectory(long objectId) throws IOException { NSSummary nsSummary = reconNamespaceSummaryManager.getNSSummary(objectId); - if (nsSummary == null) { - return 0L; + if (nsSummary != null) { + return Pair.of(nsSummary.getSizeOfFiles(), nsSummary.getReplicatedSizeOfFiles()); } - return nsSummary.getSizeOfFiles(); + return Pair.of(0L, 0L); } /** This method retrieves set of directories pending for deletion. diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/FSOBucketHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/FSOBucketHandler.java index 845e27b5bde6..7d482745c21b 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/FSOBucketHandler.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/FSOBucketHandler.java @@ -24,7 +24,6 @@ import java.nio.file.Paths; import java.util.Iterator; import java.util.List; -import java.util.Set; import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.hdds.utils.db.TableIterator; import org.apache.hadoop.ozone.om.helpers.BucketLayout; @@ -115,48 +114,9 @@ public EntityType determineKeyPath(String keyName) @Override public long calculateDUUnderObject(long parentId) throws IOException { - Table<String, OmKeyInfo> keyTable = getOmMetadataManager().getFileTable(); - - long totalDU = 0L; - try (TableIterator<String, ? extends Table.KeyValue<String, OmKeyInfo>> - iterator = keyTable.iterator()) { - - String seekPrefix = OM_KEY_PREFIX + - volumeId + - OM_KEY_PREFIX + - bucketId + - OM_KEY_PREFIX + - parentId + - OM_KEY_PREFIX; - iterator.seek(seekPrefix); - // handle direct keys - while (iterator.hasNext()) { - Table.KeyValue<String, OmKeyInfo> kv = iterator.next(); - String dbKey = kv.getKey(); - // since the RocksDB is ordered, seek until the prefix isn't matched - if (!dbKey.startsWith(seekPrefix)) { - break; - } - OmKeyInfo keyInfo = kv.getValue(); - if (keyInfo != null) { - totalDU += keyInfo.getReplicatedSize(); - } - } - } - - // handle nested keys (DFS) NSSummary nsSummary = getReconNamespaceSummaryManager() - .getNSSummary(parentId); - // empty bucket - if (nsSummary == null) { - return 0; - } - - Set<Long> subDirIds = nsSummary.getChildDir(); - for (long subDirId: subDirIds) { - totalDU += calculateDUUnderObject(subDirId); - } - return totalDU; + .getNSSummary(parentId); + return nsSummary != null ?
nsSummary.getReplicatedSizeOfFiles() : 0L; } /** diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/NSSummary.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/NSSummary.java index b9075ca53b41..db06f99106c7 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/NSSummary.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/NSSummary.java @@ -37,24 +37,27 @@ public class NSSummary { // for performance optimization, not just direct files in this directory private int numOfFiles; private long sizeOfFiles; + private long replicatedSizeOfFiles; private int[] fileSizeBucket; private Set<Long> childDir; private String dirName; private long parentId = 0; public NSSummary() { - this(0, 0L, new int[ReconConstants.NUM_OF_FILE_SIZE_BINS], + this(0, 0L, 0L, new int[ReconConstants.NUM_OF_FILE_SIZE_BINS], new HashSet<>(), "", 0); } public NSSummary(int numOfFiles, long sizeOfFiles, + long replicatedSizeOfFiles, int[] bucket, Set<Long> childDir, String dirName, long parentId) { this.numOfFiles = numOfFiles; this.sizeOfFiles = sizeOfFiles; + this.replicatedSizeOfFiles = replicatedSizeOfFiles; setFileSizeBucket(bucket); this.childDir = childDir; this.dirName = dirName; @@ -75,6 +78,10 @@ public long getSizeOfFiles() { return sizeOfFiles; } + public long getReplicatedSizeOfFiles() { + return replicatedSizeOfFiles; + } + public int[] getFileSizeBucket() { return Arrays.copyOf(fileSizeBucket, ReconConstants.NUM_OF_FILE_SIZE_BINS); } @@ -101,6 +108,10 @@ public void setSizeOfFiles(long sizeOfFiles) { this.sizeOfFiles = sizeOfFiles; } + public void setReplicatedSizeOfFiles(long replicatedSizeOfFiles) { + this.replicatedSizeOfFiles = replicatedSizeOfFiles; + } + public void setFileSizeBucket(int[] fileSizeBucket) { this.fileSizeBucket = Arrays.copyOf(fileSizeBucket, ReconConstants.NUM_OF_FILE_SIZE_BINS); diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/codec/NSSummaryCodec.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/codec/NSSummaryCodec.java index 92068988d76e..d1967a35f771 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/codec/NSSummaryCodec.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/codec/NSSummaryCodec.java @@ -67,11 +67,12 @@ public byte[] toPersistedFormatImpl(NSSummary object) throws IOException { + (numOfChildDirs + 1) * Long.BYTES // 1 long field for parentId + list size + Short.BYTES // 2 dummy shorts to track length + dirName.length // directory name length - + Long.BYTES; // Added space for parentId serialization + + 2 * Long.BYTES; // Added space for parentId serialization and replicated size of files ByteArrayOutputStream out = new ByteArrayOutputStream(resSize); out.write(integerCodec.toPersistedFormat(object.getNumOfFiles())); out.write(longCodec.toPersistedFormat(object.getSizeOfFiles())); + out.write(longCodec.toPersistedFormat(object.getReplicatedSizeOfFiles())); out.write(shortCodec.toPersistedFormat( (short) ReconConstants.NUM_OF_FILE_SIZE_BINS)); int[] fileSizeBucket = object.getFileSizeBucket(); @@ -95,6 +96,7 @@ public NSSummary fromPersistedFormatImpl(byte[] rawData) throws IOException { NSSummary res = new NSSummary(); res.setNumOfFiles(in.readInt()); res.setSizeOfFiles(in.readLong()); + res.setReplicatedSizeOfFiles(in.readLong()); short len = in.readShort(); assert (len == (short) ReconConstants.NUM_OF_FILE_SIZE_BINS); int[] fileSizeBucket = new int[len]; @@ -136,6
+138,7 @@ public NSSummary copyObject(NSSummary object) { NSSummary copy = new NSSummary(); copy.setNumOfFiles(object.getNumOfFiles()); copy.setSizeOfFiles(object.getSizeOfFiles()); + copy.setReplicatedSizeOfFiles(object.getReplicatedSizeOfFiles()); copy.setFileSizeBucket(object.getFileSizeBucket()); copy.setChildDir(object.getChildDir()); copy.setDirName(object.getDirName()); diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskDbEventHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskDbEventHandler.java index 5d2f747f9405..4a465de18613 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskDbEventHandler.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskDbEventHandler.java @@ -102,6 +102,7 @@ protected void handlePutKeyEvent(OmKeyInfo keyInfo, Map<Long, NSSummary> ... (existingSizeOfFiles > 0 || existingNumOfFiles > 0)) { - propagateSizeUpwards(parentObjectId, existingSizeOfFiles, existingNumOfFiles, nsSummaryMap); + propagateSizeUpwards(parentObjectId, existingSizeOfFiles, + existingReplicatedSizeOfFiles, existingNumOfFiles, nsSummaryMap); } } @@ -185,12 +188,14 @@ protected void handleDeleteKeyEvent(OmKeyInfo keyInfo, // Decrement immediate parent's totals (these fields now represent totals) nsSummary.setNumOfFiles(nsSummary.getNumOfFiles() - 1); nsSummary.setSizeOfFiles(nsSummary.getSizeOfFiles() - keyInfo.getDataSize()); + nsSummary.setReplicatedSizeOfFiles(nsSummary.getReplicatedSizeOfFiles() - keyInfo.getReplicatedSize()); --fileBucket[binIndex]; nsSummary.setFileSizeBucket(fileBucket); nsSummaryMap.put(parentObjectId, nsSummary); // Propagate upwards to all parents in the parent chain - propagateSizeUpwards(parentObjectId, -keyInfo.getDataSize(), -1, nsSummaryMap); + propagateSizeUpwards(parentObjectId, -keyInfo.getDataSize(), + -keyInfo.getReplicatedSize(), -1, nsSummaryMap); } protected void handleDeleteDirEvent(OmDirectoryInfo directoryInfo, @@ -222,10 +227,12 @@ protected void handleDeleteDirEvent(OmDirectoryInfo directoryInfo, // Decrement parent's totals by the deleted directory's totals parentNsSummary.setNumOfFiles(parentNsSummary.getNumOfFiles() - deletedDirSummary.getNumOfFiles()); parentNsSummary.setSizeOfFiles(parentNsSummary.getSizeOfFiles() - deletedDirSummary.getSizeOfFiles()); + parentNsSummary.setReplicatedSizeOfFiles( + parentNsSummary.getReplicatedSizeOfFiles() - deletedDirSummary.getReplicatedSizeOfFiles()); // Propagate the decrements upwards to all ancestors - propagateSizeUpwards(parentObjectId, -deletedDirSummary.getSizeOfFiles(), - -deletedDirSummary.getNumOfFiles(), nsSummaryMap); + propagateSizeUpwards(parentObjectId, -deletedDirSummary.getSizeOfFiles(), + -deletedDirSummary.getReplicatedSizeOfFiles(), -deletedDirSummary.getNumOfFiles(), nsSummaryMap); // Set the deleted directory's parentId to 0 (unlink it) deletedDirSummary.setParentId(0); @@ -274,7 +281,7 @@ protected boolean flushAndCommitUpdatedNSToDB(Map<Long, NSSummary> nsSummaryMap, * This ensures that when files are added/deleted, all ancestor directories * reflect the total changes in their sizeOfFiles, replicatedSizeOfFiles and numOfFiles fields.
*/ - protected void propagateSizeUpwards(long objectId, long sizeChange, + protected void propagateSizeUpwards(long objectId, long sizeChange, long replicatedSizeChange, int countChange, Map<Long, NSSummary> nsSummaryMap) throws IOException { // Get the current directory's NSSummary @@ -297,11 +304,12 @@ protected void propagateSizeUpwards(long objectId, long sizeChange, if (parentSummary != null) { // Update parent's totals parentSummary.setSizeOfFiles(parentSummary.getSizeOfFiles() + sizeChange); + parentSummary.setReplicatedSizeOfFiles(parentSummary.getReplicatedSizeOfFiles() + replicatedSizeChange); parentSummary.setNumOfFiles(parentSummary.getNumOfFiles() + countChange); nsSummaryMap.put(parentId, parentSummary); // Recursively propagate to grandparents - propagateSizeUpwards(parentId, sizeChange, countChange, nsSummaryMap); + propagateSizeUpwards(parentId, sizeChange, replicatedSizeChange, countChange, nsSummaryMap); } } } diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/upgrade/ReconLayoutFeature.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/upgrade/ReconLayoutFeature.java index bd0b52ae1833..e55e64105577 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/upgrade/ReconLayoutFeature.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/upgrade/ReconLayoutFeature.java @@ -34,7 +34,8 @@ public enum ReconLayoutFeature { UNHEALTHY_CONTAINER_REPLICA_MISMATCH(2, "Adding replica mismatch state to the unhealthy container table"), // HDDS-13432: Materialize NSSummary totals and rebuild tree on upgrade - NSSUMMARY_AGGREGATED_TOTALS(3, "Aggregated totals for NSSummary and auto-rebuild on upgrade"); + NSSUMMARY_AGGREGATED_TOTALS(3, "Aggregated totals for NSSummary and auto-rebuild on upgrade"), + REPLICATED_SIZE_OF_FILES(4, "Adds replicatedSizeOfFiles to NSSummary"); private final int version; private final String description; diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/upgrade/ReplicatedSizeOfFilesUpgradeAction.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/upgrade/ReplicatedSizeOfFilesUpgradeAction.java new file mode 100644 index 000000000000..8b61b179f1be --- /dev/null +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/upgrade/ReplicatedSizeOfFilesUpgradeAction.java @@ -0,0 +1,67 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package org.apache.hadoop.ozone.recon.upgrade; + +import com.google.inject.Injector; +import javax.sql.DataSource; +import org.apache.hadoop.ozone.recon.ReconGuiceServletContextListener; +import org.apache.hadoop.ozone.recon.tasks.ReconTaskController; +import org.apache.hadoop.ozone.recon.tasks.ReconTaskReInitializationEvent; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Upgrade action for the REPLICATED_SIZE_OF_FILES layout feature. + * The action triggers a full rebuild of the NSSummary tree, ensuring that the new replicatedSizeOfFiles field is + * correctly populated for all objects. + */ +@UpgradeActionRecon(feature = ReconLayoutFeature.REPLICATED_SIZE_OF_FILES, + type = ReconUpgradeAction.UpgradeActionType.FINALIZE) +public class ReplicatedSizeOfFilesUpgradeAction implements ReconUpgradeAction { + + private static final Logger LOG = LoggerFactory.getLogger(ReplicatedSizeOfFilesUpgradeAction.class); + + @Override + public void execute(DataSource dataSource) { + try { + Injector injector = ReconGuiceServletContextListener.getStaticInjector(); + if (injector == null) { + throw new IllegalStateException("Guice injector is not initialized. Cannot perform NSSummary rebuild."); + } + ReconTaskController reconTaskController = injector.getInstance(ReconTaskController.class); + LOG.info("Starting full rebuild of NSSummary for REPLICATED_SIZE_OF_FILES upgrade..."); + ReconTaskController.ReInitializationResult result = reconTaskController.queueReInitializationEvent( + ReconTaskReInitializationEvent.ReInitializationReason.MANUAL_TRIGGER); + if (result != ReconTaskController.ReInitializationResult.SUCCESS) { + LOG.error( + "Failed to queue reinitialization event for manual trigger (result: {}); the rebuild could not be " + + "started by the ReplicatedSizeOfFilesUpgrade action and will be retried as part of the " + + "syncDataFromOM scheduler task.", result); + } else { + LOG.info("Queued full rebuild of NSSummary for REPLICATED_SIZE_OF_FILES upgrade."); + } + } catch (Exception e) { + LOG.error("Error during NSSummary rebuild for REPLICATED_SIZE_OF_FILES upgrade.", e); + throw new RuntimeException("Failed to rebuild NSSummary during upgrade", e); + } + } + + @Override + public UpgradeActionType getType() { + return UpgradeActionType.FINALIZE; + } +} diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithFSO.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithFSO.java index 227330c83f67..54e801baa34f 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithFSO.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithFSO.java @@ -334,22 +334,22 @@ public class TestNSSummaryEndpointWithFSO { // some expected answers private static final long ROOT_DATA_SIZE = KEY_ONE_SIZE + KEY_TWO_SIZE + - KEY_THREE_SIZE + KEY_FOUR_SIZE + KEY_FIVE_SIZE + KEY_SIX_SIZE + + KEY_THREE_SIZE + KEY_FOUR_SIZE + KEY_FIVE_SIZE + KEY_SIX_SIZE + KEY_SEVEN_SIZE + KEY_EIGHT_SIZE + KEY_NINE_SIZE + KEY_TEN_SIZE + KEY_ELEVEN_SIZE; private static final long VOL_DATA_SIZE = KEY_ONE_SIZE + KEY_TWO_SIZE + - KEY_THREE_SIZE + KEY_FOUR_SIZE + KEY_FIVE_SIZE + KEY_SIX_SIZE; + KEY_THREE_SIZE + KEY_FOUR_SIZE + KEY_FIVE_SIZE + KEY_SIX_SIZE + KEY_SEVEN_SIZE; private static final long VOL_TWO_DATA_SIZE = KEY_EIGHT_SIZE + KEY_NINE_SIZE + KEY_TEN_SIZE + KEY_ELEVEN_SIZE; private static final long BUCKET_ONE_DATA_SIZE = KEY_ONE_SIZE + KEY_TWO_SIZE + -
KEY_THREE_SIZE + KEY_SIX_SIZE; + KEY_THREE_SIZE + KEY_SIX_SIZE + KEY_SEVEN_SIZE; private static final long BUCKET_TWO_DATA_SIZE = KEY_FOUR_SIZE + KEY_FIVE_SIZE; private static final long DIR_ONE_DATA_SIZE = KEY_TWO_SIZE + - KEY_THREE_SIZE + KEY_SIX_SIZE; + KEY_THREE_SIZE + KEY_SIX_SIZE + KEY_SEVEN_SIZE; @BeforeEach public void setUp() throws Exception { @@ -675,10 +675,10 @@ public void testQuotaUsage() throws Exception { @Test public void testFileSizeDist() throws Exception { - checkFileSizeDist(ROOT_PATH, 2, 3, 4, 1); - checkFileSizeDist(VOL_PATH, 2, 1, 2, 1); - checkFileSizeDist(BUCKET_ONE_PATH, 1, 1, 1, 1); - checkFileSizeDist(DIR_ONE_PATH, 0, 1, 1, 1); + checkFileSizeDist(ROOT_PATH, 2, 3, 4, 2); + checkFileSizeDist(VOL_PATH, 2, 1, 2, 2); + checkFileSizeDist(BUCKET_ONE_PATH, 1, 1, 1, 2); + checkFileSizeDist(DIR_ONE_PATH, 0, 1, 1, 2); } public void checkFileSizeDist(String path, int bin0, @@ -898,6 +898,17 @@ private void populateOMDB() throws Exception { VOL_OBJECT_ID, KEY_SIX_SIZE, getBucketLayout()); + writeKeyToOm(reconOMMetadataManager, + KEY_SEVEN, + BUCKET_ONE, + VOL, + FILE_SEVEN, + KEY_SEVEN_OBJECT_ID, + DIR_ONE_OBJECT_ID, + BUCKET_ONE_OBJECT_ID, + VOL_OBJECT_ID, + KEY_SEVEN_SIZE, + getBucketLayout()); writeKeyToOm(reconOMMetadataManager, KEY_EIGHT, BUCKET_THREE, diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOmDBInsightEndPoint.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOmDBInsightEndPoint.java index 7a705a52b243..6e482db2ac94 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOmDBInsightEndPoint.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOmDBInsightEndPoint.java @@ -39,6 +39,8 @@ import java.nio.file.Path; import java.sql.Timestamp; import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; import java.util.Collections; import java.util.HashSet; import java.util.List; @@ -49,6 +51,7 @@ import java.util.stream.Collectors; import javax.ws.rs.core.Response; import org.apache.hadoop.hdds.client.BlockID; +import org.apache.hadoop.hdds.client.ECReplicationConfig; import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.client.StandaloneReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; @@ -64,6 +67,7 @@ import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; +import org.apache.hadoop.ozone.om.helpers.QuotaUtil; import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; import org.apache.hadoop.ozone.recon.ReconTestInjector; import org.apache.hadoop.ozone.recon.ReconUtils; @@ -88,9 +92,12 @@ import org.apache.hadoop.ozone.recon.tasks.NSSummaryTaskWithOBS; import org.apache.ozone.recon.schema.generated.tables.daos.GlobalStatsDao; import org.apache.ozone.recon.schema.generated.tables.pojos.GlobalStats; +import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.io.TempDir; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.MethodSource; /** * Unit test for OmDBInsightEndPoint. 
@@ -106,9 +113,7 @@ public class TestOmDBInsightEndPoint extends AbstractReconSqlDBTest { private Random random = new Random(); private OzoneConfiguration ozoneConfiguration; private Set<Long> generatedIds = new HashSet<>(); - private static final String VOLUME_ONE = "volume1"; - private static final String OBS_BUCKET = "obs-bucket"; private static final String FSO_BUCKET = "fso-bucket"; private static final String EMPTY_OBS_BUCKET = "empty-obs-bucket"; @@ -250,6 +255,30 @@ public TestOmDBInsightEndPoint() { super(); } + public static Collection<Object[]> replicationConfigValues() { + return Arrays.asList(new Object[][]{ + {ReplicationConfig.fromProtoTypeAndFactor(HddsProtos.ReplicationType.RATIS, + HddsProtos.ReplicationFactor.THREE)}, + {ReplicationConfig.fromProtoTypeAndFactor(HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.ONE)}, + {ReplicationConfig.fromProto(HddsProtos.ReplicationType.EC, null, + toProto(3, 2, ECReplicationConfig.EcCodec.RS, 1024))}, + {ReplicationConfig.fromProto(HddsProtos.ReplicationType.EC, null, + toProto(6, 3, ECReplicationConfig.EcCodec.RS, 1024))}, + {ReplicationConfig.fromProto(HddsProtos.ReplicationType.EC, null, + toProto(10, 4, ECReplicationConfig.EcCodec.XOR, 4096))} + }); + } + + public static HddsProtos.ECReplicationConfig toProto(int data, int parity, ECReplicationConfig.EcCodec codec, + int ecChunkSize) { + return HddsProtos.ECReplicationConfig.newBuilder() + .setData(data) + .setParity(parity) + .setCodec(codec.toString()) + .setEcChunkSize(ecChunkSize) + .build(); + } + private long generateUniqueRandomLong() { long newValue; do { @@ -312,6 +341,17 @@ public void setUp() throws Exception { nsSummaryTaskWithFSO.reprocessWithFSO(reconOMMetadataManager); } + /** + * Releases resources (network sockets, database files) after each test run. + * This is critical to prevent resource leaks between tests, which would otherwise cause "Too many open files" errors.
+ */ + @AfterEach + public void tearDown() throws Exception { + if (reconOMMetadataManager != null) { + reconOMMetadataManager.stop(); + } + } + @SuppressWarnings("methodlength") private void setUpOmData() throws Exception { List<OmKeyLocationInfo> omKeyLocationInfoList = new ArrayList<>(); @@ -1385,14 +1425,24 @@ public void testGetDeletedKeysWithBothPrevKeyAndStartPrefixProvided() private OmKeyInfo getOmKeyInfo(String volumeName, String bucketName, String keyName, boolean isFile) { + return buildOmKeyInfo(volumeName, bucketName, keyName, isFile, + StandaloneReplicationConfig.getInstance(HddsProtos.ReplicationFactor.ONE)); + } + + private OmKeyInfo getOmKeyInfo(String volumeName, String bucketName, + String keyName, boolean isFile, ReplicationConfig replicationConfig) { + return buildOmKeyInfo(volumeName, bucketName, keyName, isFile, replicationConfig); + } + + private OmKeyInfo buildOmKeyInfo(String volumeName, String bucketName, + String keyName, boolean isFile, ReplicationConfig replicationConfig) { return new OmKeyInfo.Builder() .setVolumeName(volumeName) .setBucketName(bucketName) .setKeyName(keyName) .setFile(isFile) .setObjectID(generateUniqueRandomLong()) - .setReplicationConfig(StandaloneReplicationConfig - .getInstance(HddsProtos.ReplicationFactor.ONE)) + .setReplicationConfig(replicationConfig) .setDataSize(random.nextLong()) .build(); } @@ -1497,15 +1547,17 @@ public void testGetDeletedDirInfo() throws Exception { keyInsightInfoResp.getLastKey()); } - @Test - public void testGetDirectorySizeInfo() throws Exception { + @ParameterizedTest + @MethodSource("replicationConfigValues") + public void testGetDirectorySizeInfo(ReplicationConfig replicationConfig) throws Exception { OmKeyInfo omKeyInfo1 = - getOmKeyInfo("sampleVol", "bucketOne", "dir1", false); + getOmKeyInfo("sampleVol", "bucketOne", "dir1", false, replicationConfig); OmKeyInfo omKeyInfo2 = - getOmKeyInfo("sampleVol", "bucketTwo", "dir2", false); + getOmKeyInfo("sampleVol", "bucketTwo", "dir2", false, replicationConfig); OmKeyInfo omKeyInfo3 = - getOmKeyInfo("sampleVol", "bucketThree", "dir3", false); + getOmKeyInfo("sampleVol", "bucketThree", "dir3", false, + replicationConfig); // Add 3 entries to deleted dir table for directory dir1, dir2 and dir3 // having object id 1, 2 and 3 respectively @@ -1519,11 +1571,11 @@ public void testGetDirectorySizeInfo() throws Exception { // Prepare NS summary data and populate the table Table<Long, NSSummary> table = omdbInsightEndpoint.getNsSummaryTable(); // Set size of files to 5 for directory object id 1 - table.put(omKeyInfo1.getObjectID(), getNsSummary(5L)); + table.put(omKeyInfo1.getObjectID(), getNsSummary(5L, replicationConfig)); // Set size of files to 6 for directory object id 2 - table.put(omKeyInfo2.getObjectID(), getNsSummary(6L)); + table.put(omKeyInfo2.getObjectID(), getNsSummary(6L, replicationConfig)); // Set size of files to 7 for directory object id 3 - table.put(omKeyInfo3.getObjectID(), getNsSummary(7L)); + table.put(omKeyInfo3.getObjectID(), getNsSummary(7L, replicationConfig)); Response deletedDirInfo = omdbInsightEndpoint.getDeletedDirInfo(-1, ""); KeyInsightInfoResponse keyInsightInfoResp = @@ -1534,15 +1586,23 @@ public void testGetDirectorySizeInfo() throws Exception { (KeyInsightInfoResponse) deletedDirInfo.getEntity(); // Assert the total size under directory dir1 is 5L assertEquals(5L, keyInsightInfoResp.getDeletedDirInfoList().get(0).getSize()); + assertEquals(QuotaUtil.getReplicatedSize(5L, replicationConfig), + keyInsightInfoResp.getDeletedDirInfoList().get(0).getReplicatedSize()); // Assert the total size under directory dir2 is 6L
assertEquals(6L, keyInsightInfoResp.getDeletedDirInfoList().get(1).getSize()); + assertEquals(QuotaUtil.getReplicatedSize(6L, replicationConfig), + keyInsightInfoResp.getDeletedDirInfoList().get(1).getReplicatedSize()); // Assert the total size under directory dir3 is 7L assertEquals(7L, keyInsightInfoResp.getDeletedDirInfoList().get(2).getSize()); + assertEquals(QuotaUtil.getReplicatedSize(7L, replicationConfig), + keyInsightInfoResp.getDeletedDirInfoList().get(2).getReplicatedSize()); // Assert the total of all the deleted directories is 18L assertEquals(18L, keyInsightInfoResp.getUnreplicatedDataSize()); + assertEquals(QuotaUtil.getReplicatedSize(18L, replicationConfig), + keyInsightInfoResp.getReplicatedDataSize()); } @Test @@ -2008,9 +2068,10 @@ public void testListKeysLegacyBucketWithFSEnabledAndPagination() { assertEquals("", listKeysResponse.getLastKey()); } - private NSSummary getNsSummary(long size) { + private NSSummary getNsSummary(long size, ReplicationConfig replicationConfig) { NSSummary summary = new NSSummary(); summary.setSizeOfFiles(size); + summary.setReplicatedSizeOfFiles(QuotaUtil.getReplicatedSize(size, replicationConfig)); return summary; } } diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/common/CommonUtils.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/common/CommonUtils.java index df211867264c..eb3671c0fe79 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/common/CommonUtils.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/common/CommonUtils.java @@ -90,7 +90,7 @@ public void testNSSummaryBasicInfoRoot( assertEquals(2, rootResponseObj.getCountStats().getNumVolume()); assertEquals(4, rootResponseObj.getCountStats().getNumBucket()); assertEquals(5, rootResponseObj.getCountStats().getNumTotalDir()); - assertEquals(10, rootResponseObj.getCountStats().getNumTotalKey()); + assertEquals(11, rootResponseObj.getCountStats().getNumTotalKey()); assertEquals("USER", rootResponseObj.getObjectDBInfo().getAcls().get(0).getType()); assertEquals("WRITE", rootResponseObj.getObjectDBInfo().getAcls().get(0) @@ -112,7 +112,7 @@ public void testNSSummaryBasicInfoVolume( volResponseObj.getEntityType()); assertEquals(2, volResponseObj.getCountStats().getNumBucket()); assertEquals(4, volResponseObj.getCountStats().getNumTotalDir()); - assertEquals(6, volResponseObj.getCountStats().getNumTotalKey()); + assertEquals(7, volResponseObj.getCountStats().getNumTotalKey()); assertEquals("TestUser", ((VolumeObjectDBInfo) volResponseObj. getObjectDBInfo()).getAdmin()); assertEquals("TestUser", ((VolumeObjectDBInfo) volResponseObj. 
@@ -130,7 +130,7 @@ public void testNSSummaryBasicInfoBucketOne(BucketLayout bucketLayout, (NamespaceSummaryResponse) bucketOneResponse.getEntity(); assertEquals(EntityType.BUCKET, bucketOneObj.getEntityType()); assertEquals(4, bucketOneObj.getCountStats().getNumTotalDir()); - assertEquals(4, bucketOneObj.getCountStats().getNumTotalKey()); + assertEquals(5, bucketOneObj.getCountStats().getNumTotalKey()); assertEquals("vol", ((BucketObjectDBInfo) bucketOneObj.getObjectDBInfo()).getVolumeName()); assertEquals(StorageType.DISK, @@ -172,7 +172,7 @@ public void testNSSummaryBasicInfoDir( (NamespaceSummaryResponse) dirOneResponse.getEntity(); assertEquals(EntityType.DIRECTORY, dirOneObj.getEntityType()); assertEquals(3, dirOneObj.getCountStats().getNumTotalDir()); - assertEquals(3, dirOneObj.getCountStats().getNumTotalKey()); + assertEquals(4, dirOneObj.getCountStats().getNumTotalKey()); assertEquals("dir1", dirOneObj.getObjectDBInfo().getName()); assertEquals(0, dirOneObj.getObjectDBInfo().getMetadata().size()); assertEquals(0, dirOneObj.getObjectDBInfo().getQuotaInBytes()); diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/TestReconNamespaceSummaryManagerImpl.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/TestReconNamespaceSummaryManagerImpl.java index cd5618cc9a81..b4e62e9d03c5 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/TestReconNamespaceSummaryManagerImpl.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/TestReconNamespaceSummaryManagerImpl.java @@ -111,9 +111,9 @@ public void testInitNSSummaryTable() throws IOException { private void putThreeNSMetadata() throws IOException { HashMap<Long, NSSummary> hmap = new HashMap<>(); - hmap.put(1L, new NSSummary(1, 2, testBucket, TEST_CHILD_DIR, "dir1", -1)); - hmap.put(2L, new NSSummary(3, 4, testBucket, TEST_CHILD_DIR, "dir2", -1)); - hmap.put(3L, new NSSummary(5, 6, testBucket, TEST_CHILD_DIR, "dir3", -1)); + hmap.put(1L, new NSSummary(1, 2, 2 * 3, testBucket, TEST_CHILD_DIR, "dir1", -1)); + hmap.put(2L, new NSSummary(3, 4, 4 * 3, testBucket, TEST_CHILD_DIR, "dir2", -1)); + hmap.put(3L, new NSSummary(5, 6, 6 * 3, testBucket, TEST_CHILD_DIR, "dir3", -1)); RDBBatchOperation rdbBatchOperation = new RDBBatchOperation(); for (Map.Entry<Long, NSSummary> entry: hmap.entrySet()) { reconNamespaceSummaryManager.batchStoreNSSummaries(rdbBatchOperation, diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithFSO.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithFSO.java index fcee68932763..5978e11ba658 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithFSO.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithFSO.java @@ -17,6 +17,7 @@ package org.apache.hadoop.ozone.recon.tasks; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.THREE; import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX; import static org.apache.hadoop.ozone.om.codec.OMDBDefinition.FILE_TABLE; import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_NSSUMMARY_FLUSH_TO_DB_MAX_THRESHOLD; @@ -34,6 +35,7 @@ import java.util.List; import java.util.Set; import org.apache.commons.lang3.tuple.Pair; +import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import
org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; @@ -478,16 +480,16 @@ void testProcessWithFSOFlushAfterThresholdAndFailureOfLastElement() Mockito.when(event4.getAction()).thenReturn(OMDBUpdateEvent.OMDBUpdateAction.PUT); OmKeyInfo keyInfo1 = new OmKeyInfo.Builder().setParentObjectID(1).setObjectID(2).setKeyName("key1") - .setBucketName("bucket1") + .setBucketName("bucket1").setReplicationConfig(RatisReplicationConfig.getInstance(THREE)) .setDataSize(1024).setVolumeName("volume1").build(); OmKeyInfo keyInfo2 = new OmKeyInfo.Builder().setParentObjectID(1).setObjectID(3).setKeyName("key2") - .setBucketName("bucket1") + .setBucketName("bucket1").setReplicationConfig(RatisReplicationConfig.getInstance(THREE)) .setDataSize(1024).setVolumeName("volume1").build(); OmKeyInfo keyInfo3 = new OmKeyInfo.Builder().setParentObjectID(1).setObjectID(3).setKeyName("key2") - .setBucketName("bucket1") + .setBucketName("bucket1").setReplicationConfig(RatisReplicationConfig.getInstance(THREE)) .setDataSize(1024).setVolumeName("volume1").build(); OmKeyInfo keyInfo4 = new OmKeyInfo.Builder().setParentObjectID(1).setObjectID(3).setKeyName("key2") - .setBucketName("bucket1") + .setBucketName("bucket1").setReplicationConfig(RatisReplicationConfig.getInstance(THREE)) .setDataSize(1024).setVolumeName("volume1").build(); Mockito.when(event1.getValue()).thenReturn(keyInfo1); Mockito.when(event2.getValue()).thenReturn(keyInfo2); diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/upgrade/TestReplicatedSizeOfFilesUpgradeAction.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/upgrade/TestReplicatedSizeOfFilesUpgradeAction.java new file mode 100644 index 000000000000..f4894dc9ae38 --- /dev/null +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/upgrade/TestReplicatedSizeOfFilesUpgradeAction.java @@ -0,0 +1,99 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.ozone.recon.upgrade; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.mockito.Mockito.any; +import static org.mockito.Mockito.doThrow; +import static org.mockito.Mockito.mockStatic; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +import com.google.inject.Injector; +import javax.sql.DataSource; +import org.apache.hadoop.ozone.recon.ReconGuiceServletContextListener; +import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager; +import org.apache.hadoop.ozone.recon.spi.ReconNamespaceSummaryManager; +import org.apache.hadoop.ozone.recon.tasks.ReconTaskController; +import org.apache.hadoop.ozone.recon.tasks.ReconTaskReInitializationEvent; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; +import org.mockito.Mock; +import org.mockito.MockedStatic; +import org.mockito.junit.jupiter.MockitoExtension; + +/** + * Test class for ReplicatedSizeOfFilesUpgradeAction. + */ +@ExtendWith(MockitoExtension.class) +public class TestReplicatedSizeOfFilesUpgradeAction { + + private ReplicatedSizeOfFilesUpgradeAction upgradeAction; + @Mock + private DataSource mockDataSource; + @Mock + private Injector mockInjector; + @Mock + private ReconNamespaceSummaryManager mockNsSummaryManager; + @Mock + private ReconOMMetadataManager mockOmMetadataManager; + @Mock + private ReconTaskController mockReconTaskController; + + @BeforeEach + public void setUp() { + upgradeAction = new ReplicatedSizeOfFilesUpgradeAction(); + } + + @Test + public void testExecuteSuccessfullyRebuildsNSSummary() { + try (MockedStatic<ReconGuiceServletContextListener> mockStaticContext = + mockStatic(ReconGuiceServletContextListener.class)) { + mockStaticContext.when(ReconGuiceServletContextListener::getStaticInjector).thenReturn(mockInjector); + when(mockInjector.getInstance(ReconTaskController.class)).thenReturn(mockReconTaskController); + upgradeAction.execute(mockDataSource); + + // Verify that a reinitialization event was queued exactly once. + verify(mockReconTaskController, times(1)).queueReInitializationEvent(any()); + } + } + + @Test + public void testExecuteThrowsRuntimeExceptionOnRebuildFailure() { + try (MockedStatic<ReconGuiceServletContextListener> mockStaticContext = + mockStatic(ReconGuiceServletContextListener.class)) { + mockStaticContext.when(ReconGuiceServletContextListener::getStaticInjector).thenReturn(mockInjector); + when(mockInjector.getInstance(ReconTaskController.class)).thenReturn(mockReconTaskController); + + // Simulate a failure during the rebuild process + doThrow(new RuntimeException("Simulated rebuild error")).when(mockReconTaskController) + .queueReInitializationEvent(any(ReconTaskReInitializationEvent.ReInitializationReason.class)); + + RuntimeException thrown = assertThrows(RuntimeException.class, () -> upgradeAction.execute(mockDataSource)); + assertEquals("Failed to rebuild NSSummary during upgrade", thrown.getMessage()); + } + } + + @Test + public void testGetTypeReturnsFinalize() { + assertEquals(ReconUpgradeAction.UpgradeActionType.FINALIZE, upgradeAction.getType()); + } +}
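Reviewer note (not part of the patch): the parameterized tests above derive their expected values from QuotaUtil.getReplicatedSize, which maps an unreplicated byte count to on-disk bytes for a given replication config. A minimal, self-contained sketch of that relationship, runnable against the Ozone client libraries; the class name and printed comments are illustrative assumptions, not code from this change:

import org.apache.hadoop.hdds.client.ECReplicationConfig;
import org.apache.hadoop.hdds.client.ReplicationConfig;
import org.apache.hadoop.hdds.client.ReplicationFactor;
import org.apache.hadoop.hdds.client.ReplicationType;
import org.apache.hadoop.ozone.om.helpers.QuotaUtil;

// Hypothetical demo class, only to show what the tests assert.
public final class ReplicatedSizeDemo {
  public static void main(String[] args) {
    // Total unreplicated sizeOfFiles under a directory, as in the tests above.
    long sizeOfFiles = 10L;

    // RATIS/THREE keeps three full copies, so the replicated size is 3x the data size.
    ReplicationConfig ratisThree =
        ReplicationConfig.fromTypeAndFactor(ReplicationType.RATIS, ReplicationFactor.THREE);
    System.out.println(QuotaUtil.getReplicatedSize(sizeOfFiles, ratisThree)); // prints 30

    // EC RS-3-2 stores data plus parity stripes instead of full copies, so the
    // overhead differs from plain replication; the exact value comes from the
    // stripe math inside QuotaUtil.
    ReplicationConfig ecRsThreeTwo = new ECReplicationConfig("RS-3-2-1024k");
    System.out.println(QuotaUtil.getReplicatedSize(sizeOfFiles, ecRsThreeTwo));
  }
}

This is why the tests can assert entity.getReplicatedDataSize() per replication config without hard-coding constants.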
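A second note (also not part of the patch) on the invariant NSSummaryTaskDbEventHandler maintains: after any put or delete event, every ancestor directory's numOfFiles, sizeOfFiles, and the new replicatedSizeOfFiles move by the same delta applied where the key landed, which is what lets calculateDUUnderObject collapse to a single NSSummary lookup. A self-contained sketch of that propagation under those assumptions; Node, propagate, and the object IDs are hypothetical stand-ins, not Recon classes:

import java.util.HashMap;
import java.util.Map;

// Hypothetical stand-in for NSSummary: subtree totals keyed by directory object ID.
final class Node {
  long parentId; // 0 means no parent, mirroring NSSummary's convention
  long sizeOfFiles;
  long replicatedSizeOfFiles; // the field this patch adds
  int numOfFiles;
  Node(long parentId) { this.parentId = parentId; }
}

public final class PropagationSketch {
  // Mirrors the idea of propagateSizeUpwards: apply the same delta to every ancestor.
  static void propagate(Map<Long, Node> tree, long id, long size, long repSize, int count) {
    Node n = tree.get(id);
    if (n == null || n.parentId == 0) {
      return; // reached the root or an unknown node; nothing left to update
    }
    Node parent = tree.get(n.parentId);
    if (parent == null) {
      return;
    }
    parent.sizeOfFiles += size;
    parent.replicatedSizeOfFiles += repSize;
    parent.numOfFiles += count;
    propagate(tree, n.parentId, size, repSize, count);
  }

  public static void main(String[] args) {
    Map<Long, Node> tree = new HashMap<>();
    tree.put(1L, new Node(0)); // dir1
    tree.put(2L, new Node(1)); // dir1/dir2
    // A put event for a 4-byte key with replicated size 12 (e.g. RATIS/THREE) under dir2:
    Node dir2 = tree.get(2L);
    dir2.sizeOfFiles += 4;
    dir2.replicatedSizeOfFiles += 12;
    dir2.numOfFiles += 1;
    propagate(tree, 2L, 4, 12, 1);
    // dir1 now reports the same totals, which is what the DU endpoint reads directly.
    System.out.println(tree.get(1L).replicatedSizeOfFiles); // prints 12
  }
}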