diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/RepeatedOmKeyInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/RepeatedOmKeyInfo.java index 467810fc306e..f0d52bccb01e 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/RepeatedOmKeyInfo.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/RepeatedOmKeyInfo.java @@ -20,6 +20,7 @@ import java.util.ArrayList; import java.util.List; +import org.apache.commons.lang3.tuple.ImmutablePair; import org.apache.hadoop.hdds.utils.db.Codec; import org.apache.hadoop.hdds.utils.db.DelegatedCodec; import org.apache.hadoop.hdds.utils.db.CopyObject; @@ -72,6 +73,24 @@ public List getOmKeyInfoList() { return omKeyInfoList; } + /** + * Returns a pair of long values representing the replicated size and + * unreplicated size of all the keys in the list. + */ + public ImmutablePair getTotalSize() { + long replicatedSize = 0; + long unreplicatedSize = 0; + + for (OmKeyInfo omKeyInfo : omKeyInfoList) { + if (omKeyInfo.getReplicatedSize() != 0) { + replicatedSize += omKeyInfo.getReplicatedSize(); + } + unreplicatedSize += omKeyInfo.getDataSize(); + } + return new ImmutablePair(unreplicatedSize, replicatedSize); + } + + // HDDS-7041. Return a new ArrayList to avoid ConcurrentModifyException public List cloneOmKeyInfoList() { return new ArrayList<>(omKeyInfoList); diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java index b662dd7c17aa..2c714786623d 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java @@ -54,6 +54,7 @@ import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.OPEN_FILE_TABLE; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.OPEN_KEY_TABLE; +import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.DELETED_TABLE; import static org.apache.hadoop.ozone.recon.ReconConstants.DEFAULT_FETCH_COUNT; import static org.apache.hadoop.ozone.recon.ReconConstants.RECON_QUERY_LIMIT; import static org.apache.hadoop.ozone.recon.ReconConstants.RECON_QUERY_PREVKEY; @@ -309,6 +310,8 @@ private void getPendingForDeletionKeyInfo( deletedKeyAndDirInsightInfo.getRepeatedOmKeyInfoList(); Table deletedTable = omMetadataManager.getDeletedTable(); + // Create a HashMap for the keysSummary + Map keysSummary = new HashMap<>(); try ( TableIterator> @@ -345,6 +348,10 @@ private void getPendingForDeletionKeyInfo( break; } } + // Create the keysSummary for deleted keys + createKeysSummaryForDeletedKey(keysSummary); + // Set the keysSummary and lastKey in the response + deletedKeyAndDirInsightInfo.setKeysSummary(keysSummary); deletedKeyAndDirInsightInfo.setLastKey(lastKey); } catch (IOException ex) { throw new WebApplicationException(ex, @@ -422,6 +429,30 @@ public Response getDeletedKeyInfo( return Response.ok(deletedKeyInsightInfo).build(); } + /** + * Creates a keys summary for deleted keys and updates the provided + * keysSummary map. Calculates the total number of deleted keys, replicated + * data size, and unreplicated data size. + * + * @param keysSummary A map to store the keys summary information. 
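+   *                    The map is populated with {@code totalDeletedKeys},
+   *                    {@code totalReplicatedDataSize} and
+   *                    {@code totalUnreplicatedDataSize}, read from the Recon
+   *                    GlobalStats table.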
+ */ + private void createKeysSummaryForDeletedKey(Map keysSummary) { + // Fetch the necessary metrics for deleted keys + Long replicatedSizeDeleted = getValueFromId(globalStatsDao.findById( + OmTableInsightTask.getReplicatedSizeKeyFromTable(DELETED_TABLE))); + Long unreplicatedSizeDeleted = getValueFromId(globalStatsDao.findById( + OmTableInsightTask.getUnReplicatedSizeKeyFromTable(DELETED_TABLE))); + Long deletedKeyCount = getValueFromId(globalStatsDao.findById( + OmTableInsightTask.getTableCountKeyFromTable(DELETED_TABLE))); + + // Calculate the total number of deleted keys + keysSummary.put("totalDeletedKeys", deletedKeyCount); + // Calculate the total replicated and unreplicated sizes + keysSummary.put("totalReplicatedDataSize", replicatedSizeDeleted); + keysSummary.put("totalUnreplicatedDataSize", unreplicatedSizeDeleted); + } + + private void getPendingForDeletionDirInfo( int limit, String prevKey, KeyInsightInfoResponse pendingForDeletionKeyInfo) { diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/OmTableInsightTask.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/OmTableInsightTask.java index fb4e44126f69..c28f5949f1df 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/OmTableInsightTask.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/OmTableInsightTask.java @@ -27,6 +27,7 @@ import org.apache.hadoop.hdds.utils.db.TableIterator; import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager; import org.hadoop.ozone.recon.schema.tables.daos.GlobalStatsDao; import org.hadoop.ozone.recon.schema.tables.pojos.GlobalStats; @@ -43,10 +44,12 @@ import java.util.List; import java.util.Map; + import java.util.Map.Entry; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.OPEN_KEY_TABLE; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.OPEN_FILE_TABLE; +import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.DELETED_TABLE; import static org.jooq.impl.DSL.currentTimestamp; import static org.jooq.impl.DSL.select; import static org.jooq.impl.DSL.using; @@ -158,8 +161,17 @@ private Triple getTableSizeAndCount( OmKeyInfo omKeyInfo = (OmKeyInfo) kv.getValue(); unReplicatedSize += omKeyInfo.getDataSize(); replicatedSize += omKeyInfo.getReplicatedSize(); + count++; + } + if (kv.getValue() instanceof RepeatedOmKeyInfo) { + RepeatedOmKeyInfo repeatedOmKeyInfo = (RepeatedOmKeyInfo) kv + .getValue(); + Pair result = repeatedOmKeyInfo.getTotalSize(); + unReplicatedSize += result.getRight(); + replicatedSize += result.getLeft(); + // Since we can have multiple deleted keys of same name + count += repeatedOmKeyInfo.getOmKeyInfoList().size(); } - count++; // Increment count for each row } } } @@ -174,6 +186,7 @@ public Collection getTablesToCalculateSize() { List taskTables = new ArrayList<>(); taskTables.add(OPEN_KEY_TABLE); taskTables.add(OPEN_FILE_TABLE); + taskTables.add(DELETED_TABLE); return taskTables; } @@ -196,80 +209,38 @@ public Collection getTaskTables() { @Override public Pair process(OMUpdateEventBatch events) { Iterator eventIterator = events.getIterator(); + // Initialize maps to store count and size information HashMap objectCountMap = initializeCountMap(); HashMap unreplicatedSizeCountMap = initializeSizeMap(false); HashMap replicatedSizeCountMap = 
initializeSizeMap(true); final Collection taskTables = getTaskTables(); final Collection sizeRelatedTables = getTablesToCalculateSize(); + // Process each update event while (eventIterator.hasNext()) { OMDBUpdateEvent omdbUpdateEvent = eventIterator.next(); String tableName = omdbUpdateEvent.getTable(); - if (!taskTables.contains(tableName)) { continue; } - String countKey = getTableCountKeyFromTable(tableName); - String unReplicatedSizeKey = - getUnReplicatedSizeKeyFromTable(tableName); - String replicatedSizeKey = - getReplicatedSizeKeyFromTable(tableName); - try { switch (omdbUpdateEvent.getAction()) { case PUT: - objectCountMap.computeIfPresent(countKey, (k, count) -> count + 1L); - - // Compute unreplicated and replicated sizes for size-related tables - if (sizeRelatedTables.contains(tableName) && - omdbUpdateEvent.getValue() instanceof OmKeyInfo) { - OmKeyInfo omKeyInfo = (OmKeyInfo) omdbUpdateEvent.getValue(); - unreplicatedSizeCountMap.computeIfPresent(unReplicatedSizeKey, - (k, size) -> size + omKeyInfo.getDataSize()); - replicatedSizeCountMap.computeIfPresent(replicatedSizeKey, - (k, size) -> size + omKeyInfo.getReplicatedSize()); - } + handlePutEvent(omdbUpdateEvent, tableName, sizeRelatedTables, + objectCountMap, unreplicatedSizeCountMap, replicatedSizeCountMap); break; case DELETE: - if (omdbUpdateEvent.getValue() != null) { - objectCountMap.computeIfPresent(countKey, - (k, count) -> count > 0 ? count - 1L : 0L); - - // Compute unreplicated and replicated sizes for size-related tables - if (sizeRelatedTables.contains(tableName) && - omdbUpdateEvent.getValue() instanceof OmKeyInfo) { - OmKeyInfo omKeyInfo = (OmKeyInfo) omdbUpdateEvent.getValue(); - unreplicatedSizeCountMap.computeIfPresent(unReplicatedSizeKey, - (k, size) -> - size > omKeyInfo.getDataSize() ? - size - omKeyInfo.getDataSize() : 0L); - replicatedSizeCountMap.computeIfPresent(replicatedSizeKey, - (k, size) -> - size > omKeyInfo.getReplicatedSize() ? 
- size - omKeyInfo.getReplicatedSize() : 0L); - } - } + handleDeleteEvent(omdbUpdateEvent, tableName, sizeRelatedTables, + objectCountMap, unreplicatedSizeCountMap, replicatedSizeCountMap); break; + case UPDATE: - if (omdbUpdateEvent.getValue() instanceof OmKeyInfo && - sizeRelatedTables.contains(tableName) && - omdbUpdateEvent.getOldValue() != null) { - OmKeyInfo oldKeyInfo = (OmKeyInfo) omdbUpdateEvent.getOldValue(); - OmKeyInfo newKeyInfo = (OmKeyInfo) omdbUpdateEvent.getValue(); - // Update key size by subtracting the oldSize and adding newSize - unreplicatedSizeCountMap.computeIfPresent(unReplicatedSizeKey, - (k, size) -> size - oldKeyInfo.getDataSize() + - newKeyInfo.getDataSize()); - replicatedSizeCountMap.computeIfPresent(replicatedSizeKey, - (k, size) -> size - oldKeyInfo.getReplicatedSize() + - newKeyInfo.getReplicatedSize()); - } else if (omdbUpdateEvent.getValue() != null) { - LOG.warn("Update event does not have the old Key Info for {}.", - omdbUpdateEvent.getKey()); - } + handleUpdateEvent(omdbUpdateEvent, tableName, sizeRelatedTables, + objectCountMap, unreplicatedSizeCountMap, replicatedSizeCountMap); break; + default: LOG.trace("Skipping DB update event : Table: {}, Action: {}", tableName, omdbUpdateEvent.getAction()); @@ -281,7 +252,7 @@ public Pair process(OMUpdateEventBatch events) { return new ImmutablePair<>(getTaskName(), false); } } - + // Write the updated count and size information to the database if (!objectCountMap.isEmpty()) { writeDataToDB(objectCountMap); } @@ -291,11 +262,157 @@ public Pair process(OMUpdateEventBatch events) { if (!replicatedSizeCountMap.isEmpty()) { writeDataToDB(replicatedSizeCountMap); } - LOG.info("Completed a 'process' run of OmTableInsightTask."); return new ImmutablePair<>(getTaskName(), true); } + private void handlePutEvent(OMDBUpdateEvent event, + String tableName, + Collection sizeRelatedTables, + HashMap objectCountMap, + HashMap unreplicatedSizeCountMap, + HashMap replicatedSizeCountMap) { + String countKey = getTableCountKeyFromTable(tableName); + String unReplicatedSizeKey = getUnReplicatedSizeKeyFromTable(tableName); + String replicatedSizeKey = getReplicatedSizeKeyFromTable(tableName); + + if (sizeRelatedTables.contains(tableName)) { + handleSizeRelatedTablePutEvent(event, countKey, unReplicatedSizeKey, + replicatedSizeKey, objectCountMap, + unreplicatedSizeCountMap, replicatedSizeCountMap); + } else { + objectCountMap.computeIfPresent(countKey, (k, count) -> count + 1L); + } + } + + private void handleSizeRelatedTablePutEvent( + OMDBUpdateEvent event, String countKey, + String unReplicatedSizeKey, String replicatedSizeKey, + HashMap objectCountMap, + HashMap unreplicatedSizeCountMap, + HashMap replicatedSizeCountMap) { + // Handle PUT for OpenKeyTable & OpenFileTable + if (event.getValue() instanceof OmKeyInfo) { + OmKeyInfo omKeyInfo = (OmKeyInfo) event.getValue(); + objectCountMap.computeIfPresent(countKey, (k, count) -> count + 1L); + unreplicatedSizeCountMap.computeIfPresent(unReplicatedSizeKey, + (k, size) -> size + omKeyInfo.getDataSize()); + replicatedSizeCountMap.computeIfPresent(replicatedSizeKey, + (k, size) -> size + omKeyInfo.getReplicatedSize()); + } else if (event.getValue() instanceof RepeatedOmKeyInfo) { + // Handle PUT for DeletedTable + RepeatedOmKeyInfo repeatedOmKeyInfo = + (RepeatedOmKeyInfo) event.getValue(); + objectCountMap.computeIfPresent(countKey, + (k, count) -> count + repeatedOmKeyInfo.getOmKeyInfoList().size()); + Pair result = repeatedOmKeyInfo.getTotalSize(); + 
unreplicatedSizeCountMap.computeIfPresent(unReplicatedSizeKey, + (k, size) -> size + result.getLeft()); + replicatedSizeCountMap.computeIfPresent(replicatedSizeKey, + (k, size) -> size + result.getRight()); + } + } + + private void handleDeleteEvent(OMDBUpdateEvent event, + String tableName, + Collection sizeRelatedTables, + HashMap objectCountMap, + HashMap unreplicatedSizeCountMap, + HashMap replicatedSizeCountMap) { + String countKey = getTableCountKeyFromTable(tableName); + String unReplicatedSizeKey = getUnReplicatedSizeKeyFromTable(tableName); + String replicatedSizeKey = getReplicatedSizeKeyFromTable(tableName); + + if (event.getValue() != null) { + if (sizeRelatedTables.contains(tableName)) { + handleSizeRelatedTableDeleteEvent(event, countKey, unReplicatedSizeKey, + replicatedSizeKey, objectCountMap, + unreplicatedSizeCountMap, replicatedSizeCountMap); + } else { + objectCountMap.computeIfPresent(countKey, + (k, count) -> count > 0 ? count - 1L : 0L); + } + } + } + + private void handleSizeRelatedTableDeleteEvent( + OMDBUpdateEvent event, String countKey, + String unReplicatedSizeKey, String replicatedSizeKey, + HashMap objectCountMap, + HashMap unreplicatedSizeCountMap, + HashMap replicatedSizeCountMap) { + if (event.getValue() instanceof OmKeyInfo) { + // Handle DELETE for OpenKeyTable & OpenFileTable + OmKeyInfo omKeyInfo = (OmKeyInfo) event.getValue(); + objectCountMap.computeIfPresent(countKey, + (k, count) -> count > 0 ? count - 1L : 0L); + unreplicatedSizeCountMap.computeIfPresent(unReplicatedSizeKey, + (k, size) -> size > omKeyInfo.getDataSize() ? + size - omKeyInfo.getDataSize() : 0L); + replicatedSizeCountMap.computeIfPresent(replicatedSizeKey, + (k, size) -> size > omKeyInfo.getReplicatedSize() ? + size - omKeyInfo.getReplicatedSize() : 0L); + } else if (event.getValue() instanceof RepeatedOmKeyInfo) { + // Handle DELETE for DeletedTable + RepeatedOmKeyInfo repeatedOmKeyInfo = + (RepeatedOmKeyInfo) event.getValue(); + objectCountMap.computeIfPresent(countKey, (k, count) -> + count > 0 ? count - repeatedOmKeyInfo.getOmKeyInfoList().size() : 0L); + Pair result = repeatedOmKeyInfo.getTotalSize(); + unreplicatedSizeCountMap.computeIfPresent(unReplicatedSizeKey, + (k, size) -> size > result.getLeft() ? size - result.getLeft() : 0L); + replicatedSizeCountMap.computeIfPresent(replicatedSizeKey, + (k, size) -> size > result.getRight() ? size - result.getRight() : + 0L); + } + } + + private void handleUpdateEvent(OMDBUpdateEvent event, + String tableName, + Collection sizeRelatedTables, + HashMap objectCountMap, + HashMap unreplicatedSizeCountMap, + HashMap replicatedSizeCountMap) { + String countKey = getTableCountKeyFromTable(tableName); + String unReplicatedSizeKey = getUnReplicatedSizeKeyFromTable(tableName); + String replicatedSizeKey = getReplicatedSizeKeyFromTable(tableName); + // In Update event the count for the table will not change. So we don't + // need to update the count. 
Except for RepeatedOmKeyInfo, for which the + // size of omKeyInfoList can change + if (event.getValue() instanceof OmKeyInfo && event.getOldValue() != null && + sizeRelatedTables.contains(tableName)) { + // Handle UPDATE for OpenKeyTable & OpenFileTable + OmKeyInfo oldKeyInfo = (OmKeyInfo) event.getOldValue(); + OmKeyInfo newKeyInfo = (OmKeyInfo) event.getValue(); + unreplicatedSizeCountMap.computeIfPresent(unReplicatedSizeKey, + (k, size) -> size - oldKeyInfo.getDataSize() + + newKeyInfo.getDataSize()); + replicatedSizeCountMap.computeIfPresent(replicatedSizeKey, + (k, size) -> size - oldKeyInfo.getReplicatedSize() + + newKeyInfo.getReplicatedSize()); + } else if (event.getValue() instanceof RepeatedOmKeyInfo && + sizeRelatedTables.contains(tableName) && event.getOldValue() != null) { + // Handle UPDATE for DeletedTable + RepeatedOmKeyInfo oldRepeatedOmKeyInfo = + (RepeatedOmKeyInfo) event.getOldValue(); + RepeatedOmKeyInfo newRepeatedOmKeyInfo = + (RepeatedOmKeyInfo) event.getValue(); + objectCountMap.computeIfPresent(countKey, + (k, count) -> count > 0 ? + count - oldRepeatedOmKeyInfo.getOmKeyInfoList().size() + + newRepeatedOmKeyInfo.getOmKeyInfoList().size() : 0L); + Pair oldSize = oldRepeatedOmKeyInfo.getTotalSize(); + Pair newSize = newRepeatedOmKeyInfo.getTotalSize(); + unreplicatedSizeCountMap.computeIfPresent(unReplicatedSizeKey, + (k, size) -> size - oldSize.getLeft() + newSize.getLeft()); + replicatedSizeCountMap.computeIfPresent(replicatedSizeKey, + (k, size) -> size - oldSize.getRight() + newSize.getRight()); + } else if (event.getValue() != null) { + LOG.warn("Update event does not have the old Key Info for {}.", + event.getKey()); + } + } + private void writeDataToDB(Map dataMap) { List insertGlobalStats = new ArrayList<>(); diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/OMMetadataManagerTestUtils.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/OMMetadataManagerTestUtils.java index 838703bb593d..3f00eb2adc0c 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/OMMetadataManagerTestUtils.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/OMMetadataManagerTestUtils.java @@ -346,6 +346,7 @@ public static void writeDeletedKeysToOm(OMMetadataManager omMetadataManager, .setBucketName(bucketName) .setVolumeName(volName) .setKeyName(keyNames.get(i)) + .setDataSize(100L) .setReplicationConfig(StandaloneReplicationConfig.getInstance(ONE)) .build()); } diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java index 232eb7bcc3ca..5d2b47d0d3e6 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java @@ -642,7 +642,11 @@ public void testGetClusterState() throws Exception { Assertions.assertEquals(2, clusterStateResponse.getVolumes()); Assertions.assertEquals(2, clusterStateResponse.getBuckets()); Assertions.assertEquals(3, clusterStateResponse.getKeys()); - Assertions.assertEquals(3, clusterStateResponse.getKeysPendingDeletion()); + // Since a single RepeatedOmKeyInfo can contain multiple deleted keys with + // the same name, the total count of pending deletion keys is determined by + // summing the count of the keyInfoList. Each keyInfoList comprises + // OmKeyInfo objects that represent the deleted keys. 
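+    // The three deletedTable rows previously counted as 3 pending-deletion
+    // keys hold 6 OmKeyInfo entries in total, hence the expected value of 6.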
+ Assertions.assertEquals(6, clusterStateResponse.getKeysPendingDeletion()); Assertions.assertEquals(3, clusterStateResponse.getDeletedDirs()); } diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOmDBInsightEndPoint.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOmDBInsightEndPoint.java index 1dd512156300..c88a26b8773a 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOmDBInsightEndPoint.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOmDBInsightEndPoint.java @@ -221,18 +221,25 @@ public void testKeysSummaryAttribute() { statsDao.insert(newRecord); newRecord = new GlobalStats("openKeyTableReplicatedDataSize", 30L, now); statsDao.insert(newRecord); + newRecord = new GlobalStats("deletedTableReplicatedDataSize", 30L, now); + statsDao.insert(newRecord); newRecord = new GlobalStats("openFileTableUnReplicatedDataSize", 10L, now); statsDao.insert(newRecord); newRecord = new GlobalStats("openKeyTableUnReplicatedDataSize", 10L, now); statsDao.insert(newRecord); + newRecord = new GlobalStats("deletedTableUnReplicatedDataSize", 10L, now); + statsDao.insert(newRecord); + // Insert records for table counts newRecord = new GlobalStats("openKeyTableTableCount", 3L, now); statsDao.insert(newRecord); newRecord = new GlobalStats("openFileTableTableCount", 3L, now); statsDao.insert(newRecord); + newRecord = new GlobalStats("deletedTableTableCount", 3L, now); + statsDao.insert(newRecord); - // Call the API to get the response + // Call the API of Open keys to get the response Response openKeyInfoResp = omdbInsightEndpoint.getOpenKeyInfo(-1, "", true, true); KeyInsightInfoResponse keyInsightInfoResp = @@ -242,6 +249,17 @@ public void testKeysSummaryAttribute() { Assertions.assertEquals(60L, summary.get("totalReplicatedDataSize")); Assertions.assertEquals(20L, summary.get("totalUnreplicatedDataSize")); Assertions.assertEquals(6L, summary.get("totalOpenKeys")); + + // Call the API of Deleted keys to get the response + Response deletedKeyInfoResp = + omdbInsightEndpoint.getDeletedKeyInfo(-1, ""); + keyInsightInfoResp = + (KeyInsightInfoResponse) deletedKeyInfoResp.getEntity(); + Assertions.assertNotNull(keyInsightInfoResp); + summary = keyInsightInfoResp.getKeysSummary(); + Assertions.assertEquals(30L, summary.get("totalReplicatedDataSize")); + Assertions.assertEquals(10L, summary.get("totalUnreplicatedDataSize")); + Assertions.assertEquals(3L, summary.get("totalDeletedKeys")); } @Test diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestOmTableInsightTask.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestOmTableInsightTask.java index 04fd55d58c00..8bf633711714 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestOmTableInsightTask.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestOmTableInsightTask.java @@ -18,6 +18,7 @@ package org.apache.hadoop.ozone.recon.tasks; +import org.apache.commons.lang3.tuple.ImmutablePair; import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.hdds.client.StandaloneReplicationConfig; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; @@ -25,6 +26,7 @@ import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; import 
org.apache.hadoop.ozone.recon.persistence.AbstractReconSqlDBTest; import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager; import org.apache.hadoop.ozone.recon.tasks.OMDBUpdateEvent.OMUpdateEventBuilder; @@ -36,13 +38,17 @@ import java.io.IOException; import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.FILE_TABLE; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.KEY_TABLE; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.OPEN_FILE_TABLE; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.OPEN_KEY_TABLE; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.VOLUME_TABLE; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.BUCKET_TABLE; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.DELETED_TABLE; +import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.writeDeletedKeysToOm; import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.writeOpenKeyToOm; import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.writeOpenFileToOm; import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getTestReconOmMetadataManager; @@ -125,7 +131,7 @@ public void testReprocessForCount() throws Exception { @Test - public void testReprocessForSize() throws Exception { + public void testReprocessForOpenKeyTable() throws Exception { // Populate the OpenKeys table in OM DB writeOpenKeyToOm(reconOMMetadataManager, "key1", "Bucket1", "Volume1", null, 1L); @@ -134,6 +140,17 @@ public void testReprocessForSize() throws Exception { writeOpenKeyToOm(reconOMMetadataManager, "key1", "Bucket3", "Volume3", null, 3L); + Pair result = + omTableInsightTask.reprocess(reconOMMetadataManager); + assertTrue(result.getRight()); + assertEquals(3L, getCountForTable(OPEN_KEY_TABLE)); + // Test for both replicated and unreplicated size for OPEN_KEY_TABLE + assertEquals(6L, getUnReplicatedSizeForTable(OPEN_KEY_TABLE)); + assertEquals(18L, getReplicatedSizeForTable(OPEN_KEY_TABLE)); + } + + @Test + public void testReprocessForOpenFileTable() throws Exception { // Populate the OpenFile table in OM DB writeOpenFileToOm(reconOMMetadataManager, "file1", "Bucket1", "Volume1", "file1", 1, 0, 1, 1, null, 1L); @@ -145,16 +162,35 @@ public void testReprocessForSize() throws Exception { Pair result = omTableInsightTask.reprocess(reconOMMetadataManager); assertTrue(result.getRight()); - assertEquals(3L, getCountForTable(OPEN_KEY_TABLE)); assertEquals(3L, getCountForTable(OPEN_FILE_TABLE)); - // Test for both replicated and unreplicated size for OPEN_KEY_TABLE - assertEquals(6L, getUnReplicatedSizeForTable(OPEN_KEY_TABLE)); - assertEquals(18L, getReplicatedSizeForTable(OPEN_KEY_TABLE)); // Test for both replicated and unreplicated size for OPEN_FILE_TABLE assertEquals(6L, getUnReplicatedSizeForTable(OPEN_FILE_TABLE)); assertEquals(18L, getReplicatedSizeForTable(OPEN_FILE_TABLE)); } + @Test + public void testReprocessForDeletedTable() throws Exception { + // Populate the deletedKeys table in OM DB + // By default the size of each key is set to 100L + List deletedKeysList1 = Arrays.asList("key1"); + writeDeletedKeysToOm(reconOMMetadataManager, + deletedKeysList1, "Bucket1", "Volume1"); + List deletedKeysList2 = Arrays.asList("key2", "key2"); + writeDeletedKeysToOm(reconOMMetadataManager, + deletedKeysList2, "Bucket2", "Volume2"); + List deletedKeysList3 = Arrays.asList("key3", "key3", "key3"); + 
writeDeletedKeysToOm(reconOMMetadataManager, + deletedKeysList3, "Bucket3", "Volume3"); + + + Pair result = + omTableInsightTask.reprocess(reconOMMetadataManager); + assertTrue(result.getRight()); + assertEquals(6L, getCountForTable(DELETED_TABLE)); + // Test for both replicated and unreplicated size for DELETED_TABLE + assertEquals(600L, getUnReplicatedSizeForTable(DELETED_TABLE)); + assertEquals(600L, getReplicatedSizeForTable(DELETED_TABLE)); + } @Test public void testProcessForCount() { @@ -178,8 +214,7 @@ public void testProcessForCount() { assertEquals(4L, getCountForTable(KEY_TABLE)); assertEquals(4L, getCountForTable(VOLUME_TABLE)); assertEquals(4L, getCountForTable(BUCKET_TABLE)); - assertEquals(4L, getCountForTable(OPEN_KEY_TABLE)); - assertEquals(4L, getCountForTable(DELETED_TABLE)); + assertEquals(4L, getCountForTable(FILE_TABLE)); // add a new key and simulate delete on non-existing item (value: null) ArrayList newEvents = new ArrayList<>(); @@ -196,12 +231,11 @@ public void testProcessForCount() { assertEquals(5L, getCountForTable(KEY_TABLE)); assertEquals(5L, getCountForTable(VOLUME_TABLE)); assertEquals(5L, getCountForTable(BUCKET_TABLE)); - assertEquals(5L, getCountForTable(OPEN_KEY_TABLE)); - assertEquals(5L, getCountForTable(DELETED_TABLE)); + assertEquals(5L, getCountForTable(FILE_TABLE)); } @Test - public void testProcessForSize() { + public void testProcessForOpenKeyTableAndOpenFileTable() { // Prepare mock data size Long sizeToBeReturned = 1000L; OmKeyInfo omKeyInfo = mock(OmKeyInfo.class); @@ -263,6 +297,78 @@ public void testProcessForSize() { } } + @Test + public void testProcessForDeletedTable() { + // Prepare mock data size + ImmutablePair sizeToBeReturned = + new ImmutablePair<>(1000L, 3000L); + ArrayList omKeyInfoList = new ArrayList<>(); + // Add 5 OmKeyInfo objects to the list + for (int i = 0; i < 5; i++) { + OmKeyInfo omKeyInfo = + getOmKeyInfo("sampleVol", "non_fso_Bucket", "non_fso_key1", true); + // Set properties of OmKeyInfo object if needed + omKeyInfoList.add(omKeyInfo); + } + RepeatedOmKeyInfo repeatedOmKeyInfo = mock(RepeatedOmKeyInfo.class); + when(repeatedOmKeyInfo.getTotalSize()).thenReturn(sizeToBeReturned); + when(repeatedOmKeyInfo.getOmKeyInfoList()).thenReturn(omKeyInfoList); + + // Test PUT events + ArrayList putEvents = new ArrayList<>(); + for (int i = 0; i < 5; i++) { + putEvents.add( + getOMUpdateEvent("item" + i, repeatedOmKeyInfo, DELETED_TABLE, PUT, + null)); + } + OMUpdateEventBatch putEventBatch = new OMUpdateEventBatch(putEvents); + omTableInsightTask.process(putEventBatch); + // Each of the 5 RepeatedOmKeyInfo object has 5 OmKeyInfo obj, + // so total deleted keys should be 5 * 5 = 25 + assertEquals(25L, getCountForTable(DELETED_TABLE)); + // After 5 PUTs, size should be 5 * 1000 = 5000 for each size-related table + assertEquals(5000L, getUnReplicatedSizeForTable(DELETED_TABLE)); + assertEquals(15000L, getReplicatedSizeForTable(DELETED_TABLE)); + + + // Test DELETE events + ArrayList deleteEvents = new ArrayList<>(); + // Delete "item0" + deleteEvents.add( + getOMUpdateEvent("item0", repeatedOmKeyInfo, DELETED_TABLE, DELETE, + null)); + OMUpdateEventBatch deleteEventBatch = new OMUpdateEventBatch(deleteEvents); + omTableInsightTask.process(deleteEventBatch); + // After deleting "item0" total deleted keys should be 20 + assertEquals(20L, getCountForTable(DELETED_TABLE)); + // After deleting "item0", size should be 4 * 1000 = 4000 + assertEquals(4000L, getUnReplicatedSizeForTable(DELETED_TABLE)); + assertEquals(12000L, 
        getReplicatedSizeForTable(DELETED_TABLE));
+
+
+    // Test UPDATE events
+    ArrayList<OMDBUpdateEvent> updateEvents = new ArrayList<>();
+    // Update "item1" with a new value that reports smaller sizes
+    // (500L unreplicated, 1500L replicated) and only 4 OmKeyInfo entries
+    ImmutablePair<Long, Long> newSizesToBeReturned =
+        new ImmutablePair<>(500L, 1500L);
+    RepeatedOmKeyInfo newRepeatedOmKeyInfo = mock(RepeatedOmKeyInfo.class);
+    when(newRepeatedOmKeyInfo.getTotalSize()).thenReturn(newSizesToBeReturned);
+    when(newRepeatedOmKeyInfo.getOmKeyInfoList()).thenReturn(
+        omKeyInfoList.subList(1, 5));
+    updateEvents.add(
+        getOMUpdateEvent("item1", newRepeatedOmKeyInfo, DELETED_TABLE, UPDATE,
+            repeatedOmKeyInfo));
+    OMUpdateEventBatch updateEventBatch = new OMUpdateEventBatch(updateEvents);
+    omTableInsightTask.process(updateEventBatch);
+    // The update replaces 5 OmKeyInfo entries with 4, so the total count of
+    // deleted keys drops from 20 to 19
+    assertEquals(19L, getCountForTable(DELETED_TABLE));
+    // updatedValue = presentValue - oldValue + newValue
+    // Unreplicated size: 4000 - 1000 + 500 = 3500
+    assertEquals(3500L, getUnReplicatedSizeForTable(DELETED_TABLE));
+    // Replicated size: 12000 - 3000 + 1500 = 10500
+    assertEquals(10500L, getReplicatedSizeForTable(DELETED_TABLE));
+  }
+
   private OMDBUpdateEvent getOMUpdateEvent(
       String name, Object value,