diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java index 37d6ae42b5c7..4a931544b6e0 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java @@ -57,6 +57,7 @@ import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.OPEN_FILE_TABLE; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.OPEN_KEY_TABLE; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.DELETED_TABLE; +import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.DELETED_DIR_TABLE; import static org.apache.hadoop.ozone.recon.ReconConstants.DEFAULT_FETCH_COUNT; import static org.apache.hadoop.ozone.recon.ReconConstants.RECON_QUERY_LIMIT; import static org.apache.hadoop.ozone.recon.ReconConstants.RECON_QUERY_PREVKEY; @@ -651,6 +652,36 @@ public Response getDeletedDirInfo( return Response.ok(deletedDirInsightInfo).build(); } + /** + * Retrieves the summary of deleted directories. + * + * The summary currently contains only the total number of directories pending deletion. + * @return The HTTP response body is a map with the following entries: + * - "totalDeletedDirectories": the total number of deleted directories + * + * Example response: + * { + * "totalDeletedDirectories": 8 + * } + */ + @GET + @Path("/deletePending/dirs/summary") + public Response getDeletedDirectorySummary() { + Map dirSummary = new HashMap<>(); + // Build the summary for deleted directories. + createSummaryForDeletedDirectories(dirSummary); + return Response.ok(dirSummary).build(); + } + + private void createSummaryForDeletedDirectories( + Map dirSummary) { + // Fetch the necessary metrics for deleted directories. + Long deletedDirCount = getValueFromId(globalStatsDao.findById( + OmTableInsightTask.getTableCountKeyFromTable(DELETED_DIR_TABLE))); + // Store the total number of deleted directories. + dirSummary.put("totalDeletedDirectories", deletedDirCount); + } + private void updateReplicatedAndUnReplicatedTotal( KeyInsightInfoResponse deletedKeyAndDirInsightInfo, RepeatedOmKeyInfo repeatedOmKeyInfo) { diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/DeletedKeysInsightHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/DeletedKeysInsightHandler.java new file mode 100644 index 000000000000..5a6d7a256e49 --- /dev/null +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/DeletedKeysInsightHandler.java @@ -0,0 +1,147 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.recon.tasks; + +import org.apache.commons.lang3.tuple.Pair; +import org.apache.commons.lang3.tuple.Triple; +import org.apache.hadoop.hdds.utils.db.Table; +import org.apache.hadoop.hdds.utils.db.TableIterator; +import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.util.HashMap; + +/** + * Manages records in the Deleted Table, updating counts and sizes of + * pending Key Deletions in the backend. + */ +public class DeletedKeysInsightHandler implements OmTableHandler { + + private static final Logger LOG = + LoggerFactory.getLogger(DeletedKeysInsightHandler.class); + + /** + * Invoked by the process method to add information on those keys that have + * been backlogged in the backend for deletion. + */ + @Override + public void handlePutEvent(OMDBUpdateEvent event, + String tableName, + HashMap objectCountMap, + HashMap unReplicatedSizeMap, + HashMap replicatedSizeMap) { + + String countKey = getTableCountKeyFromTable(tableName); + String unReplicatedSizeKey = getUnReplicatedSizeKeyFromTable(tableName); + String replicatedSizeKey = getReplicatedSizeKeyFromTable(tableName); + + if (event.getValue() != null) { + RepeatedOmKeyInfo repeatedOmKeyInfo = + (RepeatedOmKeyInfo) event.getValue(); + objectCountMap.computeIfPresent(countKey, + (k, count) -> count + repeatedOmKeyInfo.getOmKeyInfoList().size()); + Pair result = repeatedOmKeyInfo.getTotalSize(); + unReplicatedSizeMap.computeIfPresent(unReplicatedSizeKey, + (k, size) -> size + result.getLeft()); + replicatedSizeMap.computeIfPresent(replicatedSizeKey, + (k, size) -> size + result.getRight()); + } else { + LOG.warn("Put event does not have the Key Info for {}.", + event.getKey()); + } + + } + + /** + * Invoked by the process method to remove information on those keys that have + * been successfully deleted from the backend. + */ + @Override + public void handleDeleteEvent(OMDBUpdateEvent event, + String tableName, + HashMap objectCountMap, + HashMap unReplicatedSizeMap, + HashMap replicatedSizeMap) { + + String countKey = getTableCountKeyFromTable(tableName); + String unReplicatedSizeKey = getUnReplicatedSizeKeyFromTable(tableName); + String replicatedSizeKey = getReplicatedSizeKeyFromTable(tableName); + + if (event.getValue() != null) { + RepeatedOmKeyInfo repeatedOmKeyInfo = + (RepeatedOmKeyInfo) event.getValue(); + objectCountMap.computeIfPresent(countKey, (k, count) -> + count > 0 ? count - repeatedOmKeyInfo.getOmKeyInfoList().size() : 0L); + Pair result = repeatedOmKeyInfo.getTotalSize(); + unReplicatedSizeMap.computeIfPresent(unReplicatedSizeKey, + (k, size) -> size > result.getLeft() ? size - result.getLeft() : 0L); + replicatedSizeMap.computeIfPresent(replicatedSizeKey, + (k, size) -> size > result.getRight() ? size - result.getRight() : + 0L); + } else { + LOG.warn("Delete event does not have the Key Info for {}.", + event.getKey()); + } + } + + /** + * Invoked by the process method to update the statistics on the keys + * pending to be deleted. 
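+ * <p>Unlike the open key tables, an UPDATE on the deleted table is treated + * as a no-op here: the sizes recorded for keys pending deletion are assumed + * not to change once the entry has been written.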
+ */ + @Override + public void handleUpdateEvent(OMDBUpdateEvent event, + String tableName, + HashMap objectCountMap, + HashMap unReplicatedSizeMap, + HashMap replicatedSizeMap) { + // The size of deleted keys cannot change, hence this is a no-op. + return; + } + + /** + * Invoked by the reprocess method to calculate the record count of the + * deleted table and the sizes of replicated and unreplicated keys that are + * pending deletion in Ozone. + */ + @Override + public Triple getTableSizeAndCount( + TableIterator> iterator) + throws IOException { + long count = 0; + long unReplicatedSize = 0; + long replicatedSize = 0; + + if (iterator != null) { + while (iterator.hasNext()) { + Table.KeyValue kv = iterator.next(); + if (kv != null && kv.getValue() != null) { + RepeatedOmKeyInfo repeatedOmKeyInfo = (RepeatedOmKeyInfo) kv + .getValue(); + Pair result = repeatedOmKeyInfo.getTotalSize(); + // Pair order: left = unreplicated size, right = replicated size, + // matching handlePutEvent and handleDeleteEvent above. + unReplicatedSize += result.getLeft(); + replicatedSize += result.getRight(); + count += repeatedOmKeyInfo.getOmKeyInfoList().size(); + } + } + } + return Triple.of(count, unReplicatedSize, replicatedSize); + } +} diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/OmTableHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/OmTableHandler.java new file mode 100644 index 000000000000..5ae23b68a703 --- /dev/null +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/OmTableHandler.java @@ -0,0 +1,131 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.recon.tasks; + +import org.apache.commons.lang3.tuple.Triple; +import org.apache.hadoop.hdds.utils.db.Table; +import org.apache.hadoop.hdds.utils.db.TableIterator; + +import java.io.IOException; +import java.util.HashMap; + +/** + * Interface for handling PUT, DELETE and UPDATE events for size-related + * tables for OM Insights. + */ +public interface OmTableHandler { + + /** + * Handles a PUT event for size-related tables by updating both the data + * sizes and their corresponding record counts in the tables. + * + * @param event The PUT event to be processed. + * @param tableName Table name associated with the event. + * @param objectCountMap A map storing object counts. + * @param unReplicatedSizeMap A map storing unreplicated data sizes. + * @param replicatedSizeMap A map storing replicated data sizes. + */ + void handlePutEvent(OMDBUpdateEvent event, + String tableName, + HashMap objectCountMap, + HashMap unReplicatedSizeMap, + HashMap replicatedSizeMap); + + + /** + * Handles a DELETE event for size-related tables by updating both the data + * sizes and their corresponding record counts in the tables. + * + * @param event The DELETE event to be processed. + * @param tableName Table name associated with the event. + * @param objectCountMap A map storing object counts. + * @param unReplicatedSizeMap A map storing unreplicated data sizes. + * @param replicatedSizeMap A map storing replicated data sizes. + */ + void handleDeleteEvent(OMDBUpdateEvent event, + String tableName, + HashMap objectCountMap, + HashMap unReplicatedSizeMap, + HashMap replicatedSizeMap); + + + /** + * Handles an UPDATE event for size-related tables by updating both the data + * sizes and their corresponding record counts in the tables. + * + * @param event The UPDATE event to be processed. + * @param tableName Table name associated with the event. + * @param objectCountMap A map storing object counts. + * @param unReplicatedSizeMap A map storing unreplicated data sizes. + * @param replicatedSizeMap A map storing replicated data sizes. + */ + void handleUpdateEvent(OMDBUpdateEvent event, + String tableName, + HashMap objectCountMap, + HashMap unReplicatedSizeMap, + HashMap replicatedSizeMap); + + + /** + * Returns a triple with the total count of records (left), total unreplicated + * size (middle), and total replicated size (right) in the given iterator. + * Increments count for each record and adds the dataSize if a record's value + * is an instance of OmKeyInfo or RepeatedOmKeyInfo. + * If the iterator is null, returns (0, 0, 0). + * + * @param iterator The iterator over the table to be iterated. + * @return A Triple with three Long values representing the count, + * unreplicated size and replicated size. + * @throws IOException If an I/O error occurs during the iterator traversal. + */ + Triple getTableSizeAndCount( + TableIterator> iterator) + throws IOException; + + + /** + * Returns the count key for the given table. + * + * @param tableName The name of the table. + * @return The count key for the table.
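+ * For example, {@code getTableCountKeyFromTable("deletedTable")} returns + * {@code "deletedTableCount"}, the key under which the count is stored in + * GlobalStats.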
+ */ + default String getTableCountKeyFromTable(String tableName) { + return tableName + "Count"; + } + + /** + * Returns the replicated size key for the given table. + * + * @param tableName The name of the table. + * @return The replicated size key for the table. + */ + default String getReplicatedSizeKeyFromTable(String tableName) { + return tableName + "ReplicatedDataSize"; + } + + /** + * Returns the unreplicated size key for the given table. + * + * @param tableName The name of the table. + * @return The unreplicated size key for the table. + */ + default String getUnReplicatedSizeKeyFromTable(String tableName) { + return tableName + "UnReplicatedDataSize"; + } +} diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/OmTableInsightTask.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/OmTableInsightTask.java index c814d9d9e33f..3e84f311c942 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/OmTableInsightTask.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/OmTableInsightTask.java @@ -26,8 +26,6 @@ import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.hdds.utils.db.TableIterator; import org.apache.hadoop.ozone.om.OMMetadataManager; -import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager; import org.hadoop.ozone.recon.schema.tables.daos.GlobalStatsDao; import org.hadoop.ozone.recon.schema.tables.pojos.GlobalStats; @@ -37,22 +35,20 @@ import java.io.IOException; import java.sql.Timestamp; -import java.util.ArrayList; -import java.util.Collection; import java.util.HashMap; import java.util.Iterator; -import java.util.List; import java.util.Map; - - +import java.util.Collection; import java.util.Map.Entry; +import java.util.ArrayList; +import java.util.List; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.OPEN_KEY_TABLE; -import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.OPEN_FILE_TABLE; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.DELETED_TABLE; -import static org.jooq.impl.DSL.currentTimestamp; +import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.OPEN_FILE_TABLE; import static org.jooq.impl.DSL.select; import static org.jooq.impl.DSL.using; +import static org.jooq.impl.DSL.currentTimestamp; /** * Class to iterate over the OM DB and store the total counts of volumes, @@ -65,14 +61,21 @@ public class OmTableInsightTask implements ReconOmTask { private GlobalStatsDao globalStatsDao; private Configuration sqlConfiguration; private ReconOMMetadataManager reconOMMetadataManager; + private Map tableHandlers; @Inject public OmTableInsightTask(GlobalStatsDao globalStatsDao, - Configuration sqlConfiguration, - ReconOMMetadataManager reconOMMetadataManager) { + Configuration sqlConfiguration, + ReconOMMetadataManager reconOMMetadataManager) { this.globalStatsDao = globalStatsDao; this.sqlConfiguration = sqlConfiguration; this.reconOMMetadataManager = reconOMMetadataManager; + + // Initialize table handlers + tableHandlers = new HashMap<>(); + tableHandlers.put(OPEN_KEY_TABLE, new OpenKeysInsightHandler()); + tableHandlers.put(OPEN_FILE_TABLE, new OpenKeysInsightHandler()); + tableHandlers.put(DELETED_TABLE, new DeletedKeysInsightHandler()); } /** @@ -90,8 +93,8 @@ public OmTableInsightTask(GlobalStatsDao globalStatsDao, @Override public Pair reprocess(OMMetadataManager 
omMetadataManager) { HashMap objectCountMap = initializeCountMap(); - HashMap unReplicatedSizeCountMap = initializeSizeMap(false); - HashMap replicatedSizeCountMap = initializeSizeMap(true); + HashMap unReplicatedSizeMap = initializeSizeMap(false); + HashMap replicatedSizeMap = initializeSizeMap(true); for (String tableName : getTaskTables()) { Table table = omMetadataManager.getTable(tableName); @@ -100,16 +103,16 @@ public Pair reprocess(OMMetadataManager omMetadataManager) { return new ImmutablePair<>(getTaskName(), false); } - try ( - TableIterator> iterator - = table.iterator()) { - if (getTablesToCalculateSize().contains(tableName)) { - Triple details = getTableSizeAndCount(iterator); + try (TableIterator> iterator + = table.iterator()) { + if (tableHandlers.containsKey(tableName)) { + Triple details = + tableHandlers.get(tableName).getTableSizeAndCount(iterator); objectCountMap.put(getTableCountKeyFromTable(tableName), details.getLeft()); - unReplicatedSizeCountMap.put( + unReplicatedSizeMap.put( getUnReplicatedSizeKeyFromTable(tableName), details.getMiddle()); - replicatedSizeCountMap.put(getReplicatedSizeKeyFromTable(tableName), + replicatedSizeMap.put(getReplicatedSizeKeyFromTable(tableName), details.getRight()); } else { long count = Iterators.size(iterator); @@ -124,72 +127,17 @@ public Pair reprocess(OMMetadataManager omMetadataManager) { if (!objectCountMap.isEmpty()) { writeDataToDB(objectCountMap); } - if (!unReplicatedSizeCountMap.isEmpty()) { - writeDataToDB(unReplicatedSizeCountMap); + if (!unReplicatedSizeMap.isEmpty()) { + writeDataToDB(unReplicatedSizeMap); } - if (!replicatedSizeCountMap.isEmpty()) { - writeDataToDB(replicatedSizeCountMap); + if (!replicatedSizeMap.isEmpty()) { + writeDataToDB(replicatedSizeMap); } LOG.info("Completed a 'reprocess' run of OmTableInsightTask."); return new ImmutablePair<>(getTaskName(), true); } - /** - * Returns a triple with the total count of records (left), total unreplicated - * size (middle), and total replicated size (right) in the given iterator. - * Increments count for each record and adds the dataSize if a record's value - * is an instance of OmKeyInfo. If the iterator is null, returns (0, 0, 0). - * - * @param iterator The iterator over the table to be iterated. - * @return A Triple with three Long values representing the count, - * unreplicated size and replicated size. - * @throws IOException If an I/O error occurs during the iterator traversal. - */ - private Triple getTableSizeAndCount( - TableIterator> iterator) - throws IOException { - long count = 0; - long unReplicatedSize = 0; - long replicatedSize = 0; - - if (iterator != null) { - while (iterator.hasNext()) { - Table.KeyValue kv = iterator.next(); - if (kv != null && kv.getValue() != null) { - if (kv.getValue() instanceof OmKeyInfo) { - OmKeyInfo omKeyInfo = (OmKeyInfo) kv.getValue(); - unReplicatedSize += omKeyInfo.getDataSize(); - replicatedSize += omKeyInfo.getReplicatedSize(); - count++; - } - if (kv.getValue() instanceof RepeatedOmKeyInfo) { - RepeatedOmKeyInfo repeatedOmKeyInfo = (RepeatedOmKeyInfo) kv - .getValue(); - Pair result = repeatedOmKeyInfo.getTotalSize(); - unReplicatedSize += result.getRight(); - replicatedSize += result.getLeft(); - // Since we can have multiple deleted keys of same name - count += repeatedOmKeyInfo.getOmKeyInfoList().size(); - } - } - } - } - - return Triple.of(count, unReplicatedSize, replicatedSize); - } - - /** - * Returns a collection of table names that require data size calculation. 
- */ - public Collection getTablesToCalculateSize() { - List taskTables = new ArrayList<>(); - taskTables.add(OPEN_KEY_TABLE); - taskTables.add(OPEN_FILE_TABLE); - taskTables.add(DELETED_TABLE); - return taskTables; - } - @Override public String getTaskName() { return "OmTableInsightTask"; @@ -211,10 +159,9 @@ public Pair process(OMUpdateEventBatch events) { Iterator eventIterator = events.getIterator(); // Initialize maps to store count and size information HashMap objectCountMap = initializeCountMap(); - HashMap unreplicatedSizeCountMap = initializeSizeMap(false); - HashMap replicatedSizeCountMap = initializeSizeMap(true); + HashMap unReplicatedSizeMap = initializeSizeMap(false); + HashMap replicatedSizeMap = initializeSizeMap(true); final Collection taskTables = getTaskTables(); - final Collection sizeRelatedTables = getTablesToCalculateSize(); // Process each update event while (eventIterator.hasNext()) { @@ -223,22 +170,21 @@ public Pair process(OMUpdateEventBatch events) { if (!taskTables.contains(tableName)) { continue; } - try { switch (omdbUpdateEvent.getAction()) { case PUT: - handlePutEvent(omdbUpdateEvent, tableName, sizeRelatedTables, - objectCountMap, unreplicatedSizeCountMap, replicatedSizeCountMap); + handlePutEvent(omdbUpdateEvent, tableName, objectCountMap, + unReplicatedSizeMap, replicatedSizeMap); break; case DELETE: - handleDeleteEvent(omdbUpdateEvent, tableName, sizeRelatedTables, - objectCountMap, unreplicatedSizeCountMap, replicatedSizeCountMap); + handleDeleteEvent(omdbUpdateEvent, tableName, objectCountMap, + unReplicatedSizeMap, replicatedSizeMap); break; case UPDATE: - handleUpdateEvent(omdbUpdateEvent, tableName, sizeRelatedTables, - objectCountMap, unreplicatedSizeCountMap, replicatedSizeCountMap); + handleUpdateEvent(omdbUpdateEvent, tableName, objectCountMap, + unReplicatedSizeMap, replicatedSizeMap); break; default: @@ -256,11 +202,11 @@ public Pair process(OMUpdateEventBatch events) { if (!objectCountMap.isEmpty()) { writeDataToDB(objectCountMap); } - if (!unreplicatedSizeCountMap.isEmpty()) { - writeDataToDB(unreplicatedSizeCountMap); + if (!unReplicatedSizeMap.isEmpty()) { + writeDataToDB(unReplicatedSizeMap); } - if (!replicatedSizeCountMap.isEmpty()) { - writeDataToDB(replicatedSizeCountMap); + if (!replicatedSizeMap.isEmpty()) { + writeDataToDB(replicatedSizeMap); } LOG.info("Completed a 'process' run of OmTableInsightTask."); return new ImmutablePair<>(getTaskName(), true); @@ -268,65 +214,34 @@ public Pair process(OMUpdateEventBatch events) { private void handlePutEvent(OMDBUpdateEvent event, String tableName, - Collection sizeRelatedTables, HashMap objectCountMap, - HashMap unreplicatedSizeCountMap, - HashMap replicatedSizeCountMap) { - - if (sizeRelatedTables.contains(tableName)) { - handleSizeRelatedTablePutEvent(event, tableName, objectCountMap, - unreplicatedSizeCountMap, replicatedSizeCountMap); - } else { - String countKey = getTableCountKeyFromTable(tableName); - objectCountMap.computeIfPresent(countKey, (k, count) -> count + 1L); - } - } - - private void handleSizeRelatedTablePutEvent( - OMDBUpdateEvent event, - String tableName, - HashMap objectCountMap, - HashMap unreplicatedSizeCountMap, - HashMap replicatedSizeCountMap) { - - String countKey = getTableCountKeyFromTable(tableName); - String unReplicatedSizeKey = getUnReplicatedSizeKeyFromTable(tableName); - String replicatedSizeKey = getReplicatedSizeKeyFromTable(tableName); - - if (event.getValue() instanceof OmKeyInfo) { - // Handle PUT for OpenKeyTable & OpenFileTable - OmKeyInfo 
omKeyInfo = (OmKeyInfo) event.getValue(); - objectCountMap.computeIfPresent(countKey, (k, count) -> count + 1L); - unreplicatedSizeCountMap.computeIfPresent(unReplicatedSizeKey, - (k, size) -> size + omKeyInfo.getDataSize()); - replicatedSizeCountMap.computeIfPresent(replicatedSizeKey, - (k, size) -> size + omKeyInfo.getReplicatedSize()); - } else if (event.getValue() instanceof RepeatedOmKeyInfo) { - // Handle PUT for DeletedTable - RepeatedOmKeyInfo repeatedOmKeyInfo = - (RepeatedOmKeyInfo) event.getValue(); - objectCountMap.computeIfPresent(countKey, - (k, count) -> count + repeatedOmKeyInfo.getOmKeyInfoList().size()); - Pair result = repeatedOmKeyInfo.getTotalSize(); - unreplicatedSizeCountMap.computeIfPresent(unReplicatedSizeKey, - (k, size) -> size + result.getLeft()); - replicatedSizeCountMap.computeIfPresent(replicatedSizeKey, - (k, size) -> size + result.getRight()); + HashMap unReplicatedSizeMap, + HashMap replicatedSizeMap) + throws IOException { + OmTableHandler tableHandler = tableHandlers.get(tableName); + if (event.getValue() != null) { + if (tableHandler != null) { + tableHandler.handlePutEvent(event, tableName, objectCountMap, + unReplicatedSizeMap, replicatedSizeMap); + } else { + String countKey = getTableCountKeyFromTable(tableName); + objectCountMap.computeIfPresent(countKey, (k, count) -> count + 1L); + } } } private void handleDeleteEvent(OMDBUpdateEvent event, String tableName, - Collection sizeRelatedTables, HashMap objectCountMap, - HashMap unreplicatedSizeCountMap, - HashMap replicatedSizeCountMap) { - + HashMap unReplicatedSizeMap, + HashMap replicatedSizeMap) + throws IOException { + OmTableHandler tableHandler = tableHandlers.get(tableName); if (event.getValue() != null) { - if (sizeRelatedTables.contains(tableName)) { - handleSizeRelatedTableDeleteEvent(event, tableName, objectCountMap, - unreplicatedSizeCountMap, replicatedSizeCountMap); + if (tableHandler != null) { + tableHandler.handleDeleteEvent(event, tableName, objectCountMap, + unReplicatedSizeMap, replicatedSizeMap); } else { String countKey = getTableCountKeyFromTable(tableName); objectCountMap.computeIfPresent(countKey, @@ -335,109 +250,28 @@ private void handleDeleteEvent(OMDBUpdateEvent event, } } - private void handleSizeRelatedTableDeleteEvent( - OMDBUpdateEvent event, - String tableName, - HashMap objectCountMap, - HashMap unreplicatedSizeCountMap, - HashMap replicatedSizeCountMap) { - - String countKey = getTableCountKeyFromTable(tableName); - String unReplicatedSizeKey = getUnReplicatedSizeKeyFromTable(tableName); - String replicatedSizeKey = getReplicatedSizeKeyFromTable(tableName); - - if (event.getValue() instanceof OmKeyInfo) { - // Handle DELETE for OpenKeyTable & OpenFileTable - OmKeyInfo omKeyInfo = (OmKeyInfo) event.getValue(); - objectCountMap.computeIfPresent(countKey, - (k, count) -> count > 0 ? count - 1L : 0L); - unreplicatedSizeCountMap.computeIfPresent(unReplicatedSizeKey, - (k, size) -> size > omKeyInfo.getDataSize() ? - size - omKeyInfo.getDataSize() : 0L); - replicatedSizeCountMap.computeIfPresent(replicatedSizeKey, - (k, size) -> size > omKeyInfo.getReplicatedSize() ? - size - omKeyInfo.getReplicatedSize() : 0L); - } else if (event.getValue() instanceof RepeatedOmKeyInfo) { - // Handle DELETE for DeletedTable - RepeatedOmKeyInfo repeatedOmKeyInfo = - (RepeatedOmKeyInfo) event.getValue(); - objectCountMap.computeIfPresent(countKey, (k, count) -> count > 0 ? 
- count - repeatedOmKeyInfo.getOmKeyInfoList().size() : 0L); - Pair result = repeatedOmKeyInfo.getTotalSize(); - unreplicatedSizeCountMap.computeIfPresent(unReplicatedSizeKey, - (k, size) -> size > result.getLeft() ? size - result.getLeft() : 0L); - replicatedSizeCountMap.computeIfPresent(replicatedSizeKey, - (k, size) -> size > result.getRight() ? size - result.getRight() : - 0L); - } - } private void handleUpdateEvent(OMDBUpdateEvent event, String tableName, - Collection sizeRelatedTables, HashMap objectCountMap, - HashMap unreplicatedSizeCountMap, - HashMap replicatedSizeCountMap) { + HashMap unReplicatedSizeMap, + HashMap replicatedSizeMap) { + OmTableHandler tableHandler = tableHandlers.get(tableName); if (event.getValue() != null) { - if (sizeRelatedTables.contains(tableName)) { + if (tableHandler != null) { // Handle update for only size related tables - handleSizeRelatedTableUpdateEvent(event, tableName, objectCountMap, - unreplicatedSizeCountMap, replicatedSizeCountMap); + tableHandler.handleUpdateEvent(event, tableName, objectCountMap, + unReplicatedSizeMap, replicatedSizeMap); } } } - - private void handleSizeRelatedTableUpdateEvent( - OMDBUpdateEvent event, - String tableName, - HashMap objectCountMap, - HashMap unreplicatedSizeCountMap, - HashMap replicatedSizeCountMap) { - - if (event.getOldValue() == null) { - LOG.warn("Update event does not have the old Key Info for {}.", - event.getKey()); - return; - } - String countKey = getTableCountKeyFromTable(tableName); - String unReplicatedSizeKey = getUnReplicatedSizeKeyFromTable(tableName); - String replicatedSizeKey = getReplicatedSizeKeyFromTable(tableName); - - // In Update event the count for the open table will not change. So we don't - // need to update the count. Except for RepeatedOmKeyInfo, for which the - // size of omKeyInfoList can change - if (event.getValue() instanceof OmKeyInfo) { - // Handle UPDATE for OpenKeyTable & OpenFileTable - OmKeyInfo oldKeyInfo = (OmKeyInfo) event.getOldValue(); - OmKeyInfo newKeyInfo = (OmKeyInfo) event.getValue(); - unreplicatedSizeCountMap.computeIfPresent(unReplicatedSizeKey, - (k, size) -> size - oldKeyInfo.getDataSize() + - newKeyInfo.getDataSize()); - replicatedSizeCountMap.computeIfPresent(replicatedSizeKey, - (k, size) -> size - oldKeyInfo.getReplicatedSize() + - newKeyInfo.getReplicatedSize()); - } else if (event.getValue() instanceof RepeatedOmKeyInfo) { - // Handle UPDATE for DeletedTable - RepeatedOmKeyInfo oldRepeatedOmKeyInfo = - (RepeatedOmKeyInfo) event.getOldValue(); - RepeatedOmKeyInfo newRepeatedOmKeyInfo = - (RepeatedOmKeyInfo) event.getValue(); - objectCountMap.computeIfPresent(countKey, - (k, count) -> count > 0 ? - count - oldRepeatedOmKeyInfo.getOmKeyInfoList().size() + - newRepeatedOmKeyInfo.getOmKeyInfoList().size() : 0L); - Pair oldSize = oldRepeatedOmKeyInfo.getTotalSize(); - Pair newSize = newRepeatedOmKeyInfo.getTotalSize(); - unreplicatedSizeCountMap.computeIfPresent(unReplicatedSizeKey, - (k, size) -> size - oldSize.getLeft() + newSize.getLeft()); - replicatedSizeCountMap.computeIfPresent(replicatedSizeKey, - (k, size) -> size - oldSize.getRight() + newSize.getRight()); - } - } - - + /** + * Write the updated count and size information to the database. + * + * @param dataMap Map containing the updated count and size information. 
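+ * Entries are upserted into GlobalStats: a row that already exists for a + * key (for example "deletedTableCount") is updated, and a missing row is + * inserted.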
+ */ private void writeDataToDB(Map dataMap) { List insertGlobalStats = new ArrayList<>(); List updateGlobalStats = new ArrayList<>(); @@ -461,6 +295,11 @@ private void writeDataToDB(Map dataMap) { globalStatsDao.update(updateGlobalStats); } + /** + * Initializes and returns a count map with the counts for the tables. + * + * @return The count map containing the counts for each table. + */ private HashMap initializeCountMap() { Collection tables = getTaskTables(); HashMap objectCountMap = new HashMap<>(tables.size()); @@ -478,11 +317,13 @@ private HashMap initializeCountMap() { * @return The size map containing the size counts for each table. */ private HashMap initializeSizeMap(boolean replicated) { - Collection tables = getTablesToCalculateSize(); - HashMap sizeCountMap = new HashMap<>(tables.size()); - for (String tableName : tables) { - String key = replicated ? getReplicatedSizeKeyFromTable(tableName) : - getUnReplicatedSizeKeyFromTable(tableName); + HashMap sizeCountMap = new HashMap<>(); + for (Map.Entry entry : tableHandlers.entrySet()) { + String tableName = entry.getKey(); + OmTableHandler tableHandler = entry.getValue(); + String key = + replicated ? tableHandler.getReplicatedSizeKeyFromTable(tableName) : + tableHandler.getUnReplicatedSizeKeyFromTable(tableName); sizeCountMap.put(key, getValueForKey(key)); } return sizeCountMap; diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/OpenKeysInsightHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/OpenKeysInsightHandler.java new file mode 100644 index 000000000000..7a27d29d8f28 --- /dev/null +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/OpenKeysInsightHandler.java @@ -0,0 +1,163 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.recon.tasks; + +import org.apache.commons.lang3.tuple.Triple; +import org.apache.hadoop.hdds.utils.db.Table; +import org.apache.hadoop.hdds.utils.db.TableIterator; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.util.HashMap; + +/** + * Manages records in the OpenKey Table, updating counts and sizes of + * open keys in the backend. + */ +public class OpenKeysInsightHandler implements OmTableHandler { + + private static final Logger LOG = + LoggerFactory.getLogger(OpenKeysInsightHandler.class); + + /** + * Invoked by the process method to add information on those keys that have + * been opened in the backend. + */ + @Override + public void handlePutEvent(OMDBUpdateEvent event, + String tableName, + HashMap objectCountMap, + HashMap unReplicatedSizeMap, + HashMap replicatedSizeMap) { + + String countKey = getTableCountKeyFromTable(tableName); + String unReplicatedSizeKey = getUnReplicatedSizeKeyFromTable(tableName); + String replicatedSizeKey = getReplicatedSizeKeyFromTable(tableName); + + if (event.getValue() != null) { + OmKeyInfo omKeyInfo = (OmKeyInfo) event.getValue(); + objectCountMap.computeIfPresent(countKey, (k, count) -> count + 1L); + unReplicatedSizeMap.computeIfPresent(unReplicatedSizeKey, + (k, size) -> size + omKeyInfo.getDataSize()); + replicatedSizeMap.computeIfPresent(replicatedSizeKey, + (k, size) -> size + omKeyInfo.getReplicatedSize()); + } else { + LOG.warn("Put event does not have the Key Info for {}.", + event.getKey()); + } + } + + /** + * Invoked by the process method to delete information on those keys that are + * no longer open in the backend. + */ + @Override + public void handleDeleteEvent(OMDBUpdateEvent event, + String tableName, + HashMap objectCountMap, + HashMap unReplicatedSizeMap, + HashMap replicatedSizeMap) { + + String countKey = getTableCountKeyFromTable(tableName); + String unReplicatedSizeKey = getUnReplicatedSizeKeyFromTable(tableName); + String replicatedSizeKey = getReplicatedSizeKeyFromTable(tableName); + + if (event.getValue() != null) { + OmKeyInfo omKeyInfo = (OmKeyInfo) event.getValue(); + objectCountMap.computeIfPresent(countKey, + (k, count) -> count > 0 ? count - 1L : 0L); + unReplicatedSizeMap.computeIfPresent(unReplicatedSizeKey, + (k, size) -> size > omKeyInfo.getDataSize() ? + size - omKeyInfo.getDataSize() : 0L); + replicatedSizeMap.computeIfPresent(replicatedSizeKey, + (k, size) -> size > omKeyInfo.getReplicatedSize() ? + size - omKeyInfo.getReplicatedSize() : 0L); + } else { + LOG.warn("Delete event does not have the Key Info for {}.", + event.getKey()); + } + } + + /** + * Invoked by the process method to update information on those open keys that + * have been updated in the backend.
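+ * <p>Only the size totals change on an update. For example, if an open + * key's data size grows from 1000 to 4000 bytes under three-way replication, + * the unreplicated total increases by 3000 and the replicated total by 9000, + * while the record count stays the same.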
+ */ + @Override + public void handleUpdateEvent(OMDBUpdateEvent event, + String tableName, + HashMap objectCountMap, + HashMap unReplicatedSizeMap, + HashMap replicatedSizeMap) { + + if (event.getValue() != null) { + if (event.getOldValue() == null) { + LOG.warn("Update event does not have the old Key Info for {}.", + event.getKey()); + return; + } + String unReplicatedSizeKey = getUnReplicatedSizeKeyFromTable(tableName); + String replicatedSizeKey = getReplicatedSizeKeyFromTable(tableName); + + // In Update event the count for the open table will not change. So we + // don't need to update the count. + OmKeyInfo oldKeyInfo = (OmKeyInfo) event.getOldValue(); + OmKeyInfo newKeyInfo = (OmKeyInfo) event.getValue(); + unReplicatedSizeMap.computeIfPresent(unReplicatedSizeKey, + (k, size) -> size - oldKeyInfo.getDataSize() + + newKeyInfo.getDataSize()); + replicatedSizeMap.computeIfPresent(replicatedSizeKey, + (k, size) -> size - oldKeyInfo.getReplicatedSize() + + newKeyInfo.getReplicatedSize()); + } else { + LOG.warn("Update event does not have the Key Info for {}.", + event.getKey()); + } + } + + /** + * This method is called by the reprocess method. It calculates the record + * counts for both the open key table and the open file table. Additionally, + * it computes the sizes of both replicated and unreplicated keys + * that are currently open in the backend. + */ + @Override + public Triple getTableSizeAndCount( + TableIterator> iterator) + throws IOException { + long count = 0; + long unReplicatedSize = 0; + long replicatedSize = 0; + + if (iterator != null) { + while (iterator.hasNext()) { + Table.KeyValue kv = iterator.next(); + if (kv != null && kv.getValue() != null) { + OmKeyInfo omKeyInfo = (OmKeyInfo) kv.getValue(); + unReplicatedSize += omKeyInfo.getDataSize(); + replicatedSize += omKeyInfo.getReplicatedSize(); + count++; + } + } + } + return Triple.of(count, unReplicatedSize, replicatedSize); + } + +} diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/OMMetadataManagerTestUtils.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/OMMetadataManagerTestUtils.java index 42d69e030f31..b1aecc9a4f4e 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/OMMetadataManagerTestUtils.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/OMMetadataManagerTestUtils.java @@ -397,23 +397,31 @@ public static void writeDirToOm(OMMetadataManager omMetadataManager, .build()); } + @SuppressWarnings("parameternumber") public static void writeDeletedDirToOm(OMMetadataManager omMetadataManager, String bucketName, String volumeName, String dirName, long parentObjectId, long bucketObjectId, - long volumeObjectId) + long volumeObjectId, + long objectId) throws IOException { - // DB key in DeletedDirectoryTable => "volumeID/bucketID/parentId/dirName" - String omKey = omMetadataManager.getOzonePathKey(volumeObjectId, - bucketObjectId, parentObjectId, dirName); + // DB key in DeletedDirectoryTable => + // "volumeID/bucketID/parentId/dirName/dirObjectId" + + String ozoneDbKey = omMetadataManager.getOzonePathKey(volumeObjectId, + bucketObjectId, parentObjectId, dirName); + String ozoneDeleteKey = omMetadataManager.getOzoneDeletePathKey( + objectId, ozoneDbKey); + - omMetadataManager.getDeletedDirTable().put(omKey, + omMetadataManager.getDeletedDirTable().put(ozoneDeleteKey, new OmKeyInfo.Builder() .setBucketName(bucketName) .setVolumeName(volumeName) .setKeyName(dirName) + .setObjectID(objectId) 
.setReplicationConfig(StandaloneReplicationConfig.getInstance(ONE)) .build()); } diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java index 7ac29e5d0f09..e0a4889350c7 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java @@ -280,8 +280,9 @@ private void initializeInjector() throws Exception { utilizationSchemaDefinition); fileSizeCountTask = new FileSizeCountTask(fileCountBySizeDao, utilizationSchemaDefinition); - omTableInsightTask = new OmTableInsightTask( - globalStatsDao, sqlConfiguration, reconOMMetadataManager); + omTableInsightTask = + new OmTableInsightTask(globalStatsDao, sqlConfiguration, + reconOMMetadataManager); containerHealthSchemaManager = reconTestInjector.getInstance(ContainerHealthSchemaManager.class); clusterStateEndpoint = @@ -504,11 +505,11 @@ public void setUp() throws Exception { // Populate the deletedDirectories table in OM DB writeDeletedDirToOm(reconOMMetadataManager, "Bucket1", "Volume1", "dir1", - 3L, 2L, 1L); + 3L, 2L, 1L, 23L); writeDeletedDirToOm(reconOMMetadataManager, "Bucket2", "Volume2", "dir2", - 6L, 5L, 4L); + 6L, 5L, 4L, 22L); writeDeletedDirToOm(reconOMMetadataManager, "Bucket3", "Volume3", "dir3", - 9L, 8L, 7L); + 9L, 8L, 7L, 21L); // Truncate global stats table before running each test dslContext.truncate(GLOBAL_STATS); @@ -583,7 +584,7 @@ public void testGetDatanodes() throws Exception { (DatanodesResponse) response1.getEntity(); DatanodeMetadata datanodeMetadata1 = datanodesResponse1.getDatanodes().stream().filter(datanodeMetadata -> - datanodeMetadata.getHostname().equals("host1.datanode")) + datanodeMetadata.getHostname().equals("host1.datanode")) .findFirst().orElse(null); return (datanodeMetadata1 != null && datanodeMetadata1.getContainers() == 1 && @@ -688,7 +689,7 @@ public void testGetMetricsResponse() throws Exception { byte[] fileBytes = FileUtils.readFileToByteArray( new File(classLoader.getResource(PROMETHEUS_TEST_RESPONSE_FILE) .getFile()) - ); + ); verify(outputStreamMock).write(fileBytes, 0, fileBytes.length); } diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestOmTableInsightTask.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestOmTableInsightTask.java index 9c0193e5020b..7472cba2619f 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestOmTableInsightTask.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestOmTableInsightTask.java @@ -21,20 +21,28 @@ import org.apache.commons.lang3.tuple.ImmutablePair; import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.hdds.client.StandaloneReplicationConfig; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.hdds.utils.db.TypedTable; import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; +import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; +import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; +import 
org.apache.hadoop.ozone.recon.ReconTestInjector; +import org.apache.hadoop.ozone.recon.api.types.NSSummary; import org.apache.hadoop.ozone.recon.persistence.AbstractReconSqlDBTest; import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager; -import org.apache.hadoop.ozone.recon.tasks.OMDBUpdateEvent.OMUpdateEventBuilder; - +import org.apache.hadoop.ozone.recon.spi.impl.ReconNamespaceSummaryManagerImpl; import org.hadoop.ozone.recon.schema.tables.daos.GlobalStatsDao; import org.jooq.DSLContext; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; +import org.mockito.Mock; +import org.mockito.MockitoAnnotations; import org.junit.jupiter.api.io.TempDir; import java.io.IOException; @@ -44,18 +52,20 @@ import java.util.Arrays; import java.util.List; -import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.FILE_TABLE; +import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.BUCKET_TABLE; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.KEY_TABLE; +import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.VOLUME_TABLE; +import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.DELETED_DIR_TABLE; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.OPEN_FILE_TABLE; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.OPEN_KEY_TABLE; -import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.VOLUME_TABLE; -import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.BUCKET_TABLE; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.DELETED_TABLE; -import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.writeDeletedKeysToOm; -import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.writeOpenKeyToOm; -import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.writeOpenFileToOm; import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getTestReconOmMetadataManager; import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.initializeNewOmMetadataManager; +import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.writeKeyToOm; +import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.writeDeletedDirToOm; +import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.writeOpenKeyToOm; +import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.writeOpenFileToOm; +import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.writeDeletedKeysToOm; import static org.apache.hadoop.ozone.recon.tasks.OMDBUpdateEvent.OMDBUpdateAction.DELETE; import static org.apache.hadoop.ozone.recon.tasks.OMDBUpdateEvent.OMDBUpdateAction.PUT; import static org.apache.hadoop.ozone.recon.tasks.OMDBUpdateEvent.OMDBUpdateAction.UPDATE; @@ -66,25 +76,79 @@ import static org.mockito.Mockito.when; /** - * Unit test for Object Count Task. + * This test class is designed for the OM Table Insight Task. It conducts tests + * for tables that require both Size and Count, as well as for those that only + * require Count. 
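+ * Size-and-count tables are the ones with a registered OmTableHandler + * (openKeyTable, openFileTable and deletedTable); the remaining task tables, + * such as keyTable, volumeTable and bucketTable, are tracked by count only.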
*/ public class TestOmTableInsightTask extends AbstractReconSqlDBTest { @TempDir private Path temporaryFolder; - private GlobalStatsDao globalStatsDao; - private OmTableInsightTask omTableInsightTask; - private DSLContext dslContext; + private static GlobalStatsDao globalStatsDao; + private static OmTableInsightTask omTableInsightTask; + private static DSLContext dslContext; private boolean isSetupDone = false; - private ReconOMMetadataManager reconOMMetadataManager; + private static ReconOMMetadataManager reconOMMetadataManager; + private static NSSummaryTaskWithFSO nSSummaryTaskWithFso; + private static OzoneConfiguration ozoneConfiguration; + private static ReconNamespaceSummaryManagerImpl reconNamespaceSummaryManager; + + // Object names in FSO-enabled format + private static final String VOL = "volume1"; + private static final String BUCKET_ONE = "bucket1"; + private static final String BUCKET_TWO = "bucket2"; + private static final String KEY_ONE = "file1"; + private static final String KEY_TWO = "file2"; + private static final String KEY_THREE = "dir1/dir2/file3"; + private static final String FILE_ONE = "file1"; + private static final String FILE_TWO = "file2"; + private static final String FILE_THREE = "file3"; + private static final String DIR_ONE = "dir1"; + private static final String DIR_TWO = "dir2"; + private static final String DIR_THREE = "dir3"; + + + private static final long VOL_OBJECT_ID = 0L; + private static final long BUCKET_ONE_OBJECT_ID = 1L; + private static final long BUCKET_TWO_OBJECT_ID = 2L; + private static final long KEY_ONE_OBJECT_ID = 3L; + private static final long DIR_ONE_OBJECT_ID = 14L; + private static final long KEY_TWO_OBJECT_ID = 5L; + private static final long DIR_TWO_OBJECT_ID = 17L; + private static final long KEY_THREE_OBJECT_ID = 8L; + private static final long DIR_THREE_OBJECT_ID = 10L; + + private static final long KEY_ONE_SIZE = 500L; + private static final long KEY_TWO_SIZE = 1025L; + private static final long KEY_THREE_SIZE = 2000L; + + // mock client's path requests + private static final String TEST_USER = "TestUser"; + + @Mock + private Table nsSummaryTable; private void initializeInjector() throws IOException { + ozoneConfiguration = new OzoneConfiguration(); reconOMMetadataManager = getTestReconOmMetadataManager( initializeNewOmMetadataManager(Files.createDirectory( temporaryFolder.resolve("JunitOmDBDir")).toFile()), Files.createDirectory(temporaryFolder.resolve("NewDir")).toFile()); globalStatsDao = getDao(GlobalStatsDao.class); + + ReconTestInjector reconTestInjector = + new ReconTestInjector.Builder(temporaryFolder.toFile()) + .withReconSqlDb() + .withReconOm(reconOMMetadataManager) + .withContainerDB() + .build(); + reconNamespaceSummaryManager = reconTestInjector.getInstance( + ReconNamespaceSummaryManagerImpl.class); + omTableInsightTask = new OmTableInsightTask( globalStatsDao, getConfiguration(), reconOMMetadataManager); + nSSummaryTaskWithFso = new NSSummaryTaskWithFSO( + reconNamespaceSummaryManager, reconOMMetadataManager, + ozoneConfiguration); dslContext = getDslContext(); } @@ -95,10 +159,182 @@ public void setUp() throws IOException { initializeInjector(); isSetupDone = true; } + MockitoAnnotations.openMocks(this); // Truncate table before running each test dslContext.truncate(GLOBAL_STATS); } + /** + * Populate OM-DB with the following structure. 
+ * volume1 + * | \ + * bucket1 bucket2 + * / \ \ + * dir1 dir2 dir3 + * / \ \ + * file1 file2 file3 + * + * @throws IOException + */ + private void populateOMDB() throws IOException { + + // Create 2 Buckets bucket1 and bucket2 + OmBucketInfo bucketInfo1 = OmBucketInfo.newBuilder() + .setVolumeName(VOL) + .setBucketName(BUCKET_ONE) + .setObjectID(BUCKET_ONE_OBJECT_ID) + .build(); + String bucketKey = reconOMMetadataManager.getBucketKey( + bucketInfo1.getVolumeName(), bucketInfo1.getBucketName()); + reconOMMetadataManager.getBucketTable().put(bucketKey, bucketInfo1); + OmBucketInfo bucketInfo2 = OmBucketInfo.newBuilder() + .setVolumeName(VOL) + .setBucketName(BUCKET_TWO) + .setObjectID(BUCKET_TWO_OBJECT_ID) + .build(); + bucketKey = reconOMMetadataManager.getBucketKey( + bucketInfo2.getVolumeName(), bucketInfo2.getBucketName()); + reconOMMetadataManager.getBucketTable().put(bucketKey, bucketInfo2); + + // Create a single volume named volume1 + String volumeKey = reconOMMetadataManager.getVolumeKey(VOL); + OmVolumeArgs args = + OmVolumeArgs.newBuilder() + .setObjectID(VOL_OBJECT_ID) + .setVolume(VOL) + .setAdminName(TEST_USER) + .setOwnerName(TEST_USER) + .build(); + reconOMMetadataManager.getVolumeTable().put(volumeKey, args); + + // Generate keys for the File Table + writeKeyToOm(reconOMMetadataManager, + KEY_ONE, + BUCKET_ONE, + VOL, + FILE_ONE, + KEY_ONE_OBJECT_ID, + DIR_ONE_OBJECT_ID, + BUCKET_ONE_OBJECT_ID, + VOL_OBJECT_ID, + KEY_ONE_SIZE, + BucketLayout.FILE_SYSTEM_OPTIMIZED); + writeKeyToOm(reconOMMetadataManager, + KEY_TWO, + BUCKET_ONE, + VOL, + FILE_TWO, + KEY_TWO_OBJECT_ID, + DIR_ONE_OBJECT_ID, + BUCKET_ONE_OBJECT_ID, + VOL_OBJECT_ID, + KEY_TWO_SIZE, + BucketLayout.FILE_SYSTEM_OPTIMIZED); + writeKeyToOm(reconOMMetadataManager, + KEY_THREE, + BUCKET_ONE, + VOL, + FILE_THREE, + KEY_THREE_OBJECT_ID, + DIR_TWO_OBJECT_ID, + BUCKET_ONE_OBJECT_ID, + VOL_OBJECT_ID, + KEY_THREE_SIZE, + BucketLayout.FILE_SYSTEM_OPTIMIZED); + + // Generate Deleted Directories in OM + writeDeletedDirToOm(reconOMMetadataManager, + BUCKET_ONE, + VOL, + DIR_ONE, + BUCKET_ONE_OBJECT_ID, + BUCKET_ONE_OBJECT_ID, + VOL_OBJECT_ID, + DIR_ONE_OBJECT_ID); + writeDeletedDirToOm(reconOMMetadataManager, + BUCKET_ONE, + VOL, + DIR_TWO, + BUCKET_ONE_OBJECT_ID, + BUCKET_ONE_OBJECT_ID, + VOL_OBJECT_ID, + DIR_TWO_OBJECT_ID); + writeDeletedDirToOm(reconOMMetadataManager, + BUCKET_TWO, + VOL, + DIR_THREE, + BUCKET_TWO_OBJECT_ID, + BUCKET_TWO_OBJECT_ID, + VOL_OBJECT_ID, + DIR_THREE_OBJECT_ID); + } + + @Test + public void testReprocessForDeletedDirectory() throws Exception { + // Create keys and deleted directories + populateOMDB(); + + // Generate NamespaceSummary for the OM DB + nSSummaryTaskWithFso.reprocessWithFSO(reconOMMetadataManager); + + Pair result = + omTableInsightTask.reprocess(reconOMMetadataManager); + assertTrue(result.getRight()); + assertEquals(3, getCountForTable(DELETED_DIR_TABLE)); + } + + @Test + public void testProcessForDeletedDirectoryTable() throws IOException { + // Prepare mock data size + Long expectedSize1 = 1000L; + Long expectedSize2 = 2000L; + NSSummary nsSummary1 = new NSSummary(); + NSSummary nsSummary2 = new NSSummary(); + nsSummary1.setSizeOfFiles(expectedSize1); + nsSummary2.setSizeOfFiles(expectedSize2); + when(nsSummaryTable.get(1L)).thenReturn(nsSummary1); + when(nsSummaryTable.get(2L)).thenReturn(nsSummary1); + when(nsSummaryTable.get(3L)).thenReturn(nsSummary2); + when(nsSummaryTable.get(4L)).thenReturn(nsSummary2); + when(nsSummaryTable.get(5L)).thenReturn(nsSummary2); + + /* DB key in 
DeletedDirectoryTable => + "/volumeId/bucketId/parentId/dirName/dirObjectId" */ + List paths = Arrays.asList( + "/18/28/22/dir1/1", + "/18/26/23/dir1/2", + "/18/20/24/dir1/3", + "/18/21/25/dir1/4", + "/18/27/26/dir1/5" + ); + + // Testing PUT events + // Create 5 OMDBUpdateEvent instances for 5 different deletedDirectory paths + ArrayList putEvents = new ArrayList<>(); + for (long i = 0L; i < 5L; i++) { + putEvents.add(getOMUpdateEvent(paths.get((int) i), + getOmKeyInfo("vol1", "bucket1", DIR_ONE, (i + 1), false), + DELETED_DIR_TABLE, PUT, null)); + } + OMUpdateEventBatch putEventBatch = new OMUpdateEventBatch(putEvents); + omTableInsightTask.process(putEventBatch); + assertEquals(5, getCountForTable(DELETED_DIR_TABLE)); + + + // Testing DELETE events + // Create 2 OMDBUpdateEvent instances for 2 different deletedDirectory paths + ArrayList deleteEvents = new ArrayList<>(); + deleteEvents.add(getOMUpdateEvent(paths.get(0), + getOmKeyInfo("vol1", "bucket1", DIR_ONE, 1L, false), DELETED_DIR_TABLE, + DELETE, null)); + deleteEvents.add(getOMUpdateEvent(paths.get(2), + getOmKeyInfo("vol1", "bucket1", DIR_ONE, 3L, false), DELETED_DIR_TABLE, + DELETE, null)); + OMUpdateEventBatch deleteEventBatch = new OMUpdateEventBatch(deleteEvents); + omTableInsightTask.process(deleteEventBatch); + assertEquals(3, getCountForTable(DELETED_DIR_TABLE)); + } + @Test public void testReprocessForCount() throws Exception { OMMetadataManager omMetadataManager = mock(OmMetadataManagerImpl.class); @@ -106,27 +342,32 @@ public void testReprocessForCount() throws Exception { // Mock 5 rows in each table and test the count for (String tableName : omTableInsightTask.getTaskTables()) { TypedTable table = mock(TypedTable.class); - TypedTable.TypedTableIterator mockIter = mock(TypedTable - .TypedTableIterator.class); + TypedTable.TypedTableIterator mockIter = + mock(TypedTable.TypedTableIterator.class); when(table.iterator()).thenReturn(mockIter); when(omMetadataManager.getTable(tableName)).thenReturn(table); - when(mockIter.hasNext()) - .thenReturn(true) - .thenReturn(true) - .thenReturn(true) - .thenReturn(true) - .thenReturn(true) - .thenReturn(false); + when(mockIter.hasNext()).thenReturn(true, true, true, true, true, false); + TypedTable.TypedKeyValue mockKeyValue = mock(TypedTable.TypedKeyValue.class); - when(mockKeyValue.getValue()).thenReturn(mock(OmKeyInfo.class)); + + if (tableName.equals(DELETED_TABLE)) { + RepeatedOmKeyInfo keyInfo = mock(RepeatedOmKeyInfo.class); + when(keyInfo.getTotalSize()).thenReturn(ImmutablePair.of(100L, 100L)); + when(keyInfo.getOmKeyInfoList()).thenReturn( + Arrays.asList(mock(OmKeyInfo.class))); + when(mockKeyValue.getValue()).thenReturn(keyInfo); + } else { + when(mockKeyValue.getValue()).thenReturn(mock(OmKeyInfo.class)); + } + when(mockIter.next()).thenReturn(mockKeyValue); } Pair result = omTableInsightTask.reprocess(omMetadataManager); - assertTrue(result.getRight()); + assertTrue(result.getRight()); assertEquals(5L, getCountForTable(KEY_TABLE)); assertEquals(5L, getCountForTable(VOLUME_TABLE)); assertEquals(5L, getCountForTable(BUCKET_TABLE)); @@ -134,7 +375,6 @@ public void testReprocessForCount() throws Exception { assertEquals(5L, getCountForTable(DELETED_TABLE)); } - @Test public void testReprocessForOpenKeyTable() throws Exception { // Populate the OpenKeys table in OM DB @@ -199,44 +439,73 @@ public void testReprocessForDeletedTable() throws Exception { @Test public void testProcessForCount() { - ArrayList events = new ArrayList<>(); - // Create 5 put, 1 delete and 1 update 
event for each table + List initialEvents = new ArrayList<>(); + + // Creating events for each table except the deleted table for (String tableName : omTableInsightTask.getTaskTables()) { + if (tableName.equals(DELETED_TABLE)) { + continue; // Skipping deleted table as it has a separate test + } + + // Adding 5 PUT events per table for (int i = 0; i < 5; i++) { - events.add(getOMUpdateEvent("item" + i, null, tableName, PUT, null)); + initialEvents.add( + getOMUpdateEvent("item" + i, mock(OmKeyInfo.class), tableName, PUT, + null)); } - // for delete event, if value is set to null, the counter will not be - // decremented. This is because the value will be null if item does not - // exist in the database and there is no need to delete. - events.add(getOMUpdateEvent("item0", mock(OmKeyInfo.class), tableName, - DELETE, null)); - events.add(getOMUpdateEvent("item1", null, tableName, UPDATE, null)); + + // Adding 1 DELETE event with a non-null value, which decrements the + // count for the table. + initialEvents.add( + getOMUpdateEvent("item0", mock(OmKeyInfo.class), tableName, DELETE, + null)); + // Adding 1 UPDATE event. This should not affect the count. + initialEvents.add( + getOMUpdateEvent("item1", mock(OmKeyInfo.class), tableName, UPDATE, + mock(OmKeyInfo.class))); } - OMUpdateEventBatch omUpdateEventBatch = new OMUpdateEventBatch(events); - omTableInsightTask.process(omUpdateEventBatch); - // Verify 4 items in each table. (5 puts - 1 delete + 0 update) - assertEquals(4L, getCountForTable(KEY_TABLE)); - assertEquals(4L, getCountForTable(VOLUME_TABLE)); - assertEquals(4L, getCountForTable(BUCKET_TABLE)); - assertEquals(4L, getCountForTable(FILE_TABLE)); + // Processing the initial batch of events + OMUpdateEventBatch initialBatch = new OMUpdateEventBatch(initialEvents); + omTableInsightTask.process(initialBatch); - // add a new key and simulate delete on non-existing item (value: null) - ArrayList newEvents = new ArrayList<>(); + // Verifying the count in each table for (String tableName : omTableInsightTask.getTaskTables()) { - newEvents.add(getOMUpdateEvent("item5", null, tableName, PUT, null)); - // This delete event should be a noop since value is null - newEvents.add(getOMUpdateEvent("item0", null, tableName, DELETE, null)); + if (tableName.equals(DELETED_TABLE)) { + continue; + } + assertEquals(4L, getCountForTable( + tableName)); // 4 items expected after processing (5 puts - 1 delete) } - omUpdateEventBatch = new OMUpdateEventBatch(newEvents); - omTableInsightTask.process(omUpdateEventBatch); + List additionalEvents = new ArrayList<>(); + // Simulating new PUT and DELETE events + for (String tableName : omTableInsightTask.getTaskTables()) { + if (tableName.equals(DELETED_TABLE)) { + continue; + } + // Adding 1 new PUT event + additionalEvents.add( + getOMUpdateEvent("item6", mock(OmKeyInfo.class), tableName, PUT, + null)); + // Attempting to delete a non-existing item (value: null) + additionalEvents.add( + getOMUpdateEvent("item0", null, tableName, DELETE, null)); + } - // Verify 5 items in each table. 
   @Test
@@ -247,35 +516,38 @@ public void testProcessForOpenKeyTableAndOpenFileTable() {
     when(omKeyInfo.getDataSize()).thenReturn(sizeToBeReturned);
     when(omKeyInfo.getReplicatedSize()).thenReturn(sizeToBeReturned * 3);
 
-    // Test PUT events
+    // Test PUT events.
+    // Add 5 PUT events each for OpenKeyTable and OpenFileTable.
     ArrayList<OMDBUpdateEvent> putEvents = new ArrayList<>();
-    for (String tableName : omTableInsightTask.getTablesToCalculateSize()) {
-      for (int i = 0; i < 5; i++) {
-        putEvents.add(
-            getOMUpdateEvent("item" + i, omKeyInfo, tableName, PUT, null));
-      }
+    for (int i = 0; i < 10; i++) {
+      String table = (i < 5) ? OPEN_KEY_TABLE : OPEN_FILE_TABLE;
+      putEvents.add(getOMUpdateEvent("item" + i, omKeyInfo, table, PUT, null));
     }
+
     OMUpdateEventBatch putEventBatch = new OMUpdateEventBatch(putEvents);
     omTableInsightTask.process(putEventBatch);
-    // After 5 PUTs, size should be 5 * 1000 = 5000 for each size-related table
-    for (String tableName : omTableInsightTask.getTablesToCalculateSize()) {
+    // After 5 PUTs per table, size should be 5 * 1000 = 5000 in each
+    for (String tableName : Arrays.asList(OPEN_KEY_TABLE, OPEN_FILE_TABLE)) {
       assertEquals(5000L, getUnReplicatedSizeForTable(tableName));
       assertEquals(15000L, getReplicatedSizeForTable(tableName));
     }
 
     // Test DELETE events
     ArrayList<OMDBUpdateEvent> deleteEvents = new ArrayList<>();
-    for (String tableName : omTableInsightTask.getTablesToCalculateSize()) {
-      // Delete "item0"
-      deleteEvents.add(
-          getOMUpdateEvent("item0", omKeyInfo, tableName, DELETE, null));
-    }
+    // Delete "item0" from OpenKeyTable and OpenFileTable.
+    deleteEvents.add(
+        getOMUpdateEvent("item0", omKeyInfo, OPEN_KEY_TABLE, DELETE, null));
+    deleteEvents.add(
+        getOMUpdateEvent("item0", omKeyInfo, OPEN_FILE_TABLE, DELETE, null));
+
     OMUpdateEventBatch deleteEventBatch = new OMUpdateEventBatch(deleteEvents);
     omTableInsightTask.process(deleteEventBatch);
     // After deleting "item0", size should be 4 * 1000 = 4000
-    for (String tableName : omTableInsightTask.getTablesToCalculateSize()) {
+    for (String tableName : Arrays.asList(OPEN_KEY_TABLE, OPEN_FILE_TABLE)) {
       assertEquals(4000L, getUnReplicatedSizeForTable(tableName));
       assertEquals(12000L, getReplicatedSizeForTable(tableName));
     }
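For the size assertions above: the task tracks an unreplicated total (the sum of data sizes) and a replicated total (mocked here as dataSize * 3), both floored at zero on deletes, mirroring the guards in DeletedKeysInsightHandler. A minimal sketch under those assumptions; class and method names are illustrative:

// SizeTrackingSketch.java -- minimal sketch, illustrative names; assumes a
// replicated size of dataSize * 3 as mocked in the test above.
public class SizeTrackingSketch {
  private long unReplicated;
  private long replicated;

  void onPut(long dataSize) {
    unReplicated += dataSize;    // 5 puts of 1000 => 5000
    replicated += dataSize * 3;  // 5 puts of 3000 => 15000
  }

  void onDelete(long dataSize) {
    // Floored at zero, mirroring the handler's underflow guards.
    unReplicated = Math.max(unReplicated - dataSize, 0L);
    replicated = Math.max(replicated - dataSize * 3, 0L);
  }

  public static void main(String[] args) {
    SizeTrackingSketch s = new SizeTrackingSketch();
    for (int i = 0; i < 5; i++) {
      s.onPut(1000L);
    }
    s.onDelete(1000L);
    System.out.println(s.unReplicated + " " + s.replicated); // 4000 12000
  }
}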
@@ -283,7 +555,8 @@ public void testProcessForOpenKeyTableAndOpenFileTable() {
     // Test UPDATE events
     ArrayList<OMDBUpdateEvent> updateEvents = new ArrayList<>();
     Long newSizeToBeReturned = 2000L;
-    for (String tableName : omTableInsightTask.getTablesToCalculateSize()) {
+    for (String tableName : Arrays.asList(OPEN_KEY_TABLE, OPEN_FILE_TABLE)) {
       // Update "item1" with a new size
       OmKeyInfo newKeyInfo = mock(OmKeyInfo.class);
       when(newKeyInfo.getDataSize()).thenReturn(newSizeToBeReturned);
@@ -291,12 +564,14 @@ public void testProcessForOpenKeyTableAndOpenFileTable() {
       updateEvents.add(
           getOMUpdateEvent("item1", newKeyInfo, tableName, UPDATE, omKeyInfo));
     }
+
     OMUpdateEventBatch updateEventBatch = new OMUpdateEventBatch(updateEvents);
     omTableInsightTask.process(updateEventBatch);
 
     // After updating "item1", size should be 4000 - 1000 + 2000 = 5000
     // presentValue - oldValue + newValue = updatedValue
-    for (String tableName : omTableInsightTask.getTablesToCalculateSize()) {
+    for (String tableName : Arrays.asList(OPEN_KEY_TABLE, OPEN_FILE_TABLE)) {
       assertEquals(5000L, getUnReplicatedSizeForTable(tableName));
       assertEquals(15000L, getReplicatedSizeForTable(tableName));
     }
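The UPDATE path applies presentValue - oldValue + newValue. A tiny standalone check of the arithmetic asserted above; the values come from the mocked sizes, and the class name is illustrative:

// UpdateMathSketch.java -- standalone check of the update arithmetic.
public class UpdateMathSketch {
  public static void main(String[] args) {
    long present = 4000L;  // size after the DELETE step
    long oldSize = 1000L;  // "item1" before the update
    long newSize = 2000L;  // "item1" after the update
    long updated = present - oldSize + newSize;
    System.out.println(updated); // 5000
    // Replicated sizes follow the same rule: 12000 - 3000 + 6000 = 15000.
  }
}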
@@ -309,9 +584,10 @@ public void testProcessForDeletedTable() {
         new ImmutablePair<>(1000L, 3000L);
     ArrayList<OmKeyInfo> omKeyInfoList = new ArrayList<>();
     // Add 5 OmKeyInfo objects to the list
-    for (int i = 0; i < 5; i++) {
+    for (long i = 0; i < 5; i++) {
       OmKeyInfo omKeyInfo =
-          getOmKeyInfo("sampleVol", "non_fso_Bucket", "non_fso_key1", true);
+          getOmKeyInfo("sampleVol", "non_fso_Bucket", "non_fso_key1", i + 1,
+              true);
       // Set properties of OmKeyInfo object if needed
       omKeyInfoList.add(omKeyInfo);
     }
@@ -349,38 +625,14 @@ public void testProcessForDeletedTable() {
     // After deleting "item0", size should be 4 * 1000 = 4000
     assertEquals(4000L, getUnReplicatedSizeForTable(DELETED_TABLE));
     assertEquals(12000L, getReplicatedSizeForTable(DELETED_TABLE));
-
-
-    // Test UPDATE events
-    ArrayList<OMDBUpdateEvent> updateEvents = new ArrayList<>();
-    // Update "item1" with new sizes
-    ImmutablePair<Long, Long> newSizesToBeReturned =
-        new ImmutablePair<>(500L, 1500L);
-    RepeatedOmKeyInfo newRepeatedOmKeyInfo = mock(RepeatedOmKeyInfo.class);
-    when(newRepeatedOmKeyInfo.getTotalSize()).thenReturn(newSizesToBeReturned);
-    when(newRepeatedOmKeyInfo.getOmKeyInfoList()).thenReturn(
-        omKeyInfoList.subList(1, 5));
-    OMUpdateEventBatch updateEventBatch = new OMUpdateEventBatch(updateEvents);
-    // For item1, newSize=500 and totalCount of deleted keys should be 4
-    updateEvents.add(
-        getOMUpdateEvent("item1", newRepeatedOmKeyInfo, DELETED_TABLE, UPDATE,
-            repeatedOmKeyInfo));
-    omTableInsightTask.process(updateEventBatch);
-    // Since one key has been deleted, total deleted keys should be 19
-    assertEquals(19L, getCountForTable(DELETED_TABLE));
-    // After updating "item1", size should be 4000 - 1000 + 500 = 3500
-    // presentValue - oldValue + newValue = updatedValue
-    assertEquals(3500L, getUnReplicatedSizeForTable(DELETED_TABLE));
-    assertEquals(10500L, getReplicatedSizeForTable(DELETED_TABLE));
   }
 
-
   private OMDBUpdateEvent getOMUpdateEvent(
       String name, Object value, String table,
       OMDBUpdateEvent.OMDBUpdateAction action, Object oldValue) {
-    return new OMUpdateEventBuilder()
+    return new OMDBUpdateEvent.OMUpdateEventBuilder()
         .setAction(action)
         .setKey(name)
         .setValue(value)
@@ -405,7 +657,8 @@ private long getReplicatedSizeForTable(String tableName) {
   }
 
   private OmKeyInfo getOmKeyInfo(String volumeName, String bucketName,
-                                 String keyName, boolean isFile) {
+                                 String keyName, Long objectID,
+                                 boolean isFile) {
     return new OmKeyInfo.Builder()
         .setVolumeName(volumeName)
         .setBucketName(bucketName)
@@ -414,6 +667,7 @@ private OmKeyInfo getOmKeyInfo(String volumeName, String bucketName,
         .setReplicationConfig(StandaloneReplicationConfig
            .getInstance(HddsProtos.ReplicationFactor.ONE))
         .setDataSize(100L)
+        .setObjectID(objectID)
         .build();
   }
 }
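For reference, the deleted-directory keys used in the first test follow the layout "/volumeId/bucketId/parentId/dirName/dirObjectId", and the objectID parameter added to getOmKeyInfo supplies the trailing component. A hypothetical helper, not part of the patch, showing how the sample paths are composed:

// DeletedDirPathSketch.java -- hypothetical helper, illustrative only.
public class DeletedDirPathSketch {
  static String path(long volumeId, long bucketId, long parentId,
                     String dirName, long dirObjectId) {
    return "/" + volumeId + "/" + bucketId + "/" + parentId
        + "/" + dirName + "/" + dirObjectId;
  }

  public static void main(String[] args) {
    System.out.println(path(18, 28, 22, "dir1", 1)); // "/18/28/22/dir1/1"
  }
}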