From a19f92a989ad2232208deb8761174d05c43f03ec Mon Sep 17 00:00:00 2001 From: arafat Date: Tue, 31 Oct 2023 11:15:23 +0530 Subject: [PATCH 01/19] HDDS-9534. Support namespace summaries (du, dist & counts) for LEGACY buckets with file system disabled. --- .../api/handlers/LegacyBucketHandler.java | 70 +++++++++++++++++ .../ozone/recon/api/types/NSSummary.java | 15 +++- .../ozone/recon/codec/NSSummaryCodec.java | 16 ++-- .../ozone/recon/tasks/NSSummaryTask.java | 51 ++++--------- .../tasks/NSSummaryTaskDbEventHandler.java | 3 +- .../recon/tasks/NSSummaryTaskWithFSO.java | 6 +- .../recon/tasks/NSSummaryTaskWithLegacy.java | 75 ++++++++++++------- 7 files changed, 158 insertions(+), 78 deletions(-) diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/LegacyBucketHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/LegacyBucketHandler.java index 09f1c5bc7454..bb9babb51d67 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/LegacyBucketHandler.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/LegacyBucketHandler.java @@ -212,6 +212,75 @@ public long handleDirectKeys(long parentId, boolean withReplica, return 0; } + if (nsSummary.isObjectStore()) { + keyDataSizeWithReplica += handleDirectKeysForOBSLayout( + parentId, withReplica, listFile, duData, keyTable, seekPrefix ); + } else { + keyDataSizeWithReplica += handleDirectKeysForFSOLayout( + parentId, withReplica, listFile, normalizedPath,duData, keyTable, seekPrefix, nsSummary); + } + + return keyDataSizeWithReplica; + } + + + public long handleDirectKeysForOBSLayout(long parentId, boolean withReplica, + boolean listFile, + List duData, + Table keyTable, + String seekPrefix) + throws IOException { + + long keyDataSizeWithReplica = 0L; + + TableIterator> + iterator = keyTable.iterator(); + + iterator.seek(seekPrefix); + + while (iterator.hasNext()) { + // KeyName : OmKeyInfo-Object + Table.KeyValue kv = iterator.next(); + String dbKey = kv.getKey(); + + // Exit loop if the key doesn't match the seekPrefix. + if (!dbKey.startsWith(seekPrefix)) { + break; + } + + OmKeyInfo keyInfo = kv.getValue(); + if (keyInfo != null) { + DUResponse.DiskUsage diskUsage = new DUResponse.DiskUsage(); + String objectName = keyInfo.getKeyName(); + diskUsage.setSubpath(objectName); + diskUsage.setKey(true); + diskUsage.setSize(keyInfo.getDataSize()); + + if (withReplica) { + long keyDU = keyInfo.getReplicatedSize(); + keyDataSizeWithReplica += keyDU; + diskUsage.setSizeWithReplica(keyDU); + } + // List all the keys for the OBS bucket if requested. + if (listFile) { + duData.add(diskUsage); + } + } + } + + return keyDataSizeWithReplica; + } + + public long handleDirectKeysForFSOLayout(long parentId, boolean withReplica, + boolean listFile, + String normalizedPath, + List duData, + Table keyTable, + String seekPrefix, + NSSummary nsSummary) throws IOException { + + long keyDataSizeWithReplica = 0L; + if (omBucketInfo.getObjectID() != parentId) { String dirName = nsSummary.getDirName(); seekPrefix += dirName; @@ -267,6 +336,7 @@ public long handleDirectKeys(long parentId, boolean withReplica, return keyDataSizeWithReplica; } + /** * Given a valid path request for a directory, * return the directory object ID. 
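Note: both new handleDirectKeysFor*Layout variants above rely on the same RocksDB access pattern: seek the key-table iterator to a prefix, then stop at the first key that no longer starts with that prefix, which works because keys are stored in sorted order. A minimal sketch of that pattern, assuming a Table<String, OmKeyInfo> keyTable and a seekPrefix as built above (names illustrative; the try-with-resources form, which a later patch in this series adopts, is used here so the iterator is closed):

    long total = 0L;
    try (TableIterator<String, ? extends Table.KeyValue<String, OmKeyInfo>>
             iterator = keyTable.iterator()) {
      iterator.seek(seekPrefix);
      while (iterator.hasNext()) {
        Table.KeyValue<String, OmKeyInfo> kv = iterator.next();
        if (!kv.getKey().startsWith(seekPrefix)) {
          break; // keys are sorted, so nothing past the prefix can match
        }
        total += kv.getValue().getDataSize(); // raw size; replica-aware size via getReplicatedSize()
      }
    }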
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/NSSummary.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/NSSummary.java index c0f93aebe97d..ac0d19b09360 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/NSSummary.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/NSSummary.java @@ -36,22 +36,25 @@ public class NSSummary { private int[] fileSizeBucket; private Set childDir; private String dirName; + private boolean isObjectStoreEntity; public NSSummary() { this(0, 0L, new int[ReconConstants.NUM_OF_FILE_SIZE_BINS], - new HashSet<>(), ""); + new HashSet<>(), "", false); } public NSSummary(int numOfFiles, long sizeOfFiles, int[] bucket, Set childDir, - String dirName) { + String dirName, + boolean isObjectStore) { this.numOfFiles = numOfFiles; this.sizeOfFiles = sizeOfFiles; setFileSizeBucket(bucket); this.childDir = childDir; this.dirName = dirName; + this.isObjectStoreEntity = isObjectStore; } public int getNumOfFiles() { @@ -95,6 +98,14 @@ public void setDirName(String dirName) { this.dirName = removeTrailingSlashIfNeeded(dirName); } + public boolean isObjectStore() { + return isObjectStoreEntity; + } + + public void setIsObjectStore(boolean isObjectStoreEntity) { + this.isObjectStoreEntity = isObjectStoreEntity; + } + public void addChildDir(long childId) { if (this.childDir.contains(childId)) { return; diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/codec/NSSummaryCodec.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/codec/NSSummaryCodec.java index 09e0b2587934..635872dc70d0 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/codec/NSSummaryCodec.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/codec/NSSummaryCodec.java @@ -18,13 +18,9 @@ package org.apache.hadoop.ozone.recon.codec; -import org.apache.hadoop.hdds.utils.db.IntegerCodec; -import org.apache.hadoop.hdds.utils.db.LongCodec; -import org.apache.hadoop.hdds.utils.db.ShortCodec; -import org.apache.hadoop.hdds.utils.db.StringCodec; +import org.apache.hadoop.hdds.utils.db.*; import org.apache.hadoop.ozone.recon.ReconConstants; import org.apache.hadoop.ozone.recon.api.types.NSSummary; -import org.apache.hadoop.hdds.utils.db.Codec; import java.io.ByteArrayInputStream; import java.io.ByteArrayOutputStream; @@ -49,6 +45,7 @@ public static Codec get() { private final Codec shortCodec = ShortCodec.get(); private final Codec longCodec = LongCodec.get(); private final Codec stringCodec = StringCodec.get(); + private final Codec booleanCodec = BooleanCodec.get(); // 1 int fields + 41-length int array // + 2 dummy field to track list size/dirName length private static final int NUM_OF_INTS = @@ -64,10 +61,13 @@ public byte[] toPersistedFormat(NSSummary object) throws IOException { String dirName = object.getDirName(); int stringLen = dirName.getBytes(StandardCharsets.UTF_8).length; int numOfChildDirs = childDirs.size(); + boolean isObjectStoreEntity = object.isObjectStore(); final int resSize = NUM_OF_INTS * Integer.BYTES + (numOfChildDirs + 1) * Long.BYTES // 1 long field + list size + Short.BYTES // 2 dummy shorts to track length - + stringLen; // directory name length + + stringLen // directory name length + + 0; // for isObjectStoreEntity as boolean + ByteArrayOutputStream out = new ByteArrayOutputStream(resSize); out.write(integerCodec.toPersistedFormat(object.getNumOfFiles())); @@ -84,6 +84,7 @@ public 
byte[] toPersistedFormat(NSSummary object) throws IOException { } out.write(integerCodec.toPersistedFormat(stringLen)); out.write(stringCodec.toPersistedFormat(dirName)); + out.write(booleanCodec.toPersistedFormat(isObjectStoreEntity)); return out.toByteArray(); } @@ -117,6 +118,8 @@ public NSSummary fromPersistedFormat(byte[] rawData) throws IOException { assert (bytesRead == strLen); String dirName = stringCodec.fromPersistedFormat(buffer); res.setDirName(dirName); + boolean isObjectStoreEntity = in.readBoolean(); // Read boolean directly + res.setIsObjectStore(isObjectStoreEntity); // Set isObjectStoreEntity return res; } @@ -128,6 +131,7 @@ public NSSummary copyObject(NSSummary object) { copy.setFileSizeBucket(object.getFileSizeBucket()); copy.setChildDir(object.getChildDir()); copy.setDirName(object.getDirName()); + copy.setIsObjectStore(object.isObjectStore()); // Copy isObjectStoreEntity return copy; } } diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTask.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTask.java index 5c3395084464..61cc8ad10c25 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTask.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTask.java @@ -37,8 +37,6 @@ import java.util.concurrent.ExecutionException; import java.util.concurrent.Callable; import java.util.concurrent.Future; -import java.util.concurrent.ThreadFactory; -import java.util.concurrent.TimeUnit; /** * Task to query data from OMDB and write into Recon RocksDB. @@ -69,7 +67,6 @@ public class NSSummaryTask implements ReconOmTask { private final ReconOMMetadataManager reconOMMetadataManager; private final NSSummaryTaskWithFSO nsSummaryTaskWithFSO; private final NSSummaryTaskWithLegacy nsSummaryTaskWithLegacy; - private final NSSummaryTaskWithOBS nsSummaryTaskWithOBS; private final OzoneConfiguration ozoneConfiguration; @Inject @@ -88,9 +85,6 @@ public NSSummaryTask(ReconNamespaceSummaryManager this.nsSummaryTaskWithLegacy = new NSSummaryTaskWithLegacy( reconNamespaceSummaryManager, reconOMMetadataManager, ozoneConfiguration); - this.nsSummaryTaskWithOBS = new NSSummaryTaskWithOBS( - reconNamespaceSummaryManager, - reconOMMetadataManager, ozoneConfiguration); } @Override @@ -100,28 +94,20 @@ public String getTaskName() { @Override public Pair process(OMUpdateEventBatch events) { - boolean success = nsSummaryTaskWithFSO.processWithFSO(events); - if (!success) { + boolean success; + success = nsSummaryTaskWithFSO.processWithFSO(events); + if (success) { + success = nsSummaryTaskWithLegacy.processWithLegacy(events); + } else { LOG.error("processWithFSO failed."); } - success = nsSummaryTaskWithLegacy.processWithLegacy(events); - if (!success) { - LOG.error("processWithLegacy failed."); - } - success = nsSummaryTaskWithOBS.processWithOBS(events); - if (!success) { - LOG.error("processWithOBS failed."); - } return new ImmutablePair<>(getTaskName(), success); } @Override public Pair reprocess(OMMetadataManager omMetadataManager) { - // Initialize a list of tasks to run in parallel Collection> tasks = new ArrayList<>(); - long startTime = System.nanoTime(); // Record start time - try { // reinit Recon RocksDB's namespace CF. 
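// Clearing the table up front keeps rebuilt summaries from mixing with
// stale entries; the FSO and Legacy sub-tasks added to the list below then
// repopulate it in parallel via invokeAll().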
reconNamespaceSummaryManager.clearNSSummaryTable(); @@ -135,15 +121,12 @@ public Pair reprocess(OMMetadataManager omMetadataManager) { .reprocessWithFSO(omMetadataManager)); tasks.add(() -> nsSummaryTaskWithLegacy .reprocessWithLegacy(reconOMMetadataManager)); - tasks.add(() -> nsSummaryTaskWithOBS - .reprocessWithOBS(reconOMMetadataManager)); List> results; - ThreadFactory threadFactory = new ThreadFactoryBuilder() - .setNameFormat("Recon-NSSummaryTask-%d") - .build(); - ExecutorService executorService = Executors.newFixedThreadPool(2, - threadFactory); + ExecutorService executorService = Executors + .newFixedThreadPool(2, + new ThreadFactoryBuilder().setNameFormat("NSSummaryTask - %d") + .build()); try { results = executorService.invokeAll(tasks); for (int i = 0; i < results.size(); i++) { @@ -152,25 +135,17 @@ public Pair reprocess(OMMetadataManager omMetadataManager) { } } } catch (InterruptedException ex) { - LOG.error("Error while reprocessing NSSummary table in Recon DB.", ex); + LOG.error("Error while reprocessing NSSummary " + + "table in Recon DB. ", ex); return new ImmutablePair<>(getTaskName(), false); } catch (ExecutionException ex2) { - LOG.error("Error while reprocessing NSSummary table in Recon DB.", ex2); + LOG.error("Error while reprocessing NSSummary " + + "table in Recon DB. ", ex2); return new ImmutablePair<>(getTaskName(), false); } finally { executorService.shutdown(); - - long endTime = System.nanoTime(); - // Convert to milliseconds - long durationInMillis = - TimeUnit.NANOSECONDS.toMillis(endTime - startTime); - - // Log performance metrics - LOG.info("Task execution time: {} milliseconds", durationInMillis); } - return new ImmutablePair<>(getTaskName(), true); } - } diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskDbEventHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskDbEventHandler.java index f00d83e64a52..e5b8a7887c42 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskDbEventHandler.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskDbEventHandler.java @@ -87,7 +87,7 @@ protected void writeNSSummariesToDB(Map nsSummaryMap) } protected void handlePutKeyEvent(OmKeyInfo keyInfo, Map nsSummaryMap) throws IOException { + NSSummary> nsSummaryMap, boolean isObjectStore) throws IOException { long parentObjectId = keyInfo.getParentObjectID(); // Try to get the NSSummary from our local map that maps NSSummaries to IDs NSSummary nsSummary = nsSummaryMap.get(parentObjectId); @@ -110,6 +110,7 @@ protected void handlePutKeyEvent(OmKeyInfo keyInfo, Map kv = keyTableIter.next(); OmKeyInfo keyInfo = kv.getValue(); - handlePutKeyEvent(keyInfo, nsSummaryMap); + handlePutKeyEvent(keyInfo, nsSummaryMap, true); if (!checkAndCallFlushToDB(nsSummaryMap)) { return false; } diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskWithLegacy.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskWithLegacy.java index ec1ccd0542fc..357b263bd28b 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskWithLegacy.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskWithLegacy.java @@ -115,12 +115,13 @@ public boolean processWithLegacy(OMUpdateEventBatch events) { continue; } - setKeyParentID(updatedKeyInfo); +// setKeyParentID(updatedKeyInfo); if 
(!updatedKeyInfo.getKeyName().endsWith(OM_KEY_PREFIX)) { switch (action) { case PUT: - handlePutKeyEvent(updatedKeyInfo, nsSummaryMap); + handlePutKeyEvent(updatedKeyInfo, nsSummaryMap, + !enableFileSystemPaths); break; case DELETE: @@ -130,13 +131,14 @@ public boolean processWithLegacy(OMUpdateEventBatch events) { case UPDATE: if (oldKeyInfo != null) { // delete first, then put - setKeyParentID(oldKeyInfo); +// setKeyParentID(oldKeyInfo); handleDeleteKeyEvent(oldKeyInfo, nsSummaryMap); } else { LOG.warn("Update event does not have the old keyInfo for {}.", updatedKey); } - handlePutKeyEvent(updatedKeyInfo, nsSummaryMap); + handlePutKeyEvent(updatedKeyInfo, nsSummaryMap, + !enableFileSystemPaths); break; default: @@ -231,23 +233,30 @@ public boolean reprocessWithLegacy(OMMetadataManager omMetadataManager) { OmBucketInfo omBucketInfo = omMetadataManager .getBucketTable().getSkipCache(bucketDBKey); - if (omBucketInfo.getBucketLayout() - .isObjectStore(enableFileSystemPaths)) { + // Skip if Bucket is not a Legacy bucket. + if (omBucketInfo.getBucketLayout() != BUCKET_LAYOUT) { continue; } - setKeyParentID(keyInfo); - - if (keyInfo.getKeyName().endsWith(OM_KEY_PREFIX)) { - OmDirectoryInfo directoryInfo = - new OmDirectoryInfo.Builder() - .setName(keyInfo.getKeyName()) - .setObjectID(keyInfo.getObjectID()) - .setParentObjectID(keyInfo.getParentObjectID()) - .build(); - handlePutDirEvent(directoryInfo, nsSummaryMap); + if (enableFileSystemPaths) { + // The LEGACY bucket is a file system bucket. + setParentDirectoryId(keyInfo); + + if (keyInfo.getKeyName().endsWith(OM_KEY_PREFIX)) { + OmDirectoryInfo directoryInfo = + new OmDirectoryInfo.Builder() + .setName(keyInfo.getKeyName()) + .setObjectID(keyInfo.getObjectID()) + .setParentObjectID(keyInfo.getParentObjectID()) + .build(); + handlePutDirEvent(directoryInfo, nsSummaryMap); + } else { + handlePutKeyEvent(keyInfo, nsSummaryMap,false); + } } else { - handlePutKeyEvent(keyInfo, nsSummaryMap); + // The LEGACY bucket is an object store bucket. + setParentBucketId(keyInfo); + handlePutKeyEvent(keyInfo, nsSummaryMap, true); } if (!checkAndCallFlushToDB(nsSummaryMap)) { return false; @@ -275,7 +284,7 @@ public boolean reprocessWithLegacy(OMMetadataManager omMetadataManager) { * @param keyInfo * @throws IOException */ - private void setKeyParentID(OmKeyInfo keyInfo) throws IOException { + private void setParentDirectoryId(OmKeyInfo keyInfo) throws IOException { String[] keyPath = keyInfo.getKeyName().split(OM_KEY_PREFIX); // If the path contains only one key then keyPath.length @@ -300,17 +309,27 @@ private void setKeyParentID(OmKeyInfo keyInfo) throws IOException { "NSSummaryTaskWithLegacy is null"); } } else { - String bucketKey = getReconOMMetadataManager() - .getBucketKey(keyInfo.getVolumeName(), keyInfo.getBucketName()); - OmBucketInfo parentBucketInfo = - getReconOMMetadataManager().getBucketTable().getSkipCache(bucketKey); + setParentBucketId(keyInfo); + } + } - if (parentBucketInfo != null) { - keyInfo.setParentObjectID(parentBucketInfo.getObjectID()); - } else { - throw new IOException("ParentKeyInfo for " + - "NSSummaryTaskWithLegacy is null"); - } + /** + * Set the parent object ID for a bucket. 
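+ * For a key in an OBS-style LEGACY bucket there is no directory chain, so
+ * the bucket itself is recorded as the key's direct parent.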
+ * @param keyInfo
+ * @throws IOException
+ */
+ private void setParentBucketId(OmKeyInfo keyInfo)
+ throws IOException {
+ String bucketKey = getReconOMMetadataManager()
+ .getBucketKey(keyInfo.getVolumeName(), keyInfo.getBucketName());
+ OmBucketInfo parentBucketInfo =
+ getReconOMMetadataManager().getBucketTable().getSkipCache(bucketKey);
+
+ if (parentBucketInfo != null) {
+ keyInfo.setParentObjectID(parentBucketInfo.getObjectID());
+ } else {
+ throw new IOException("ParentKeyInfo for " +
+ "NSSummaryTaskWithLegacy is null");
}
}
}

From fdad043e4d51aa2fa61fcf154289977b8cd7f352 Mon Sep 17 00:00:00 2001
From: arafat
Date: Tue, 31 Oct 2023 15:32:00 +0530
Subject: [PATCH 02/19] Removed the extra parameter added to NSSummary

--- .../hadoop/ozone/recon/api/types/NSSummary.java | 15 ++------------- .../hadoop/ozone/recon/codec/NSSummaryCodec.java | 16 ++++++---------- 2 files changed, 8 insertions(+), 23 deletions(-)

diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/NSSummary.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/NSSummary.java index ac0d19b09360..c0f93aebe97d 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/NSSummary.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/NSSummary.java
@@ -36,25 +36,22 @@ public class NSSummary {
private int[] fileSizeBucket;
private Set<Long> childDir;
private String dirName;
- private boolean isObjectStoreEntity;
public NSSummary() {
this(0, 0L, new int[ReconConstants.NUM_OF_FILE_SIZE_BINS],
- new HashSet<>(), "", false);
+ new HashSet<>(), "");
}
public NSSummary(int numOfFiles,
long sizeOfFiles,
int[] bucket,
Set<Long> childDir,
- String dirName,
- boolean isObjectStore) {
+ String dirName) {
this.numOfFiles = numOfFiles;
this.sizeOfFiles = sizeOfFiles;
setFileSizeBucket(bucket);
this.childDir = childDir;
this.dirName = dirName;
- this.isObjectStoreEntity = isObjectStore;
}
public int getNumOfFiles() {
@@ -98,14 +95,6 @@ public void setDirName(String dirName) {
this.dirName = removeTrailingSlashIfNeeded(dirName);
}
- public boolean isObjectStore() {
- return isObjectStoreEntity;
- }
-
- public void setIsObjectStore(boolean isObjectStoreEntity) {
- this.isObjectStoreEntity = isObjectStoreEntity;
- }
-
public void addChildDir(long childId) {
if (this.childDir.contains(childId)) {
return;

diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/codec/NSSummaryCodec.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/codec/NSSummaryCodec.java index 635872dc70d0..09e0b2587934 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/codec/NSSummaryCodec.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/codec/NSSummaryCodec.java
@@ -18,9 +18,13 @@
package org.apache.hadoop.ozone.recon.codec;
-import org.apache.hadoop.hdds.utils.db.*;
+import org.apache.hadoop.hdds.utils.db.IntegerCodec;
+import org.apache.hadoop.hdds.utils.db.LongCodec;
+import org.apache.hadoop.hdds.utils.db.ShortCodec;
+import org.apache.hadoop.hdds.utils.db.StringCodec;
import org.apache.hadoop.ozone.recon.ReconConstants;
import org.apache.hadoop.ozone.recon.api.types.NSSummary;
+import org.apache.hadoop.hdds.utils.db.Codec;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
@@ -45,7 +49,6 @@ public static Codec<NSSummary> get() {
private final Codec<Short> shortCodec = ShortCodec.get();
private final Codec<Long> longCodec = LongCodec.get();
private final Codec<String> stringCodec =
StringCodec.get();
- private final Codec<Boolean> booleanCodec = BooleanCodec.get();
// 1 int fields + 41-length int array
// + 2 dummy field to track list size/dirName length
private static final int NUM_OF_INTS =
@@ -61,13 +64,10 @@ public byte[] toPersistedFormat(NSSummary object) throws IOException {
String dirName = object.getDirName();
int stringLen = dirName.getBytes(StandardCharsets.UTF_8).length;
int numOfChildDirs = childDirs.size();
- boolean isObjectStoreEntity = object.isObjectStore();
final int resSize = NUM_OF_INTS * Integer.BYTES
+ (numOfChildDirs + 1) * Long.BYTES // 1 long field + list size
+ Short.BYTES // 2 dummy shorts to track length
- + stringLen // directory name length
- + 0; // for isObjectStoreEntity as boolean
-
+ + stringLen; // directory name length
ByteArrayOutputStream out = new ByteArrayOutputStream(resSize);
out.write(integerCodec.toPersistedFormat(object.getNumOfFiles()));
@@ -84,7 +84,6 @@ public byte[] toPersistedFormat(NSSummary object) throws IOException {
}
out.write(integerCodec.toPersistedFormat(stringLen));
out.write(stringCodec.toPersistedFormat(dirName));
- out.write(booleanCodec.toPersistedFormat(isObjectStoreEntity));
return out.toByteArray();
}
@@ -118,8 +117,6 @@ public NSSummary fromPersistedFormat(byte[] rawData) throws IOException {
assert (bytesRead == strLen);
String dirName = stringCodec.fromPersistedFormat(buffer);
res.setDirName(dirName);
- boolean isObjectStoreEntity = in.readBoolean(); // Read boolean directly
- res.setIsObjectStore(isObjectStoreEntity); // Set isObjectStoreEntity
return res;
}
@@ -131,7 +128,6 @@ public NSSummary copyObject(NSSummary object) {
copy.setFileSizeBucket(object.getFileSizeBucket());
copy.setChildDir(object.getChildDir());
copy.setDirName(object.getDirName());
- copy.setIsObjectStore(object.isObjectStore()); // Copy isObjectStoreEntity
return copy;
}
}

From 0e1fe0d65642e1d92e9efb718936d1edf7e48001 Mon Sep 17 00:00:00 2001
From: arafat
Date: Thu, 2 Nov 2023 01:12:39 +0530
Subject: [PATCH 03/19] Modified the process method of NSSummaryTaskWithLegacy to handle Legacy buckets with OBS layout

--- .../recon/TestReconWithOzoneManagerFSO.java | 2 +- .../recon/api/handlers/BucketHandler.java | 2 + .../api/handlers/LegacyBucketHandler.java | 218 ++++++++++++---- .../tasks/NSSummaryTaskDbEventHandler.java | 3 +- .../recon/tasks/NSSummaryTaskWithFSO.java | 6 +- .../recon/tasks/NSSummaryTaskWithLegacy.java | 242 ++++++++++-------- 6 files changed, 316 insertions(+), 157 deletions(-)

diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconWithOzoneManagerFSO.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconWithOzoneManagerFSO.java index d9848a912990..d638f398c5ba 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconWithOzoneManagerFSO.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconWithOzoneManagerFSO.java
@@ -124,7 +124,7 @@ public void testNamespaceSummaryAPI() throws Exception {
OzoneStorageContainerManager reconSCM =
cluster.getReconServer().getReconStorageContainerManager();
NSSummaryEndpoint endpoint = new NSSummaryEndpoint(namespaceSummaryManager,
- omMetadataManagerInstance, reconSCM);
+ omMetadataManagerInstance, reconSCM, new OzoneConfiguration());
Response basicInfo = endpoint.getBasicInfo("/vol1/bucket1/dir1");
NamespaceSummaryResponse entity =
(NamespaceSummaryResponse) basicInfo.getEntity();
diff --git
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/BucketHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/BucketHandler.java index 34dcba40f81b..13f64dc1d7a3 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/BucketHandler.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/BucketHandler.java @@ -17,9 +17,11 @@ */ package org.apache.hadoop.ozone.recon.api.handlers; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.scm.container.ContainerManager; import org.apache.hadoop.hdds.scm.server.OzoneStorageContainerManager; import org.apache.hadoop.hdds.utils.db.Table; +import org.apache.hadoop.ozone.om.OMConfigKeys; import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/LegacyBucketHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/LegacyBucketHandler.java index bb9babb51d67..2edd50afc93d 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/LegacyBucketHandler.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/LegacyBucketHandler.java @@ -18,9 +18,11 @@ package org.apache.hadoop.ozone.recon.api.handlers; import com.google.common.base.Preconditions; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.scm.server.OzoneStorageContainerManager; import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.hdds.utils.db.TableIterator; +import org.apache.hadoop.ozone.om.OMConfigKeys; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo; @@ -51,6 +53,8 @@ public class LegacyBucketHandler extends BucketHandler { private final String vol; private final String bucket; private final OmBucketInfo omBucketInfo; + private final OzoneConfiguration configuration; + private final boolean enableFileSystemPaths; public LegacyBucketHandler( ReconNamespaceSummaryManager reconNamespaceSummaryManager, @@ -62,6 +66,10 @@ public LegacyBucketHandler( this.omBucketInfo = bucketInfo; this.vol = omBucketInfo.getVolumeName(); this.bucket = omBucketInfo.getBucketName(); + this.configuration = new OzoneConfiguration(); + this.enableFileSystemPaths = configuration + .getBoolean(OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS, + OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS_DEFAULT); } /** @@ -212,58 +220,181 @@ public long handleDirectKeys(long parentId, boolean withReplica, return 0; } - if (nsSummary.isObjectStore()) { - keyDataSizeWithReplica += handleDirectKeysForOBSLayout( - parentId, withReplica, listFile, duData, keyTable, seekPrefix ); - } else { + if (enableFileSystemPaths) { keyDataSizeWithReplica += handleDirectKeysForFSOLayout( - parentId, withReplica, listFile, normalizedPath,duData, keyTable, seekPrefix, nsSummary); + parentId, withReplica, listFile, normalizedPath, duData, keyTable, + seekPrefix, nsSummary); + } else { + keyDataSizeWithReplica += handleDirectKeysForOBSLayout( + parentId, withReplica, listFile, duData, keyTable, seekPrefix); } return keyDataSizeWithReplica; } +// public long handleDirectKeysForOBSLayout(long parentId, boolean withReplica, +// boolean listFile, 
+// List duData, +// Table keyTable, +// String seekPrefix) +// throws IOException { +// +// long keyDataSizeWithReplica = 0L; +// +// try (TableIterator> +// iterator = keyTable.iterator()) { +// iterator.seek(seekPrefix); +// +// while (iterator.hasNext()) { +// // KeyName : OmKeyInfo-Object +// Table.KeyValue kv = iterator.next(); +// String dbKey = kv.getKey(); +// +// // Exit loop if the key doesn't match the seekPrefix. +// if (!dbKey.startsWith(seekPrefix)) { +// break; +// } +// +// OmKeyInfo keyInfo = kv.getValue(); +// if (keyInfo != null) { +// DUResponse.DiskUsage diskUsage = new DUResponse.DiskUsage(); +// String objectName = keyInfo.getKeyName(); +// diskUsage.setSubpath(objectName); +// diskUsage.setKey(true); +// diskUsage.setSize(keyInfo.getDataSize()); +// +// if (withReplica) { +// long keyDU = keyInfo.getReplicatedSize(); +// keyDataSizeWithReplica += keyDU; +// diskUsage.setSizeWithReplica(keyDU); +// } +// // List all the keys for the OBS bucket if requested. +// if (listFile) { +// duData.add(diskUsage); +// } +// } +// } +// } +// +// return keyDataSizeWithReplica; +// } + +// public long handleDirectKeysForFSOLayout(long parentId, boolean withReplica, +// boolean listFile, +// String normalizedPath, +// List duData, +// Table keyTable, +// String seekPrefix, +// NSSummary nsSummary) +// throws IOException { +// +// long keyDataSizeWithReplica = 0L; +// +// if (omBucketInfo.getObjectID() != parentId) { +// String dirName = nsSummary.getDirName(); +// seekPrefix += dirName; +// } +// String[] seekKeys = seekPrefix.split(OM_KEY_PREFIX); +// try (TableIterator> +// iterator = keyTable.iterator()) { +// +// iterator.seek(seekPrefix); +// +// while (iterator.hasNext()) { +// Table.KeyValue kv = iterator.next(); +// String dbKey = kv.getKey(); +// +// if (!dbKey.startsWith(seekPrefix)) { +// break; +// } +// +// String[] keys = dbKey.split(OM_KEY_PREFIX); +// +// // iteration moved to the next level +// // and not handling direct keys +// if (keys.length - seekKeys.length > 1) { +// continue; +// } +// +// OmKeyInfo keyInfo = kv.getValue(); +// if (keyInfo != null) { +// // skip directory markers, just include directKeys +// if (keyInfo.getKeyName().endsWith(OM_KEY_PREFIX)) { +// continue; +// } +// DUResponse.DiskUsage diskUsage = new DUResponse.DiskUsage(); +// String subpath = buildSubpath(normalizedPath, +// keyInfo.getFileName()); +// diskUsage.setSubpath(subpath); +// diskUsage.setKey(true); +// diskUsage.setSize(keyInfo.getDataSize()); +// +// if (withReplica) { +// long keyDU = keyInfo.getReplicatedSize(); +// keyDataSizeWithReplica += keyDU; +// diskUsage.setSizeWithReplica(keyDU); +// } +// // list the key as a subpath +// if (listFile) { +// duData.add(diskUsage); +// } +// } +// } +// } +// +// return keyDataSizeWithReplica; +// } + + // Create a method to generate DUResponse.DiskUsage objects + private DUResponse.DiskUsage createDiskUsage(OmKeyInfo keyInfo, boolean withReplica, boolean listFile, List duData) { + DUResponse.DiskUsage diskUsage = new DUResponse.DiskUsage(); + String objectName = keyInfo.getKeyName(); + diskUsage.setSubpath(objectName); + diskUsage.setKey(true); + diskUsage.setSize(keyInfo.getDataSize()); + + if (withReplica) { + long keyDU = keyInfo.getReplicatedSize(); + diskUsage.setSizeWithReplica(keyDU); + } + + if (listFile) { + duData.add(diskUsage); + } + + return diskUsage; + } + + // Update your existing methods to use the new createDiskUsage method public long handleDirectKeysForOBSLayout(long parentId, boolean withReplica, boolean 
listFile, List duData, Table keyTable, String seekPrefix) throws IOException { - long keyDataSizeWithReplica = 0L; - TableIterator> - iterator = keyTable.iterator(); - - iterator.seek(seekPrefix); - while (iterator.hasNext()) { - // KeyName : OmKeyInfo-Object - Table.KeyValue kv = iterator.next(); - String dbKey = kv.getKey(); + try ( + TableIterator> iterator = keyTable.iterator()) { + iterator.seek(seekPrefix); - // Exit loop if the key doesn't match the seekPrefix. - if (!dbKey.startsWith(seekPrefix)) { - break; - } + while (iterator.hasNext()) { + Table.KeyValue kv = iterator.next(); + String dbKey = kv.getKey(); - OmKeyInfo keyInfo = kv.getValue(); - if (keyInfo != null) { - DUResponse.DiskUsage diskUsage = new DUResponse.DiskUsage(); - String objectName = keyInfo.getKeyName(); - diskUsage.setSubpath(objectName); - diskUsage.setKey(true); - diskUsage.setSize(keyInfo.getDataSize()); - - if (withReplica) { - long keyDU = keyInfo.getReplicatedSize(); - keyDataSizeWithReplica += keyDU; - diskUsage.setSizeWithReplica(keyDU); + if (!dbKey.startsWith(seekPrefix)) { + break; } - // List all the keys for the OBS bucket if requested. - if (listFile) { - duData.add(diskUsage); + + OmKeyInfo keyInfo = kv.getValue(); + if (keyInfo != null) { + createDiskUsage(keyInfo, withReplica, listFile, duData); + if (withReplica) { + long keyDU = keyInfo.getReplicatedSize(); + keyDataSizeWithReplica += keyDU; + } } } } @@ -277,8 +408,8 @@ public long handleDirectKeysForFSOLayout(long parentId, boolean withReplica, List duData, Table keyTable, String seekPrefix, - NSSummary nsSummary) throws IOException { - + NSSummary nsSummary) + throws IOException { long keyDataSizeWithReplica = 0L; if (omBucketInfo.getObjectID() != parentId) { @@ -286,8 +417,8 @@ public long handleDirectKeysForFSOLayout(long parentId, boolean withReplica, seekPrefix += dirName; } String[] seekKeys = seekPrefix.split(OM_KEY_PREFIX); - try (TableIterator> - iterator = keyTable.iterator()) { + try ( + TableIterator> iterator = keyTable.iterator()) { iterator.seek(seekPrefix); @@ -313,21 +444,10 @@ public long handleDirectKeysForFSOLayout(long parentId, boolean withReplica, if (keyInfo.getKeyName().endsWith(OM_KEY_PREFIX)) { continue; } - DUResponse.DiskUsage diskUsage = new DUResponse.DiskUsage(); - String subpath = buildSubpath(normalizedPath, - keyInfo.getFileName()); - diskUsage.setSubpath(subpath); - diskUsage.setKey(true); - diskUsage.setSize(keyInfo.getDataSize()); - + createDiskUsage(keyInfo, withReplica, listFile, duData); if (withReplica) { long keyDU = keyInfo.getReplicatedSize(); keyDataSizeWithReplica += keyDU; - diskUsage.setSizeWithReplica(keyDU); - } - // list the key as a subpath - if (listFile) { - duData.add(diskUsage); } } } diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskDbEventHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskDbEventHandler.java index e5b8a7887c42..f00d83e64a52 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskDbEventHandler.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskDbEventHandler.java @@ -87,7 +87,7 @@ protected void writeNSSummariesToDB(Map nsSummaryMap) } protected void handlePutKeyEvent(OmKeyInfo keyInfo, Map nsSummaryMap, boolean isObjectStore) throws IOException { + NSSummary> nsSummaryMap) throws IOException { long parentObjectId = keyInfo.getParentObjectID(); // Try to get the NSSummary from our local map that 
maps NSSummaries to IDs NSSummary nsSummary = nsSummaryMap.get(parentObjectId); @@ -110,7 +110,6 @@ protected void handlePutKeyEvent(OmKeyInfo keyInfo, Map kv = keyTableIter.next(); OmKeyInfo keyInfo = kv.getValue(); - handlePutKeyEvent(keyInfo, nsSummaryMap, true); + handlePutKeyEvent(keyInfo, nsSummaryMap); if (!checkAndCallFlushToDB(nsSummaryMap)) { return false; } diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskWithLegacy.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskWithLegacy.java index 357b263bd28b..8120ed5b9177 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskWithLegacy.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskWithLegacy.java @@ -71,16 +71,17 @@ public NSSummaryTaskWithLegacy(ReconNamespaceSummaryManager public boolean processWithLegacy(OMUpdateEventBatch events) { Iterator eventIterator = events.getIterator(); Map nsSummaryMap = new HashMap<>(); + ReconOMMetadataManager metadataManager = getReconOMMetadataManager(); while (eventIterator.hasNext()) { - OMDBUpdateEvent omdbUpdateEvent = eventIterator.next(); + OMDBUpdateEvent omdbUpdateEvent = + eventIterator.next(); OMDBUpdateEvent.OMDBUpdateAction action = omdbUpdateEvent.getAction(); // we only process updates on OM's KeyTable String table = omdbUpdateEvent.getTable(); - boolean updateOnKeyTable = table.equals(KEY_TABLE); - if (!updateOnKeyTable) { + + if (!table.equals(KEY_TABLE)) { continue; } @@ -90,104 +91,26 @@ public boolean processWithLegacy(OMUpdateEventBatch events) { OMDBUpdateEvent keyTableUpdateEvent = omdbUpdateEvent; Object value = keyTableUpdateEvent.getValue(); Object oldValue = keyTableUpdateEvent.getOldValue(); + if (!(value instanceof OmKeyInfo)) { LOG.warn("Unexpected value type {} for key {}. Skipping processing.", value.getClass().getName(), updatedKey); continue; } + OmKeyInfo updatedKeyInfo = (OmKeyInfo) value; OmKeyInfo oldKeyInfo = (OmKeyInfo) oldValue; - // KeyTable entries belong to both Legacy and OBS buckets. - // Check bucket layout and if it's OBS - // continue to the next iteration. - // Check just for the current KeyInfo. 
- String volumeName = updatedKeyInfo.getVolumeName(); - String bucketName = updatedKeyInfo.getBucketName(); - String bucketDBKey = getReconOMMetadataManager() - .getBucketKey(volumeName, bucketName); - // Get bucket info from bucket table - OmBucketInfo omBucketInfo = getReconOMMetadataManager() - .getBucketTable().getSkipCache(bucketDBKey); - - if (omBucketInfo.getBucketLayout() - .isObjectStore(enableFileSystemPaths)) { + if (!isBucketLayoutValid(metadataManager, updatedKeyInfo)) { continue; } -// setKeyParentID(updatedKeyInfo); - - if (!updatedKeyInfo.getKeyName().endsWith(OM_KEY_PREFIX)) { - switch (action) { - case PUT: - handlePutKeyEvent(updatedKeyInfo, nsSummaryMap, - !enableFileSystemPaths); - break; - - case DELETE: - handleDeleteKeyEvent(updatedKeyInfo, nsSummaryMap); - break; - - case UPDATE: - if (oldKeyInfo != null) { - // delete first, then put -// setKeyParentID(oldKeyInfo); - handleDeleteKeyEvent(oldKeyInfo, nsSummaryMap); - } else { - LOG.warn("Update event does not have the old keyInfo for {}.", - updatedKey); - } - handlePutKeyEvent(updatedKeyInfo, nsSummaryMap, - !enableFileSystemPaths); - break; - - default: - LOG.debug("Skipping DB update event : {}", - omdbUpdateEvent.getAction()); - } + if (enableFileSystemPaths) { + processWithFileSystemLayout(updatedKeyInfo, oldKeyInfo, action, + nsSummaryMap); } else { - OmDirectoryInfo updatedDirectoryInfo = - new OmDirectoryInfo.Builder() - .setName(updatedKeyInfo.getKeyName()) - .setObjectID(updatedKeyInfo.getObjectID()) - .setParentObjectID(updatedKeyInfo.getParentObjectID()) - .build(); - - OmDirectoryInfo oldDirectoryInfo = null; - - if (oldKeyInfo != null) { - oldDirectoryInfo = - new OmDirectoryInfo.Builder() - .setName(oldKeyInfo.getKeyName()) - .setObjectID(oldKeyInfo.getObjectID()) - .setParentObjectID(oldKeyInfo.getParentObjectID()) - .build(); - } - - switch (action) { - case PUT: - handlePutDirEvent(updatedDirectoryInfo, nsSummaryMap); - break; - - case DELETE: - handleDeleteDirEvent(updatedDirectoryInfo, nsSummaryMap); - break; - - case UPDATE: - if (oldDirectoryInfo != null) { - // delete first, then put - handleDeleteDirEvent(oldDirectoryInfo, nsSummaryMap); - } else { - LOG.warn("Update event does not have the old dirInfo for {}.", - updatedKey); - } - handlePutDirEvent(updatedDirectoryInfo, nsSummaryMap); - break; - - default: - LOG.debug("Skipping DB update event : {}", - omdbUpdateEvent.getAction()); - } + processWithObjectStoreLayout(updatedKeyInfo, oldKeyInfo, action, + nsSummaryMap); } } catch (IOException ioEx) { LOG.error("Unable to process Namespace Summary data in Recon DB. 
", @@ -208,6 +131,103 @@ public boolean processWithLegacy(OMUpdateEventBatch events) { return true; } + private void processWithFileSystemLayout(OmKeyInfo updatedKeyInfo, + OmKeyInfo oldKeyInfo, + OMDBUpdateEvent.OMDBUpdateAction action, + Map nsSummaryMap) + throws IOException { + setParentDirectoryId(updatedKeyInfo); + + if (!updatedKeyInfo.getKeyName().endsWith(OM_KEY_PREFIX)) { + switch (action) { + case PUT: + handlePutKeyEvent(updatedKeyInfo, nsSummaryMap); + break; + + case DELETE: + handleDeleteKeyEvent(updatedKeyInfo, nsSummaryMap); + break; + + case UPDATE: + if (oldKeyInfo != null) { + setParentDirectoryId(oldKeyInfo); + handleDeleteKeyEvent(oldKeyInfo, nsSummaryMap); + } else { + LOG.warn("Update event does not have the old keyInfo for {}.", + updatedKeyInfo.getKeyName()); + } + handlePutKeyEvent(updatedKeyInfo, nsSummaryMap); + break; + } + } else { + OmDirectoryInfo updatedDirectoryInfo = new OmDirectoryInfo.Builder() + .setName(updatedKeyInfo.getKeyName()) + .setObjectID(updatedKeyInfo.getObjectID()) + .setParentObjectID(updatedKeyInfo.getParentObjectID()) + .build(); + + OmDirectoryInfo oldDirectoryInfo = null; + + if (oldKeyInfo != null) { + oldDirectoryInfo = + new OmDirectoryInfo.Builder() + .setName(oldKeyInfo.getKeyName()) + .setObjectID(oldKeyInfo.getObjectID()) + .setParentObjectID(oldKeyInfo.getParentObjectID()) + .build(); + } + + switch (action) { + case PUT: + handlePutDirEvent(updatedDirectoryInfo, nsSummaryMap); + break; + + case DELETE: + handleDeleteDirEvent(updatedDirectoryInfo, nsSummaryMap); + break; + + case UPDATE: + if (oldDirectoryInfo != null) { + handleDeleteDirEvent(oldDirectoryInfo, nsSummaryMap); + } else { + LOG.warn("Update event does not have the old dirInfo for {}.", + updatedKeyInfo.getKeyName()); + } + handlePutDirEvent(updatedDirectoryInfo, nsSummaryMap); + break; + } + } + } + + private void processWithObjectStoreLayout(OmKeyInfo updatedKeyInfo, + OmKeyInfo oldKeyInfo, + OMDBUpdateEvent.OMDBUpdateAction action, + Map nsSummaryMap) + throws IOException { + setParentBucketId(updatedKeyInfo); + + switch (action) { + case PUT: + handlePutKeyEvent(updatedKeyInfo, nsSummaryMap); + break; + + case DELETE: + handleDeleteKeyEvent(updatedKeyInfo, nsSummaryMap); + break; + + case UPDATE: + if (oldKeyInfo != null) { + setParentBucketId(oldKeyInfo); + handleDeleteKeyEvent(oldKeyInfo, nsSummaryMap); + } else { + LOG.warn("Update event does not have the old keyInfo for {}.", + updatedKeyInfo.getKeyName()); + } + handlePutKeyEvent(updatedKeyInfo, nsSummaryMap); + break; + } + } + public boolean reprocessWithLegacy(OMMetadataManager omMetadataManager) { Map nsSummaryMap = new HashMap<>(); @@ -225,16 +245,8 @@ public boolean reprocessWithLegacy(OMMetadataManager omMetadataManager) { // KeyTable entries belong to both Legacy and OBS buckets. // Check bucket layout and if it's OBS // continue to the next iteration. - String volumeName = keyInfo.getVolumeName(); - String bucketName = keyInfo.getBucketName(); - String bucketDBKey = omMetadataManager - .getBucketKey(volumeName, bucketName); - // Get bucket info from bucket table - OmBucketInfo omBucketInfo = omMetadataManager - .getBucketTable().getSkipCache(bucketDBKey); - - // Skip if Bucket is not a Legacy bucket. 
- if (omBucketInfo.getBucketLayout() != BUCKET_LAYOUT) { + if (!isBucketLayoutValid((ReconOMMetadataManager) omMetadataManager, + keyInfo)) { continue; } @@ -251,12 +263,12 @@ public boolean reprocessWithLegacy(OMMetadataManager omMetadataManager) { .build(); handlePutDirEvent(directoryInfo, nsSummaryMap); } else { - handlePutKeyEvent(keyInfo, nsSummaryMap,false); + handlePutKeyEvent(keyInfo, nsSummaryMap); } } else { // The LEGACY bucket is an object store bucket. setParentBucketId(keyInfo); - handlePutKeyEvent(keyInfo, nsSummaryMap, true); + handlePutKeyEvent(keyInfo, nsSummaryMap); } if (!checkAndCallFlushToDB(nsSummaryMap)) { return false; @@ -332,4 +344,30 @@ private void setParentBucketId(OmKeyInfo keyInfo) "NSSummaryTaskWithLegacy is null"); } } + + /** + * Check if the bucket layout is LEGACY. + * @param metadataManager + * @param keyInfo + * @return + */ + private boolean isBucketLayoutValid(ReconOMMetadataManager metadataManager, + OmKeyInfo keyInfo) + throws IOException { + String volumeName = keyInfo.getVolumeName(); + String bucketName = keyInfo.getBucketName(); + String bucketDBKey = metadataManager.getBucketKey(volumeName, bucketName); + OmBucketInfo omBucketInfo = + metadataManager.getBucketTable().getSkipCache(bucketDBKey); + + if (omBucketInfo.getBucketLayout() != BUCKET_LAYOUT) { + LOG.debug( + "Skipping processing for bucket {} as bucket layout is not LEGACY", + bucketName); + return false; + } + + return true; + } + } From c86182eb36f352075f17fa46228d64094b606cd2 Mon Sep 17 00:00:00 2001 From: arafat Date: Thu, 2 Nov 2023 01:26:30 +0530 Subject: [PATCH 04/19] Added more java docs and refactoring --- .../recon/TestReconWithOzoneManagerFSO.java | 2 +- .../recon/api/handlers/BucketHandler.java | 2 - .../api/handlers/LegacyBucketHandler.java | 202 +++++------------- 3 files changed, 60 insertions(+), 146 deletions(-) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconWithOzoneManagerFSO.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconWithOzoneManagerFSO.java index d638f398c5ba..d9848a912990 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconWithOzoneManagerFSO.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconWithOzoneManagerFSO.java @@ -124,7 +124,7 @@ public void testNamespaceSummaryAPI() throws Exception { OzoneStorageContainerManager reconSCM = cluster.getReconServer().getReconStorageContainerManager(); NSSummaryEndpoint endpoint = new NSSummaryEndpoint(namespaceSummaryManager, - omMetadataManagerInstance, reconSCM,new OzoneConfiguration()); + omMetadataManagerInstance, reconSCM); Response basicInfo = endpoint.getBasicInfo("/vol1/bucket1/dir1"); NamespaceSummaryResponse entity = (NamespaceSummaryResponse) basicInfo.getEntity(); diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/BucketHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/BucketHandler.java index 13f64dc1d7a3..34dcba40f81b 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/BucketHandler.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/BucketHandler.java @@ -17,11 +17,9 @@ */ package org.apache.hadoop.ozone.recon.api.handlers; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.scm.container.ContainerManager; import 
org.apache.hadoop.hdds.scm.server.OzoneStorageContainerManager; import org.apache.hadoop.hdds.utils.db.Table; -import org.apache.hadoop.ozone.om.OMConfigKeys; import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/LegacyBucketHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/LegacyBucketHandler.java index 2edd50afc93d..5360551646f5 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/LegacyBucketHandler.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/LegacyBucketHandler.java @@ -221,153 +221,29 @@ public long handleDirectKeys(long parentId, boolean withReplica, } if (enableFileSystemPaths) { - keyDataSizeWithReplica += handleDirectKeysForFSOLayout( - parentId, withReplica, listFile, normalizedPath, duData, keyTable, - seekPrefix, nsSummary); + keyDataSizeWithReplica += + handleDirectKeysForFSOLayout(parentId, withReplica, listFile, duData, + keyTable, seekPrefix, nsSummary); } else { - keyDataSizeWithReplica += handleDirectKeysForOBSLayout( - parentId, withReplica, listFile, duData, keyTable, seekPrefix); + keyDataSizeWithReplica += + handleDirectKeysForOBSLayout(withReplica, listFile, duData, keyTable, + seekPrefix); } return keyDataSizeWithReplica; } - -// public long handleDirectKeysForOBSLayout(long parentId, boolean withReplica, -// boolean listFile, -// List duData, -// Table keyTable, -// String seekPrefix) -// throws IOException { -// -// long keyDataSizeWithReplica = 0L; -// -// try (TableIterator> -// iterator = keyTable.iterator()) { -// iterator.seek(seekPrefix); -// -// while (iterator.hasNext()) { -// // KeyName : OmKeyInfo-Object -// Table.KeyValue kv = iterator.next(); -// String dbKey = kv.getKey(); -// -// // Exit loop if the key doesn't match the seekPrefix. -// if (!dbKey.startsWith(seekPrefix)) { -// break; -// } -// -// OmKeyInfo keyInfo = kv.getValue(); -// if (keyInfo != null) { -// DUResponse.DiskUsage diskUsage = new DUResponse.DiskUsage(); -// String objectName = keyInfo.getKeyName(); -// diskUsage.setSubpath(objectName); -// diskUsage.setKey(true); -// diskUsage.setSize(keyInfo.getDataSize()); -// -// if (withReplica) { -// long keyDU = keyInfo.getReplicatedSize(); -// keyDataSizeWithReplica += keyDU; -// diskUsage.setSizeWithReplica(keyDU); -// } -// // List all the keys for the OBS bucket if requested. 
-// if (listFile) { -// duData.add(diskUsage); -// } -// } -// } -// } -// -// return keyDataSizeWithReplica; -// } - -// public long handleDirectKeysForFSOLayout(long parentId, boolean withReplica, -// boolean listFile, -// String normalizedPath, -// List duData, -// Table keyTable, -// String seekPrefix, -// NSSummary nsSummary) -// throws IOException { -// -// long keyDataSizeWithReplica = 0L; -// -// if (omBucketInfo.getObjectID() != parentId) { -// String dirName = nsSummary.getDirName(); -// seekPrefix += dirName; -// } -// String[] seekKeys = seekPrefix.split(OM_KEY_PREFIX); -// try (TableIterator> -// iterator = keyTable.iterator()) { -// -// iterator.seek(seekPrefix); -// -// while (iterator.hasNext()) { -// Table.KeyValue kv = iterator.next(); -// String dbKey = kv.getKey(); -// -// if (!dbKey.startsWith(seekPrefix)) { -// break; -// } -// -// String[] keys = dbKey.split(OM_KEY_PREFIX); -// -// // iteration moved to the next level -// // and not handling direct keys -// if (keys.length - seekKeys.length > 1) { -// continue; -// } -// -// OmKeyInfo keyInfo = kv.getValue(); -// if (keyInfo != null) { -// // skip directory markers, just include directKeys -// if (keyInfo.getKeyName().endsWith(OM_KEY_PREFIX)) { -// continue; -// } -// DUResponse.DiskUsage diskUsage = new DUResponse.DiskUsage(); -// String subpath = buildSubpath(normalizedPath, -// keyInfo.getFileName()); -// diskUsage.setSubpath(subpath); -// diskUsage.setKey(true); -// diskUsage.setSize(keyInfo.getDataSize()); -// -// if (withReplica) { -// long keyDU = keyInfo.getReplicatedSize(); -// keyDataSizeWithReplica += keyDU; -// diskUsage.setSizeWithReplica(keyDU); -// } -// // list the key as a subpath -// if (listFile) { -// duData.add(diskUsage); -// } -// } -// } -// } -// -// return keyDataSizeWithReplica; -// } - - // Create a method to generate DUResponse.DiskUsage objects - private DUResponse.DiskUsage createDiskUsage(OmKeyInfo keyInfo, boolean withReplica, boolean listFile, List duData) { - DUResponse.DiskUsage diskUsage = new DUResponse.DiskUsage(); - String objectName = keyInfo.getKeyName(); - diskUsage.setSubpath(objectName); - diskUsage.setKey(true); - diskUsage.setSize(keyInfo.getDataSize()); - - if (withReplica) { - long keyDU = keyInfo.getReplicatedSize(); - diskUsage.setSizeWithReplica(keyDU); - } - - if (listFile) { - duData.add(diskUsage); - } - - return diskUsage; - } - - // Update your existing methods to use the new createDiskUsage method - public long handleDirectKeysForOBSLayout(long parentId, boolean withReplica, + /** + * This method handles disk usage of direct keys for OBS layout. + * @param withReplica if withReplica is enabled, set sizeWithReplica + * @param listFile if listFile is enabled, append key DU as a subpath + * @param duData the current DU data + * @param keyTable the key table + * @param seekPrefix the seek prefix used to position the iterator + * @return the total DU of all direct keys + * @throws IOException + */ + public long handleDirectKeysForOBSLayout(boolean withReplica, boolean listFile, List duData, Table keyTable, @@ -402,9 +278,20 @@ public long handleDirectKeysForOBSLayout(long parentId, boolean withReplica, return keyDataSizeWithReplica; } + /** + * This method handles disk usage of direct keys for OBS layout. 
+ * @param parentId parent directory/bucket
+ * @param withReplica if withReplica is enabled, set sizeWithReplica
+ * @param listFile if listFile is enabled, append key DU as a subpath
+ * @param duData the current DU data
+ * @param keyTable the key table
+ * @param seekPrefix the seek prefix used to position the iterator
+ * @param nsSummary of the parent directory/bucket
+ * @return the total DU of all direct keys
+ * @throws IOException
+ */
public long handleDirectKeysForFSOLayout(long parentId, boolean withReplica,
boolean listFile,
- String normalizedPath,
List<DUResponse.DiskUsage> duData,
Table<String, OmKeyInfo> keyTable,
String seekPrefix,
- NSSummary nsSummary) throws IOException {
-
+ NSSummary nsSummary)
+ throws IOException {
long keyDataSizeWithReplica = 0L;
if (omBucketInfo.getObjectID() != parentId) {
String dirName = nsSummary.getDirName();
seekPrefix += dirName;
}
String[] seekKeys = seekPrefix.split(OM_KEY_PREFIX);
try (
- TableIterator<String, ? extends Table.KeyValue<String, OmKeyInfo>> iterator = keyTable.iterator()) {
+ TableIterator<String, ? extends Table.KeyValue<String, OmKeyInfo>>
+ iterator = keyTable.iterator()) {
iterator.seek(seekPrefix);
@@ -456,6 +344,34 @@ public long handleDirectKeysForFSOLayout(long parentId, boolean withReplica,
return keyDataSizeWithReplica;
}
+ /**
+ * This method handles disk usage calculation for legacy buckets.
+ * @param keyInfo the key info
+ * @param withReplica if withReplica is enabled, set sizeWithReplica
+ * @param listFile if listFile is enabled, append key DU as a subpath
+ */
+ private DUResponse.DiskUsage createDiskUsage(OmKeyInfo keyInfo,
+ boolean withReplica,
+ boolean listFile,
+ List<DUResponse.DiskUsage> duData) {
+ DUResponse.DiskUsage diskUsage = new DUResponse.DiskUsage();
+ String objectName = keyInfo.getKeyName();
+ diskUsage.setSubpath(objectName);
+ diskUsage.setKey(true);
+ diskUsage.setSize(keyInfo.getDataSize());
+
+ if (withReplica) {
+ long keyDU = keyInfo.getReplicatedSize();
+ diskUsage.setSizeWithReplica(keyDU);
+ }
+
+ if (listFile) {
+ duData.add(diskUsage);
+ }
+
+ return diskUsage;
+ }
+
/**
* Given a valid path request for a directory,

From beabd5a3e56319e3c0f83ea291febe50cb975813 Mon Sep 17 00:00:00 2001
From: arafat
Date: Tue, 5 Mar 2024 15:38:25 +0530
Subject: [PATCH 05/19] Added java docs and Logs

--- .../ozone/recon/tasks/NSSummaryTaskWithLegacy.java | 9 +++++++++ 1 file changed, 9 insertions(+)

diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskWithLegacy.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskWithLegacy.java index 8120ed5b9177..17ae2571681e 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskWithLegacy.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskWithLegacy.java
@@ -158,6 +158,9 @@ private void processWithFileSystemLayout(OmKeyInfo updatedKeyInfo,
}
handlePutKeyEvent(updatedKeyInfo, nsSummaryMap);
break;
+
+ default:
+ LOG.debug("Skipping DB update event for Key: {}", action);
}
} else {
OmDirectoryInfo updatedDirectoryInfo = new OmDirectoryInfo.Builder()
@@ -195,6 +198,9 @@ private void processWithFileSystemLayout(OmKeyInfo updatedKeyInfo,
}
handlePutDirEvent(updatedDirectoryInfo, nsSummaryMap);
break;
+
+ default:
+ LOG.debug("Skipping DB update event for Directory: {}", action);
}
}
}
@@ -225,6 +231,9 @@ private void processWithObjectStoreLayout(OmKeyInfo updatedKeyInfo,
}
handlePutKeyEvent(updatedKeyInfo, nsSummaryMap);
break;
+
+ default:
+ LOG.debug("Skipping DB update event for Key: {}", action);
}
}

From a3db46137800841313b40e9c3d1ea7e26a8a39eb Mon Sep 17 00:00:00 2001
From: arafat
Date: Wed, 6 Mar 2024 13:56:22 +0530
Subject: [PATCH 06/19] Using OBSBucketHandler when the fileSystemPathEnabled flag is set to false for Legacy buckets

--- .../recon/api/handlers/BucketHandler.java | 21 ++- .../recon/api/handlers/EntityHandler.java | 65 ++++++++- .../api/handlers/LegacyBucketHandler.java | 134 ++---------------- 3 files changed, 89 insertions(+), 131 deletions(-)

diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/BucketHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/BucketHandler.java index 34dcba40f81b..7ca0b2832e55 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/BucketHandler.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/BucketHandler.java
@@ -17,9 +17,11 @@
*/
package org.apache.hadoop.ozone.recon.api.handlers;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.scm.container.ContainerManager;
import org.apache.hadoop.hdds.scm.server.OzoneStorageContainerManager;
import org.apache.hadoop.hdds.utils.db.Table;
+import org.apache.hadoop.ozone.om.OMConfigKeys;
import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
@@ -163,6 +165,10 @@ public static BucketHandler getBucketHandler(
ReconOMMetadataManager omMetadataManager,
OzoneStorageContainerManager reconSCM,
OmBucketInfo bucketInfo) throws IOException {
+ OzoneConfiguration configuration = new OzoneConfiguration();
+ boolean enableFileSystemPaths = configuration
+ .getBoolean(OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS,
+ OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS_DEFAULT);
// If bucketInfo is null then entity type is UNKNOWN
if (Objects.isNull(bucketInfo)) {
@@ -172,10 +178,17 @@ public static BucketHandler getBucketHandler(
.equals(BucketLayout.FILE_SYSTEM_OPTIMIZED)) {
return new FSOBucketHandler(reconNamespaceSummaryManager,
omMetadataManager, reconSCM, bucketInfo);
- } else if (bucketInfo.getBucketLayout()
- .equals(BucketLayout.LEGACY)) {
- return new LegacyBucketHandler(reconNamespaceSummaryManager,
- omMetadataManager, reconSCM, bucketInfo);
+ } else if (bucketInfo.getBucketLayout().equals(BucketLayout.LEGACY)) {
+ // Choose handler based on enableFileSystemPaths flag for legacy layout.
+ // If enableFileSystemPaths is false, then the legacy bucket is treated
+ // as an OBS bucket.
+ if (enableFileSystemPaths) { + return new LegacyBucketHandler(reconNamespaceSummaryManager, + omMetadataManager, reconSCM, bucketInfo); + } else { + return new OBSBucketHandler(reconNamespaceSummaryManager, + omMetadataManager, reconSCM, bucketInfo); + } } else if (bucketInfo.getBucketLayout() .equals(BucketLayout.OBJECT_STORE)) { return new OBSBucketHandler(reconNamespaceSummaryManager, diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/EntityHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/EntityHandler.java index d12c7b6545ac..35a160490a4a 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/EntityHandler.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/EntityHandler.java @@ -17,8 +17,11 @@ */ package org.apache.hadoop.ozone.recon.api.handlers; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.hdds.scm.server.OzoneStorageContainerManager; import org.apache.hadoop.ozone.OmUtils; +import org.apache.hadoop.ozone.om.helpers.BucketLayout; +import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.recon.ReconConstants; import org.apache.hadoop.ozone.recon.api.types.NamespaceSummaryResponse; import org.apache.hadoop.ozone.recon.api.types.DUResponse; @@ -155,21 +158,36 @@ public static EntityHandler getEntityHandler( } else { // length > 3. check dir or key existence String volName = names[0]; String bucketName = names[1]; - - String keyName = BucketHandler.getKeyName(names); + String keyName; bucketHandler = BucketHandler.getBucketHandler( reconNamespaceSummaryManager, omMetadataManager, reconSCM, volName, bucketName); - // check if either volume or bucket doesn't exist - if (bucketHandler == null - || !omMetadataManager.volumeExists(volName) - || !bucketHandler.bucketExists(volName, bucketName)) { + // Check if the bucketHandler is null or if the volume/bucket does not exist + if (bucketHandler == null || + !omMetadataManager.volumeExists(volName) || + !bucketHandler.bucketExists(volName, bucketName)) { return EntityType.UNKNOWN.create(reconNamespaceSummaryManager, - omMetadataManager, reconSCM, null, path); + omMetadataManager, reconSCM, null, path); } + + // If the bucket layout is OBJECT_STORE, use parseObjectStorePath to get the key name + if (bucketHandler.getBucketLayout() == BucketLayout.OBJECT_STORE) { + String[] parsedObjectStorePath = parseObjectStorePath(path); + if (parsedObjectStorePath == null || + parsedObjectStorePath.length != 3) { + // Handle invalid path format when expecting OBJECT_STORE layout + return EntityType.UNKNOWN.create(reconNamespaceSummaryManager, + omMetadataManager, reconSCM, null, path); + } + keyName = parsedObjectStorePath[2]; + } else { + // For non-OBJECT_STORE layouts, derive the keyName using the existing names array + keyName = BucketHandler.getKeyName(names); + } + return bucketHandler.determineKeyPath(keyName) .create(reconNamespaceSummaryManager, omMetadataManager, reconSCM, bucketHandler, path); @@ -256,6 +274,39 @@ public static String[] parseRequestPath(String path) { return names; } + /** + * Splits an object store path into volume, bucket, and key name components. + *

+ * This method parses a path of the format "/volumeName/bucketName/keyName", + * including paths with additional '/' characters within the key name. It's + * designed for object store paths where the first three '/' characters separate + * the root, volume and bucket names from the key name. + * + * @param path The object store path to parse, starting with a slash. + * @return A String array with three elements: volume name, bucket name, and key name, + * or {@code null} if the path format is invalid. + */ + public static String[] parseObjectStorePath(String path) { + // Ensure the path starts with a slash and has a sufficient length + if (!path.startsWith("/") || path.length() < 2) { + return null; + } + + // Removing the leading slash for correct splitting + path = path.substring(1); + + // Splitting the modified path by "/", limiting to 3 parts + String[] parts = path.split("/", 3); + + // Checking if we correctly obtained 3 parts after removing the leading slash + if (parts.length == 3) { + return parts; + } else { + return null; + } + } + + private static String normalizePath(String path) { return OM_KEY_PREFIX + OmUtils.normalizeKey(path, false); } diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/LegacyBucketHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/LegacyBucketHandler.java index 5360551646f5..09f1c5bc7454 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/LegacyBucketHandler.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/LegacyBucketHandler.java @@ -18,11 +18,9 @@ package org.apache.hadoop.ozone.recon.api.handlers; import com.google.common.base.Preconditions; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.scm.server.OzoneStorageContainerManager; import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.hdds.utils.db.TableIterator; -import org.apache.hadoop.ozone.om.OMConfigKeys; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo; @@ -53,8 +51,6 @@ public class LegacyBucketHandler extends BucketHandler { private final String vol; private final String bucket; private final OmBucketInfo omBucketInfo; - private final OzoneConfiguration configuration; - private final boolean enableFileSystemPaths; public LegacyBucketHandler( ReconNamespaceSummaryManager reconNamespaceSummaryManager, @@ -66,10 +62,6 @@ public LegacyBucketHandler( this.omBucketInfo = bucketInfo; this.vol = omBucketInfo.getVolumeName(); this.bucket = omBucketInfo.getBucketName(); - this.configuration = new OzoneConfiguration(); - this.enableFileSystemPaths = configuration - .getBoolean(OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS, - OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS_DEFAULT); } /** @@ -220,93 +212,13 @@ public long handleDirectKeys(long parentId, boolean withReplica, return 0; } - if (enableFileSystemPaths) { - keyDataSizeWithReplica += - handleDirectKeysForFSOLayout(parentId, withReplica, listFile, duData, - keyTable, seekPrefix, nsSummary); - } else { - keyDataSizeWithReplica += - handleDirectKeysForOBSLayout(withReplica, listFile, duData, keyTable, - seekPrefix); - } - - return keyDataSizeWithReplica; - } - - /** - * This method handles disk usage of direct keys for OBS layout. 
- * @param withReplica if withReplica is enabled, set sizeWithReplica - * @param listFile if listFile is enabled, append key DU as a subpath - * @param duData the current DU data - * @param keyTable the key table - * @param seekPrefix the seek prefix used to position the iterator - * @return the total DU of all direct keys - * @throws IOException - */ - public long handleDirectKeysForOBSLayout(boolean withReplica, - boolean listFile, - List duData, - Table keyTable, - String seekPrefix) - throws IOException { - long keyDataSizeWithReplica = 0L; - - - try ( - TableIterator> iterator = keyTable.iterator()) { - iterator.seek(seekPrefix); - - while (iterator.hasNext()) { - Table.KeyValue kv = iterator.next(); - String dbKey = kv.getKey(); - - if (!dbKey.startsWith(seekPrefix)) { - break; - } - - OmKeyInfo keyInfo = kv.getValue(); - if (keyInfo != null) { - createDiskUsage(keyInfo, withReplica, listFile, duData); - if (withReplica) { - long keyDU = keyInfo.getReplicatedSize(); - keyDataSizeWithReplica += keyDU; - } - } - } - } - - return keyDataSizeWithReplica; - } - - /** - * This method handles disk usage of direct keys for OBS layout. - * @param parentId parent directory/bucket - * @param withReplica if withReplica is enabled, set sizeWithReplica - * @param listFile if listFile is enabled, append key DU as a subpath - * @param duData the current DU data - * @param keyTable the key table - * @param seekPrefix the seek prefix used to position the iterator - * @param nsSummary of the parent directory/bucket - * @return the total DU of all direct keys - * @throws IOException - */ - public long handleDirectKeysForFSOLayout(long parentId, boolean withReplica, - boolean listFile, - List duData, - Table keyTable, - String seekPrefix, - NSSummary nsSummary) - throws IOException { - long keyDataSizeWithReplica = 0L; - if (omBucketInfo.getObjectID() != parentId) { String dirName = nsSummary.getDirName(); seekPrefix += dirName; } String[] seekKeys = seekPrefix.split(OM_KEY_PREFIX); - try ( - TableIterator> - iterator = keyTable.iterator()) { + try (TableIterator> + iterator = keyTable.iterator()) { iterator.seek(seekPrefix); @@ -332,10 +244,21 @@ public long handleDirectKeysForFSOLayout(long parentId, boolean withReplica, if (keyInfo.getKeyName().endsWith(OM_KEY_PREFIX)) { continue; } - createDiskUsage(keyInfo, withReplica, listFile, duData); + DUResponse.DiskUsage diskUsage = new DUResponse.DiskUsage(); + String subpath = buildSubpath(normalizedPath, + keyInfo.getFileName()); + diskUsage.setSubpath(subpath); + diskUsage.setKey(true); + diskUsage.setSize(keyInfo.getDataSize()); + if (withReplica) { long keyDU = keyInfo.getReplicatedSize(); keyDataSizeWithReplica += keyDU; + diskUsage.setSizeWithReplica(keyDU); + } + // list the key as a subpath + if (listFile) { + duData.add(diskUsage); } } } @@ -344,35 +267,6 @@ public long handleDirectKeysForFSOLayout(long parentId, boolean withReplica, return keyDataSizeWithReplica; } - /** - * This method handles disk usage calculation for legacy buckets. 
-   * @param keyInfo the key info
-   * @param withReplica if withReplica is enabled, set sizeWithReplica
-   * @param listFile if listFile is enabled, append key DU as a subpath
-   */
-  private DUResponse.DiskUsage createDiskUsage(OmKeyInfo keyInfo,
-                                               boolean withReplica,
-                                               boolean listFile,
-                                               List<DUResponse.DiskUsage> duData) {
-    DUResponse.DiskUsage diskUsage = new DUResponse.DiskUsage();
-    String objectName = keyInfo.getKeyName();
-    diskUsage.setSubpath(objectName);
-    diskUsage.setKey(true);
-    diskUsage.setSize(keyInfo.getDataSize());
-
-    if (withReplica) {
-      long keyDU = keyInfo.getReplicatedSize();
-      diskUsage.setSizeWithReplica(keyDU);
-    }
-
-    if (listFile) {
-      duData.add(diskUsage);
-    }
-
-    return diskUsage;
-  }
-
-
   /**
    * Given a valid path request for a directory,
    * return the directory object ID.

From f9dc5cf32a6fa68a32a43179d2120a84ab9fefb1 Mon Sep 17 00:00:00 2001
From: arafat
Date: Thu, 7 Mar 2024 12:56:20 +0530
Subject: [PATCH 07/19] Fixes a few edge cases

---
 .../java/org/apache/hadoop/ozone/OmUtils.java |  40 ++++++++
 .../recon/api/handlers/EntityHandler.java     |  94 +++++++++++--------
 .../ozone/recon/tasks/NSSummaryTask.java      |  54 ++++++++---
 3 files changed, 135 insertions(+), 53 deletions(-)

diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java
index d58d922b0e07..f5ed8ff20556 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java
@@ -743,6 +743,46 @@ public static String normalizeKey(String keyName,
     return keyName;
   }

+  /**
+   * Normalizes a given path up to the bucket level.
+   *
+   * This method takes a path as input and normalizes it up to the bucket
+   * level. It handles empty paths, removes leading slashes, and splits the
+   * path into segments. It then extracts the volume and bucket names, forming
+   * a normalized path with a single slash. Finally, any remaining segments
+   * are joined as the key name, returning the complete standardized path.
+   *
+   * @param path The path string to be normalized.
+   * @return The normalized path string.
+   */
+  public static String normalizePathUptilBucket(String path) {
+    if (path == null || path.isEmpty()) {
+      return "/"; // Handle empty path
+    }
+
+    // Remove leading slashes
+    path = path.replaceAll("^/*", "");
+
+    String[] segments = path.split("/", -1);
+
+    String volumeName = segments[0];
+    String bucketName = segments.length > 1 ? segments[1] : "";
+
+    // Combine volume and bucket.
+    StringBuilder normalizedPath = new StringBuilder(volumeName);
+    if (!bucketName.isEmpty()) {
+      normalizedPath.append("/").append(bucketName);
+    }
+
+    // Add remaining segments as the key
+    if (segments.length > 2) {
+      normalizedPath.append("/").append(
+          String.join("/", Arrays.copyOfRange(segments, 2, segments.length)));
+    }
+
+    return normalizedPath.toString();
+  }
+
   /**
    * For a given service ID, return list of configured OM hosts.
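As a quick illustration of the rules above (a sketch derived from the method
body; these calls are not part of the patch):

    // Leading slashes are stripped; volume and bucket are joined by a single
    // slash; anything after the bucket is kept verbatim as the key name.
    OmUtils.normalizePathUptilBucket("");                        // -> "/"
    OmUtils.normalizePathUptilBucket("///vol1/bucket1");         // -> "vol1/bucket1"
    OmUtils.normalizePathUptilBucket("/vol1/bucket1/dir1/key1"); // -> "vol1/bucket1/dir1/key1"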
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/EntityHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/EntityHandler.java index 35a160490a4a..93c91c8e1d89 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/EntityHandler.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/EntityHandler.java @@ -17,11 +17,9 @@ */ package org.apache.hadoop.ozone.recon.api.handlers; -import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.hdds.scm.server.OzoneStorageContainerManager; import org.apache.hadoop.ozone.OmUtils; import org.apache.hadoop.ozone.om.helpers.BucketLayout; -import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.recon.ReconConstants; import org.apache.hadoop.ozone.recon.api.types.NamespaceSummaryResponse; import org.apache.hadoop.ozone.recon.api.types.DUResponse; @@ -63,9 +61,18 @@ public EntityHandler( this.omMetadataManager = omMetadataManager; this.reconSCM = reconSCM; this.bucketHandler = bucketHandler; - normalizedPath = normalizePath(path); - names = parseRequestPath(normalizedPath); + // Defaulting to FILE_SYSTEM_OPTIMIZED if bucketHandler is null + BucketLayout layout = + (bucketHandler != null) ? bucketHandler.getBucketLayout() : + BucketLayout.FILE_SYSTEM_OPTIMIZED; + + // Normalize the path based on the determined layout + normalizedPath = normalizePath(path, layout); + + // Choose the parsing method based on the bucket layout + names = (layout == BucketLayout.OBJECT_STORE) ? + parseObjectStorePath(normalizedPath) : parseRequestPath(normalizedPath); } public abstract NamespaceSummaryResponse getSummaryResponse() @@ -121,7 +128,8 @@ public static EntityHandler getEntityHandler( String path) throws IOException { BucketHandler bucketHandler; - String normalizedPath = normalizePath(path); + String normalizedPath = + normalizePath(path, BucketLayout.FILE_SYSTEM_OPTIMIZED); String[] names = parseRequestPath(normalizedPath); if (path.equals(OM_KEY_PREFIX)) { return EntityType.ROOT.create(reconNamespaceSummaryManager, @@ -158,39 +166,37 @@ public static EntityHandler getEntityHandler( } else { // length > 3. 
check dir or key existence String volName = names[0]; String bucketName = names[1]; - String keyName; + // Assuming getBucketHandler already validates volume and bucket existence bucketHandler = BucketHandler.getBucketHandler( - reconNamespaceSummaryManager, - omMetadataManager, reconSCM, - volName, bucketName); + reconNamespaceSummaryManager, omMetadataManager, reconSCM, volName, + bucketName); - // Check if the bucketHandler is null or if the volume/bucket does not exist - if (bucketHandler == null || - !omMetadataManager.volumeExists(volName) || - !bucketHandler.bucketExists(volName, bucketName)) { + if (bucketHandler == null) { return EntityType.UNKNOWN.create(reconNamespaceSummaryManager, omMetadataManager, reconSCM, null, path); } - // If the bucket layout is OBJECT_STORE, use parseObjectStorePath to get the key name + // Directly handle path normalization and parsing based on the layout if (bucketHandler.getBucketLayout() == BucketLayout.OBJECT_STORE) { - String[] parsedObjectStorePath = parseObjectStorePath(path); - if (parsedObjectStorePath == null || - parsedObjectStorePath.length != 3) { - // Handle invalid path format when expecting OBJECT_STORE layout + String[] parsedObjectLayoutPath = parseObjectStorePath( + normalizePath(path, bucketHandler.getBucketLayout())); + if (parsedObjectLayoutPath == null) { return EntityType.UNKNOWN.create(reconNamespaceSummaryManager, omMetadataManager, reconSCM, null, path); } - keyName = parsedObjectStorePath[2]; + // Use the key part directly from the parsed path + return bucketHandler.determineKeyPath(parsedObjectLayoutPath[2]) + .create(reconNamespaceSummaryManager, omMetadataManager, reconSCM, + bucketHandler, path); } else { - // For non-OBJECT_STORE layouts, derive the keyName using the existing names array - keyName = BucketHandler.getKeyName(names); + // Use the existing names array for non-OBJECT_STORE layouts to derive + // the keyName + String keyName = BucketHandler.getKeyName(names); + return bucketHandler.determineKeyPath(keyName) + .create(reconNamespaceSummaryManager, omMetadataManager, reconSCM, + bucketHandler, path); } - - return bucketHandler.determineKeyPath(keyName) - .create(reconNamespaceSummaryManager, - omMetadataManager, reconSCM, bucketHandler, path); } } @@ -276,22 +282,17 @@ public static String[] parseRequestPath(String path) { /** * Splits an object store path into volume, bucket, and key name components. - *

+ *
  * This method parses a path of the format "/volumeName/bucketName/keyName",
  * including paths with additional '/' characters within the key name. It's
- * designed for object store paths where the first three '/' characters separate
- * the root, volume and bucket names from the key name.
+ * designed for object store paths where the first three '/' characters
+ * separate the root, volume and bucket names from the key name.
  *
  * @param path The object store path to parse, starting with a slash.
- * @return A String array with three elements: volume name, bucket name, and key name,
- * or {@code null} if the path format is invalid.
+ * @return A String array with three elements: volume name, bucket name, and
+ * key name, or {@code null} if the path format is invalid.
  */
 public static String[] parseObjectStorePath(String path) {
-    // Ensure the path starts with a slash and has a sufficient length
-    if (!path.startsWith("/") || path.length() < 2) {
-      return null;
-    }
-
     // Removing the leading slash for correct splitting
     path = path.substring(1);

@@ -299,15 +300,32 @@ public static String[] parseObjectStorePath(String path) {
     String[] parts = path.split("/", 3);

     // Checking if we correctly obtained 3 parts after removing the leading slash
-    if (parts.length == 3) {
+    if (parts.length <= 3) {
       return parts;
     } else {
       return null;
     }
   }

-
-  private static String normalizePath(String path) {
+  /**
+   * Normalizes a given path based on the specified bucket layout.
+   *
+   * This method adjusts the path according to the bucket layout.
+   * For the OBJECT_STORE layout, it normalizes the path up to the bucket level
+   * using OmUtils.normalizePathUptilBucket. For other layouts, it
+   * normalizes the entire path, including the key, using
+   * OmUtils.normalizeKey, and does not preserve any trailing slashes.
+   * The normalized path will always be prefixed with OM_KEY_PREFIX to ensure it
+   * is consistent with the expected format for object storage paths in Ozone.
+   *
+   * @param path the path to normalize
+   * @param bucketLayout the layout of the bucket the path refers to
+   * @return A normalized path
+   */
+  private static String normalizePath(String path, BucketLayout bucketLayout) {
+    if (bucketLayout == BucketLayout.OBJECT_STORE) {
+      return OM_KEY_PREFIX + OmUtils.normalizePathUptilBucket(path);
+    }
     return OM_KEY_PREFIX + OmUtils.normalizeKey(path, false);
   }
 }
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTask.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTask.java
index 61cc8ad10c25..30fdb7c1292e 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTask.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTask.java
@@ -37,6 +37,8 @@
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.Callable;
 import java.util.concurrent.Future;
+import java.util.concurrent.ThreadFactory;
+import java.util.concurrent.TimeUnit;

 /**
  * Task to query data from OMDB and write into Recon RocksDB.
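For reference, the parsing above works out as follows (an illustrative
sketch; the outputs follow from the code and these calls are not part of
the patch):

    // After normalizePath(path, OBJECT_STORE) the leading slash is present,
    // so parseObjectStorePath splits off at most three components:
    //   "/vol1/bucket1/dir1/dir2/key1" -> ["vol1", "bucket1", "dir1/dir2/key1"]
    //   "/vol1/bucket1"                -> ["vol1", "bucket1"]
    //   "/vol1"                        -> ["vol1"]
    String[] parts = EntityHandler.parseObjectStorePath("/vol1/bucket1/dir1/dir2/key1");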
@@ -61,12 +63,13 @@ */ public class NSSummaryTask implements ReconOmTask { private static final Logger LOG = - LoggerFactory.getLogger(NSSummaryTask.class); + LoggerFactory.getLogger(NSSummaryTask.class); private final ReconNamespaceSummaryManager reconNamespaceSummaryManager; private final ReconOMMetadataManager reconOMMetadataManager; private final NSSummaryTaskWithFSO nsSummaryTaskWithFSO; private final NSSummaryTaskWithLegacy nsSummaryTaskWithLegacy; + private final NSSummaryTaskWithOBS nsSummaryTaskWithOBS; private final OzoneConfiguration ozoneConfiguration; @Inject @@ -85,6 +88,9 @@ public NSSummaryTask(ReconNamespaceSummaryManager this.nsSummaryTaskWithLegacy = new NSSummaryTaskWithLegacy( reconNamespaceSummaryManager, reconOMMetadataManager, ozoneConfiguration); + this.nsSummaryTaskWithOBS = new NSSummaryTaskWithOBS( + reconNamespaceSummaryManager, + reconOMMetadataManager, ozoneConfiguration); } @Override @@ -94,20 +100,28 @@ public String getTaskName() { @Override public Pair process(OMUpdateEventBatch events) { - boolean success; - success = nsSummaryTaskWithFSO.processWithFSO(events); - if (success) { - success = nsSummaryTaskWithLegacy.processWithLegacy(events); - } else { + boolean success = nsSummaryTaskWithFSO.processWithFSO(events); + if (!success) { LOG.error("processWithFSO failed."); } + success = nsSummaryTaskWithLegacy.processWithLegacy(events); + if (!success) { + LOG.error("processWithLegacy failed."); + } + success = nsSummaryTaskWithOBS.processWithOBS(events); + if (!success) { + LOG.error("processWithOBS failed."); + } return new ImmutablePair<>(getTaskName(), success); } @Override public Pair reprocess(OMMetadataManager omMetadataManager) { + // Initialize a list of tasks to run in parallel Collection> tasks = new ArrayList<>(); + long startTime = System.nanoTime(); // Record start time + try { // reinit Recon RocksDB's namespace CF. reconNamespaceSummaryManager.clearNSSummaryTable(); @@ -121,12 +135,15 @@ public Pair reprocess(OMMetadataManager omMetadataManager) { .reprocessWithFSO(omMetadataManager)); tasks.add(() -> nsSummaryTaskWithLegacy .reprocessWithLegacy(reconOMMetadataManager)); + tasks.add(() -> nsSummaryTaskWithOBS + .reprocessWithOBS(reconOMMetadataManager)); List> results; - ExecutorService executorService = Executors - .newFixedThreadPool(2, - new ThreadFactoryBuilder().setNameFormat("NSSummaryTask - %d") - .build()); + ThreadFactory threadFactory = new ThreadFactoryBuilder() + .setNameFormat("Recon-NSSummaryTask-%d") + .build(); + ExecutorService executorService = Executors.newFixedThreadPool(2, + threadFactory); try { results = executorService.invokeAll(tasks); for (int i = 0; i < results.size(); i++) { @@ -135,17 +152,24 @@ public Pair reprocess(OMMetadataManager omMetadataManager) { } } } catch (InterruptedException ex) { - LOG.error("Error while reprocessing NSSummary " + - "table in Recon DB. ", ex); + LOG.error("Error while reprocessing NSSummary table in Recon DB.", ex); return new ImmutablePair<>(getTaskName(), false); } catch (ExecutionException ex2) { - LOG.error("Error while reprocessing NSSummary " + - "table in Recon DB. 
", ex2); + LOG.error("Error while reprocessing NSSummary table in Recon DB.", ex2); return new ImmutablePair<>(getTaskName(), false); } finally { executorService.shutdown(); + + long endTime = System.nanoTime(); + // Convert to milliseconds + long durationInMillis = + TimeUnit.NANOSECONDS.toMillis(endTime - startTime); + + // Log performance metrics + LOG.info("Task execution time: {} milliseconds", durationInMillis); } + return new ImmutablePair<>(getTaskName(), true); } -} +} From f40e121eb5d184fa92f0fec455b2f04b9ca906a7 Mon Sep 17 00:00:00 2001 From: arafat Date: Thu, 7 Mar 2024 16:03:52 +0530 Subject: [PATCH 08/19] Added Unit tests --- ...ummaryTaskWithLegacyObjectStoreLayout.java | 533 ++++++++++++++++++ 1 file changed, 533 insertions(+) create mode 100644 hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithLegacyObjectStoreLayout.java diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithLegacyObjectStoreLayout.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithLegacyObjectStoreLayout.java new file mode 100644 index 000000000000..8f8b605d31d4 --- /dev/null +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithLegacyObjectStoreLayout.java @@ -0,0 +1,533 @@ +package org.apache.hadoop.ozone.recon.tasks; + +import org.apache.hadoop.hdds.client.StandaloneReplicationConfig; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.utils.db.RDBBatchOperation; +import org.apache.hadoop.ozone.om.OMConfigKeys; +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; +import org.apache.hadoop.ozone.om.helpers.BucketLayout; +import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; +import org.apache.hadoop.ozone.recon.ReconConstants; +import org.apache.hadoop.ozone.recon.ReconTestInjector; +import org.apache.hadoop.ozone.recon.api.types.NSSummary; +import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager; +import org.apache.hadoop.ozone.recon.spi.ReconNamespaceSummaryManager; +import org.apache.hadoop.ozone.recon.spi.impl.OzoneManagerServiceProviderImpl; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Nested; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.io.TempDir; + +import java.io.File; +import java.io.IOException; +import java.util.Arrays; +import java.util.Set; + +import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_DB_DIRS; +import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.*; +import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.writeDirToOm; +import static org.junit.jupiter.api.Assertions.*; + +/** + * Test for NSSummaryTaskWithLegacy focusing on the OBS (Object Store) layout. 
+ */ +public final class TestNSSummaryTaskWithLegacyObjectStoreLayout { + + private static ReconNamespaceSummaryManager reconNamespaceSummaryManager; + private static ReconOMMetadataManager reconOMMetadataManager; + private static OzoneConfiguration ozoneConfiguration; + private static NSSummaryTaskWithLegacy nSSummaryTaskWithLegacy; + + private static OMMetadataManager omMetadataManager; + private static OzoneConfiguration omConfiguration; + + // Object names + private static final String VOL = "vol"; + private static final String BUCKET_ONE = "bucket1"; + private static final String BUCKET_TWO = "bucket2"; + private static final String KEY_ONE = "key1"; + private static final String KEY_TWO = "key2"; + private static final String KEY_THREE = "dir1/dir2/key3"; + private static final String KEY_FOUR = "key4///////////"; + private static final String KEY_FIVE = "//////////"; + private static final String KEY_SIX = "key6"; + private static final String KEY_SEVEN = "/////key7"; + + private static final String TEST_USER = "TestUser"; + + private static final long PARENT_OBJECT_ID_ZERO = 0L; + private static final long VOL_OBJECT_ID = 0L; + private static final long BUCKET_ONE_OBJECT_ID = 1L; + private static final long BUCKET_TWO_OBJECT_ID = 2L; + private static final long KEY_ONE_OBJECT_ID = 3L; + private static final long KEY_TWO_OBJECT_ID = 5L; + private static final long KEY_FOUR_OBJECT_ID = 6L; + private static final long KEY_THREE_OBJECT_ID = 8L; + private static final long KEY_FIVE_OBJECT_ID = 9L; + private static final long KEY_SIX_OBJECT_ID = 10L; + private static final long KEY_SEVEN_OBJECT_ID = 11L; + + + private static final long KEY_ONE_SIZE = 500L; + private static final long KEY_TWO_OLD_SIZE = 1025L; + private static final long KEY_TWO_UPDATE_SIZE = 1023L; + private static final long KEY_THREE_SIZE = + ReconConstants.MAX_FILE_SIZE_UPPER_BOUND - 100L; + private static final long KEY_FOUR_SIZE = 2050L; + private static final long KEY_FIVE_SIZE = 100L; + private static final long KEY_SIX_SIZE = 6000L; + private static final long KEY_SEVEN_SIZE = 7000L; + + private TestNSSummaryTaskWithLegacyObjectStoreLayout() { + } + + @BeforeAll + public static void setUp(@TempDir File tmpDir) throws Exception { + initializeNewOmMetadataManager(new File(tmpDir, "om")); + OzoneManagerServiceProviderImpl ozoneManagerServiceProvider = + getMockOzoneManagerServiceProviderWithFSO(); + reconOMMetadataManager = getTestReconOmMetadataManager(omMetadataManager, + new File(tmpDir, "recon")); + ozoneConfiguration = new OzoneConfiguration(); + ozoneConfiguration.setBoolean(OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS, + false); + + ReconTestInjector reconTestInjector = + new ReconTestInjector.Builder(tmpDir) + .withReconOm(reconOMMetadataManager) + .withOmServiceProvider(ozoneManagerServiceProvider) + .withReconSqlDb() + .withContainerDB() + .build(); + reconNamespaceSummaryManager = + reconTestInjector.getInstance(ReconNamespaceSummaryManager.class); + + NSSummary nonExistentSummary = + reconNamespaceSummaryManager.getNSSummary(BUCKET_ONE_OBJECT_ID); + assertNull(nonExistentSummary); + + populateOMDB(); + + nSSummaryTaskWithLegacy = new NSSummaryTaskWithLegacy( + reconNamespaceSummaryManager, + reconOMMetadataManager, ozoneConfiguration); + } + + /** + * Nested class for testing NSSummaryTaskWithLegacy reprocess. 
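+   * A stale NSSummary entry is written beforehand so the test can verify
+   * that reprocess clears the table before rebuilding it.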
+ */ + @Nested + public class TestReprocess { + + private NSSummary nsSummaryForBucket1; + private NSSummary nsSummaryForBucket2; + + @BeforeEach + public void setUp() throws IOException { + // write a NSSummary prior to reprocess + // verify it got cleaned up after. + NSSummary staleNSSummary = new NSSummary(); + RDBBatchOperation rdbBatchOperation = new RDBBatchOperation(); + reconNamespaceSummaryManager.batchStoreNSSummaries(rdbBatchOperation, -1L, + staleNSSummary); + reconNamespaceSummaryManager.commitBatchOperation(rdbBatchOperation); + + // Verify commit + assertNotNull(reconNamespaceSummaryManager.getNSSummary(-1L)); + + // reinit Recon RocksDB's namespace CF. + reconNamespaceSummaryManager.clearNSSummaryTable(); + + nSSummaryTaskWithLegacy.reprocessWithLegacy(reconOMMetadataManager); + assertNull(reconNamespaceSummaryManager.getNSSummary(-1L)); + + nsSummaryForBucket1 = + reconNamespaceSummaryManager.getNSSummary(BUCKET_ONE_OBJECT_ID); + nsSummaryForBucket2 = + reconNamespaceSummaryManager.getNSSummary(BUCKET_TWO_OBJECT_ID); + assertNotNull(nsSummaryForBucket1); + assertNotNull(nsSummaryForBucket2); + } + + @Test + public void testReprocessNSSummaryNull() throws IOException { + assertNull(reconNamespaceSummaryManager.getNSSummary(-1L)); + } + + @Test + public void testReprocessGetFiles() { + assertEquals(3, nsSummaryForBucket1.getNumOfFiles()); + assertEquals(2, nsSummaryForBucket2.getNumOfFiles()); + + assertEquals(KEY_ONE_SIZE + KEY_TWO_OLD_SIZE + KEY_THREE_SIZE, + nsSummaryForBucket1.getSizeOfFiles()); + assertEquals(KEY_FOUR_SIZE + KEY_FIVE_SIZE, + nsSummaryForBucket2.getSizeOfFiles()); + } + + @Test + public void testReprocessFileBucketSize() { + int[] fileDistBucket1 = nsSummaryForBucket1.getFileSizeBucket(); + int[] fileDistBucket2 = nsSummaryForBucket2.getFileSizeBucket(); + assertEquals(ReconConstants.NUM_OF_FILE_SIZE_BINS, + fileDistBucket1.length); + assertEquals(ReconConstants.NUM_OF_FILE_SIZE_BINS, + fileDistBucket2.length); + + // Check for 1's and 0's in fileDistBucket1 + int[] expectedIndexes1 = {0, 1, 40}; + for (int index = 0; index < fileDistBucket1.length; index++) { + if (contains(expectedIndexes1, index)) { + assertEquals(1, fileDistBucket1[index]); + } else { + assertEquals(0, fileDistBucket1[index]); + } + } + + // Check for 1's and 0's in fileDistBucket2 + int[] expectedIndexes2 = {0, 2}; + for (int index = 0; index < fileDistBucket2.length; index++) { + if (contains(expectedIndexes2, index)) { + assertEquals(1, fileDistBucket2[index]); + } else { + assertEquals(0, fileDistBucket2[index]); + } + } + } + + } + + /** + * Nested class for testing NSSummaryTaskWithLegacy process. + */ + @Nested + public class TestProcess { + + private NSSummary nsSummaryForBucket1; + private NSSummary nsSummaryForBucket2; + + private OMDBUpdateEvent keyEvent1; + private OMDBUpdateEvent keyEvent2; + private OMDBUpdateEvent keyEvent3; + private OMDBUpdateEvent keyEvent4; + + @BeforeEach + public void setUp() throws IOException { + // reinit Recon RocksDB's namespace CF. 
+ reconNamespaceSummaryManager.clearNSSummaryTable(); + nSSummaryTaskWithLegacy.reprocessWithLegacy(reconOMMetadataManager); + nSSummaryTaskWithLegacy.processWithLegacy(processEventBatch()); + + nsSummaryForBucket1 = + reconNamespaceSummaryManager.getNSSummary(BUCKET_ONE_OBJECT_ID); + assertNotNull(nsSummaryForBucket1); + nsSummaryForBucket2 = + reconNamespaceSummaryManager.getNSSummary(BUCKET_TWO_OBJECT_ID); + assertNotNull(nsSummaryForBucket2); + } + + private OMUpdateEventBatch processEventBatch() throws IOException { + // Test PUT Event. + // PUT Key6 in Bucket2. + String omPutKey = + OM_KEY_PREFIX + VOL + + OM_KEY_PREFIX + BUCKET_TWO + + OM_KEY_PREFIX + KEY_SIX; + OmKeyInfo omPutKeyInfo = buildOmKeyInfo(VOL, BUCKET_TWO, KEY_SIX, + KEY_SIX, KEY_SIX_OBJECT_ID, BUCKET_TWO_OBJECT_ID, KEY_SIX_SIZE); + keyEvent1 = new OMDBUpdateEvent. + OMUpdateEventBuilder() + .setKey(omPutKey) + .setValue(omPutKeyInfo) + .setTable(omMetadataManager.getKeyTable(getBucketLayout()) + .getName()) + .setAction(OMDBUpdateEvent.OMDBUpdateAction.PUT) + .build(); + // PUT Key7 in Bucket1. + omPutKey = + OM_KEY_PREFIX + VOL + + OM_KEY_PREFIX + BUCKET_ONE + + OM_KEY_PREFIX + KEY_SEVEN; + omPutKeyInfo = buildOmKeyInfo(VOL, BUCKET_ONE, KEY_SEVEN, + KEY_SEVEN, KEY_SEVEN_OBJECT_ID, BUCKET_ONE_OBJECT_ID, KEY_SEVEN_SIZE); + keyEvent2 = new OMDBUpdateEvent. + OMUpdateEventBuilder() + .setKey(omPutKey) + .setValue(omPutKeyInfo) + .setTable(omMetadataManager.getKeyTable(getBucketLayout()) + .getName()) + .setAction(OMDBUpdateEvent.OMDBUpdateAction.PUT) + .build(); + + // Test DELETE Event. + // Delete Key1 in Bucket1. + String omDeleteKey = + OM_KEY_PREFIX + VOL + + OM_KEY_PREFIX + BUCKET_ONE + + OM_KEY_PREFIX + KEY_ONE; + OmKeyInfo omDeleteKeyInfo = buildOmKeyInfo(VOL, BUCKET_ONE, KEY_ONE, + KEY_ONE, KEY_ONE_OBJECT_ID, BUCKET_ONE_OBJECT_ID, KEY_ONE_SIZE); + keyEvent3 = new OMDBUpdateEvent. + OMUpdateEventBuilder() + .setKey(omDeleteKey) + .setTable(omMetadataManager.getKeyTable(getBucketLayout()) + .getName()) + .setValue(omDeleteKeyInfo) + .setAction(OMDBUpdateEvent.OMDBUpdateAction.DELETE) + .build(); + + // Test UPDATE Event. + // Resize Key2 in Bucket1. + String omResizeKey = + OM_KEY_PREFIX + VOL + + OM_KEY_PREFIX + BUCKET_ONE + + OM_KEY_PREFIX + KEY_TWO; + OmKeyInfo oldOmResizeKeyInfo = + buildOmKeyInfo(VOL, BUCKET_ONE, KEY_TWO, KEY_TWO, KEY_TWO_OBJECT_ID, + BUCKET_ONE_OBJECT_ID, KEY_TWO_OLD_SIZE); + OmKeyInfo newOmResizeKeyInfo = + buildOmKeyInfo(VOL, BUCKET_ONE, KEY_TWO, KEY_TWO, KEY_TWO_OBJECT_ID, + BUCKET_ONE_OBJECT_ID, KEY_TWO_OLD_SIZE + 100); + keyEvent4 = new OMDBUpdateEvent. 
+ OMUpdateEventBuilder() + .setKey(omResizeKey) + .setOldValue(oldOmResizeKeyInfo) + .setValue(newOmResizeKeyInfo) + .setTable(omMetadataManager.getKeyTable(getBucketLayout()) + .getName()) + .setAction(OMDBUpdateEvent.OMDBUpdateAction.UPDATE) + .build(); + + return new OMUpdateEventBatch( + Arrays.asList(keyEvent1, keyEvent2, keyEvent3, keyEvent4)); + } + + @Test + public void testProcessForCount() throws IOException { + assertNotNull(nsSummaryForBucket1); + assertEquals(3, nsSummaryForBucket1.getNumOfFiles()); + assertNotNull(nsSummaryForBucket2); + assertEquals(3, nsSummaryForBucket2.getNumOfFiles()); + + Set childDirBucket1 = nsSummaryForBucket1.getChildDir(); + assertEquals(0, childDirBucket1.size()); + Set childDirBucket2 = nsSummaryForBucket2.getChildDir(); + assertEquals(0, childDirBucket2.size()); + } + + @Test + public void testProcessForSize() throws IOException { + assertNotNull(nsSummaryForBucket1); + assertEquals( + KEY_THREE_SIZE + KEY_SEVEN_SIZE + KEY_TWO_OLD_SIZE + 100, + nsSummaryForBucket1.getSizeOfFiles()); + assertNotNull(nsSummaryForBucket2); + assertEquals(KEY_FOUR_SIZE + KEY_FIVE_SIZE + KEY_SIX_SIZE, + nsSummaryForBucket2.getSizeOfFiles()); + } + + + @Test + public void testProcessFileBucketSize() { + int[] fileDistBucket1 = nsSummaryForBucket1.getFileSizeBucket(); + int[] fileDistBucket2 = nsSummaryForBucket2.getFileSizeBucket(); + assertEquals(ReconConstants.NUM_OF_FILE_SIZE_BINS, + fileDistBucket1.length); + assertEquals(ReconConstants.NUM_OF_FILE_SIZE_BINS, + fileDistBucket2.length); + + // Check for 1's and 0's in fileDistBucket1 + int[] expectedIndexes1 = {1, 3, 40}; + for (int index = 0; index < fileDistBucket1.length; index++) { + if (contains(expectedIndexes1, index)) { + assertEquals(1, fileDistBucket1[index]); + } else { + assertEquals(0, fileDistBucket1[index]); + } + } + + // Check for 1's and 0's in fileDistBucket2 + int[] expectedIndexes2 = {0, 2, 3}; + for (int index = 0; index < fileDistBucket2.length; index++) { + if (contains(expectedIndexes2, index)) { + assertEquals(1, fileDistBucket2[index]); + } else { + assertEquals(0, fileDistBucket2[index]); + } + } + } + + } + + /** + * Populate OMDB with the following configs. + * vol + * / \ + * bucket1 bucket2 + * / \ \ \ \ + * key1 key2 key3 key4 key5 + * + * @throws IOException + */ + private static void populateOMDB() throws IOException { + writeKeyToOm(reconOMMetadataManager, + KEY_ONE, + BUCKET_ONE, + VOL, + KEY_ONE, + KEY_ONE_OBJECT_ID, + BUCKET_ONE_OBJECT_ID, + BUCKET_ONE_OBJECT_ID, + VOL_OBJECT_ID, + KEY_ONE_SIZE, + getBucketLayout()); + writeKeyToOm(reconOMMetadataManager, + KEY_TWO, + BUCKET_ONE, + VOL, + KEY_TWO, + KEY_TWO_OBJECT_ID, + BUCKET_ONE_OBJECT_ID, + BUCKET_ONE_OBJECT_ID, + VOL_OBJECT_ID, + KEY_TWO_OLD_SIZE, + getBucketLayout()); + writeKeyToOm(reconOMMetadataManager, + KEY_THREE, + BUCKET_ONE, + VOL, + KEY_THREE, + KEY_THREE_OBJECT_ID, + BUCKET_ONE_OBJECT_ID, + BUCKET_ONE_OBJECT_ID, + VOL_OBJECT_ID, + KEY_THREE_SIZE, + getBucketLayout()); + + writeKeyToOm(reconOMMetadataManager, + KEY_FOUR, + BUCKET_TWO, + VOL, + KEY_FOUR, + KEY_FOUR_OBJECT_ID, + BUCKET_TWO_OBJECT_ID, + BUCKET_TWO_OBJECT_ID, + VOL_OBJECT_ID, + KEY_FOUR_SIZE, + getBucketLayout()); + writeKeyToOm(reconOMMetadataManager, + KEY_FIVE, + BUCKET_TWO, + VOL, + KEY_FIVE, + KEY_FIVE_OBJECT_ID, + PARENT_OBJECT_ID_ZERO, + BUCKET_TWO_OBJECT_ID, + VOL_OBJECT_ID, + KEY_FIVE_SIZE, + getBucketLayout()); + } + + /** + * Create a new OM Metadata manager instance with one user, one vol, and two + * buckets. 
+ * + * @throws IOException ioEx + */ + private static void initializeNewOmMetadataManager( + File omDbDir) + throws IOException { + omConfiguration = new OzoneConfiguration(); + omConfiguration.set(OZONE_OM_DB_DIRS, + omDbDir.getAbsolutePath()); + omConfiguration.set(OMConfigKeys + .OZONE_OM_ENABLE_FILESYSTEM_PATHS, "true"); + omMetadataManager = new OmMetadataManagerImpl( + omConfiguration, null); + + String volumeKey = omMetadataManager.getVolumeKey(VOL); + OmVolumeArgs args = + OmVolumeArgs.newBuilder() + .setObjectID(VOL_OBJECT_ID) + .setVolume(VOL) + .setAdminName(TEST_USER) + .setOwnerName(TEST_USER) + .build(); + omMetadataManager.getVolumeTable().put(volumeKey, args); + + OmBucketInfo bucketInfo1 = OmBucketInfo.newBuilder() + .setVolumeName(VOL) + .setBucketName(BUCKET_ONE) + .setObjectID(BUCKET_ONE_OBJECT_ID) + .setBucketLayout(getBucketLayout()) + .build(); + + OmBucketInfo bucketInfo2 = OmBucketInfo.newBuilder() + .setVolumeName(VOL) + .setBucketName(BUCKET_TWO) + .setObjectID(BUCKET_TWO_OBJECT_ID) + .setBucketLayout(getBucketLayout()) + .build(); + + String bucketKey = omMetadataManager.getBucketKey( + bucketInfo1.getVolumeName(), bucketInfo1.getBucketName()); + String bucketKey2 = omMetadataManager.getBucketKey( + bucketInfo2.getVolumeName(), bucketInfo2.getBucketName()); + + omMetadataManager.getBucketTable().put(bucketKey, bucketInfo1); + omMetadataManager.getBucketTable().put(bucketKey2, bucketInfo2); + } + + /** + * Build a key info for put/update action. + * + * @param volume volume name + * @param bucket bucket name + * @param key key name + * @param fileName file name + * @param objectID object ID + * @param parentObjectId parent object ID + * @param dataSize file size + * @return the KeyInfo + */ + private static OmKeyInfo buildOmKeyInfo(String volume, + String bucket, + String key, + String fileName, + long objectID, + long parentObjectId, + long dataSize) { + return new OmKeyInfo.Builder() + .setBucketName(bucket) + .setVolumeName(volume) + .setKeyName(key) + .setFileName(fileName) + .setReplicationConfig( + StandaloneReplicationConfig.getInstance( + HddsProtos.ReplicationFactor.ONE)) + .setObjectID(objectID) + .setParentObjectID(parentObjectId) + .setDataSize(dataSize) + .build(); + } + + // Helper method to check if an array contains a specific value + private boolean contains(int[] arr, int value) { + for (int num : arr) { + if (num == value) { + return true; + } + } + return false; + } + + private static BucketLayout getBucketLayout() { + return BucketLayout.LEGACY; + } +} From e56c3bf6a3ebe246864f2b0a72fbc54742089999 Mon Sep 17 00:00:00 2001 From: arafat Date: Thu, 7 Mar 2024 19:54:10 +0530 Subject: [PATCH 09/19] Added unit tests for NSSummaryEndpoint With OBS and Legacy buckets --- ...estNSSummaryEndpointWithOBSAndLegacy.java} | 318 ++++++++++++++---- ...ummaryTaskWithLegacyObjectStoreLayout.java | 4 +- 2 files changed, 259 insertions(+), 63 deletions(-) rename hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/{TestNSSummaryEndpointWithOBS.java => TestNSSummaryEndpointWithOBSAndLegacy.java} (79%) diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithOBS.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithOBSAndLegacy.java similarity index 79% rename from hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithOBS.java rename to 
hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithOBSAndLegacy.java index ac8dee5f0937..b57eef5e6ace 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithOBS.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithOBSAndLegacy.java @@ -62,6 +62,7 @@ import org.apache.hadoop.ozone.recon.spi.StorageContainerServiceProvider; import org.apache.hadoop.ozone.recon.spi.impl.OzoneManagerServiceProviderImpl; import org.apache.hadoop.ozone.recon.spi.impl.StorageContainerServiceProviderImpl; +import org.apache.hadoop.ozone.recon.tasks.NSSummaryTaskWithLegacy; import org.apache.hadoop.ozone.recon.tasks.NSSummaryTaskWithOBS; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; @@ -91,28 +92,37 @@ import static org.mockito.Mockito.when; /** - * Test for NSSummary REST APIs with OBS. - * Testing is done on a simple object store model with a flat hierarchy: - * Testing the following case. - * ├── vol - * │ ├── bucket1 - * │ │ ├── file1 - * │ │ └── file2 - * │ │ └── file3 - * │ └── bucket2 - * │ ├── file4 - * │ └── file5 - * └── vol2 - * ├── bucket3 - * │ ├── file8 - * │ ├── file9 - * │ └── file10 - * └── bucket4 - * └── file11 - * This tests the Rest APIs for NSSummary in the context of OBS buckets, - * focusing on disk usage, quota usage, and file size distribution. + * Tests the NSSummary REST APIs within the context of an Object Store (OBS) layout, + * as well as Legacy layout buckets with FileSystemPaths disabled. The tests aim to + * validate API responses for buckets that follow the flat hierarchy model typical + * of OBS layouts. + *

+ * The test environment simulates a simple object storage structure with volumes
+ * containing buckets, which in turn contain files. Specifically, it includes:
+ * - Two OBS layout buckets (bucket1 and bucket2) under 'vol', each containing
+ *   multiple files.
+ * - Two Legacy layout buckets (bucket3 and bucket4) under 'vol2'; the
+ *   fileSystemEnabled flag is set to false for these legacy buckets.
+ *

+ * The directory structure for testing is as follows: + * . + * └── vol + * ├── bucket1 (OBS) + * │ ├── file1 + * │ ├── file2 + * │ └── file3 + * └── bucket2 (OBS) + * ├── file4 + * └── file5 + * └── vol2 + * ├── bucket3 (Legacy) + * │ ├── file8 + * │ ├── file9 + * │ └── file10 + * └── bucket4 (Legacy) + * └── file11 */ -public class TestNSSummaryEndpointWithOBS { +public class TestNSSummaryEndpointWithOBSAndLegacy { @TempDir private Path temporaryFolder; @@ -256,6 +266,13 @@ public class TestNSSummaryEndpointWithOBS { + FILE2_SIZE_WITH_REPLICA + FILE3_SIZE_WITH_REPLICA; + private static final long + MULTI_BLOCK_TOTAL_SIZE_WITH_REPLICA_UNDER_BUCKET3 + = FILE8_SIZE_WITH_REPLICA + + FILE9_SIZE_WITH_REPLICA + + FILE10_SIZE_WITH_REPLICA; + + private static final long MULTI_BLOCK_TOTAL_SIZE_WITH_REPLICA_UNDER_KEY = FILE4_SIZE_WITH_REPLICA; @@ -278,6 +295,10 @@ public class TestNSSummaryEndpointWithOBS { ROOT_PATH + VOL + ROOT_PATH + BUCKET_ONE; private static final String BUCKET_TWO_PATH = ROOT_PATH + VOL + ROOT_PATH + BUCKET_TWO; + private static final String BUCKET_THREE_PATH = + ROOT_PATH + VOL_TWO + ROOT_PATH + BUCKET_THREE; + private static final String BUCKET_FOUR_PATH = + ROOT_PATH + VOL_TWO + ROOT_PATH + BUCKET_FOUR; private static final String KEY_PATH = ROOT_PATH + VOL + ROOT_PATH + BUCKET_TWO + ROOT_PATH + KEY_FOUR; private static final String MULTI_BLOCK_KEY_PATH = @@ -302,10 +323,17 @@ public class TestNSSummaryEndpointWithOBS { private static final long BUCKET_TWO_DATA_SIZE = FILE_FOUR_SIZE + FILE_FIVE_SIZE; + private static final long BUCKET_THREE_DATA_SIZE = + FILE_EIGHT_SIZE + FILE_NINE_SIZE + FILE_TEN_SIZE; + + private static final long BUCKET_FOUR_DATA_SIZE = FILE_ELEVEN_SIZE; + @BeforeEach public void setUp() throws Exception { conf = new OzoneConfiguration(); + // By setting this config our Legacy buckets will behave like OBS buckets. + conf.set(OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS, "false"); OMMetadataManager omMetadataManager = initializeNewOmMetadataManager( Files.createDirectory(temporaryFolder.resolve( "JunitOmDBDir")).toFile(), conf); @@ -337,6 +365,10 @@ public void setUp() throws Exception { new NSSummaryTaskWithOBS(reconNamespaceSummaryManager, reconOMMetadataManager, conf); nsSummaryTaskWithOBS.reprocessWithOBS(reconOMMetadataManager); + NSSummaryTaskWithLegacy nsSummaryTaskWithLegacy = + new NSSummaryTaskWithLegacy(reconNamespaceSummaryManager, + reconOMMetadataManager, conf); + nsSummaryTaskWithLegacy.reprocessWithLegacy(reconOMMetadataManager); commonUtils = new CommonUtils(); } @@ -381,6 +413,26 @@ public void testGetBasicInfoVol() throws Exception { assertEquals(-1, volResponseObj.getObjectDBInfo().getQuotaInNamespace()); } + @Test + public void testGetBasicInfoVolTwo() throws Exception { + // Test volume 2's basics + Response volTwoResponse = nsSummaryEndpoint.getBasicInfo(VOL_TWO_PATH); + NamespaceSummaryResponse volTwoResponseObj = + (NamespaceSummaryResponse) volTwoResponse.getEntity(); + assertEquals(EntityType.VOLUME, + volTwoResponseObj.getEntityType()); + assertEquals(2, volTwoResponseObj.getCountStats().getNumBucket()); + assertEquals(4, volTwoResponseObj.getCountStats().getNumTotalKey()); + assertEquals(TEST_USER, ((VolumeObjectDBInfo) volTwoResponseObj. + getObjectDBInfo()).getAdmin()); + assertEquals(TEST_USER, ((VolumeObjectDBInfo) volTwoResponseObj. 
+ getObjectDBInfo()).getOwner()); + assertEquals(VOL_TWO, volTwoResponseObj.getObjectDBInfo().getName()); + assertEquals(2097152, + volTwoResponseObj.getObjectDBInfo().getQuotaInBytes()); + assertEquals(-1, volTwoResponseObj.getObjectDBInfo().getQuotaInNamespace()); + } + @Test public void testGetBasicInfoBucketOne() throws Exception { // Test bucket 1's basics @@ -395,7 +447,7 @@ public void testGetBasicInfoBucketOne() throws Exception { assertEquals(StorageType.DISK, ((BucketObjectDBInfo) bucketOneObj.getObjectDBInfo()).getStorageType()); - assertEquals(getBucketLayout(), + assertEquals(getOBSBucketLayout(), ((BucketObjectDBInfo) bucketOneObj.getObjectDBInfo()).getBucketLayout()); assertEquals(BUCKET_ONE, @@ -405,9 +457,64 @@ public void testGetBasicInfoBucketOne() throws Exception { @Test public void testGetBasicInfoBucketTwo() throws Exception { // Test bucket 2's basics - commonUtils.testNSSummaryBasicInfoBucketTwo( - BucketLayout.OBJECT_STORE, - nsSummaryEndpoint); + Response bucketTwoResponse = + nsSummaryEndpoint.getBasicInfo(BUCKET_TWO_PATH); + NamespaceSummaryResponse bucketTwoObj = + (NamespaceSummaryResponse) bucketTwoResponse.getEntity(); + assertEquals(EntityType.BUCKET, bucketTwoObj.getEntityType()); + assertEquals(2, bucketTwoObj.getCountStats().getNumTotalKey()); + assertEquals(VOL, + ((BucketObjectDBInfo) bucketTwoObj.getObjectDBInfo()).getVolumeName()); + assertEquals(StorageType.DISK, + ((BucketObjectDBInfo) + bucketTwoObj.getObjectDBInfo()).getStorageType()); + assertEquals(getOBSBucketLayout(), + ((BucketObjectDBInfo) + bucketTwoObj.getObjectDBInfo()).getBucketLayout()); + assertEquals(BUCKET_TWO, + ((BucketObjectDBInfo) bucketTwoObj.getObjectDBInfo()).getName()); + } + + @Test + public void testGetBasicInfoBucketThree() throws Exception { + // Test bucket 3's basics + Response bucketThreeResponse = + nsSummaryEndpoint.getBasicInfo(BUCKET_THREE_PATH); + NamespaceSummaryResponse bucketThreeObj = (NamespaceSummaryResponse) + bucketThreeResponse.getEntity(); + assertEquals(EntityType.BUCKET, bucketThreeObj.getEntityType()); + assertEquals(3, bucketThreeObj.getCountStats().getNumTotalKey()); + assertEquals(VOL_TWO, + ((BucketObjectDBInfo) bucketThreeObj.getObjectDBInfo()).getVolumeName()); + assertEquals(StorageType.DISK, + ((BucketObjectDBInfo) + bucketThreeObj.getObjectDBInfo()).getStorageType()); + assertEquals(getLegacyBucketLayout(), + ((BucketObjectDBInfo) + bucketThreeObj.getObjectDBInfo()).getBucketLayout()); + assertEquals(BUCKET_THREE, + ((BucketObjectDBInfo) bucketThreeObj.getObjectDBInfo()).getName()); + } + + @Test + public void testGetBasicInfoBucketFour() throws Exception { + // Test bucket 4's basics + Response bucketFourResponse = + nsSummaryEndpoint.getBasicInfo(BUCKET_FOUR_PATH); + NamespaceSummaryResponse bucketFourObj = + (NamespaceSummaryResponse) bucketFourResponse.getEntity(); + assertEquals(EntityType.BUCKET, bucketFourObj.getEntityType()); + assertEquals(1, bucketFourObj.getCountStats().getNumTotalKey()); + assertEquals(VOL_TWO, + ((BucketObjectDBInfo) bucketFourObj.getObjectDBInfo()).getVolumeName()); + assertEquals(StorageType.DISK, + ((BucketObjectDBInfo) + bucketFourObj.getObjectDBInfo()).getStorageType()); + assertEquals(getLegacyBucketLayout(), + ((BucketObjectDBInfo) + bucketFourObj.getObjectDBInfo()).getBucketLayout()); + assertEquals(BUCKET_FOUR, + ((BucketObjectDBInfo) bucketFourObj.getObjectDBInfo()).getName()); } @Test @@ -461,16 +568,77 @@ public void testDiskUsageVolume() throws Exception { } @Test - public void 
testDiskUsageBucket() throws Exception { + public void testDiskUsageVolTwo() throws Exception { + // volume level DU + Response volResponse = nsSummaryEndpoint.getDiskUsage(VOL_TWO_PATH, + false, false); + DUResponse duVolRes = (DUResponse) volResponse.getEntity(); + assertEquals(2, duVolRes.getCount()); + List duData = duVolRes.getDuData(); + // sort based on subpath + Collections.sort(duData, + Comparator.comparing(DUResponse.DiskUsage::getSubpath)); + DUResponse.DiskUsage duBucket3 = duData.get(0); + DUResponse.DiskUsage duBucket4 = duData.get(1); + assertEquals(BUCKET_THREE_PATH, duBucket3.getSubpath()); + assertEquals(BUCKET_FOUR_PATH, duBucket4.getSubpath()); + assertEquals(VOL_TWO_DATA_SIZE, duVolRes.getSize()); + } + + @Test + public void testDiskUsageBucketOne() throws Exception { // bucket level DU Response bucketResponse = nsSummaryEndpoint.getDiskUsage(BUCKET_ONE_PATH, false, false); DUResponse duBucketResponse = (DUResponse) bucketResponse.getEntity(); // There are no sub-paths under this OBS bucket. assertEquals(0, duBucketResponse.getCount()); + + Response bucketResponseWithSubpath = nsSummaryEndpoint.getDiskUsage( + BUCKET_ONE_PATH, true, false); + DUResponse duBucketResponseWithFiles = + (DUResponse) bucketResponseWithSubpath.getEntity(); + assertEquals(3, duBucketResponseWithFiles.getCount()); + assertEquals(BUCKET_ONE_DATA_SIZE, duBucketResponse.getSize()); } + @Test + public void testDiskUsageBucketTwo() throws Exception { + // bucket level DU + Response bucketResponse = nsSummaryEndpoint.getDiskUsage(BUCKET_TWO_PATH, + false, false); + DUResponse duBucketResponse = (DUResponse) bucketResponse.getEntity(); + // There are no sub-paths under this OBS bucket. + assertEquals(0, duBucketResponse.getCount()); + + Response bucketResponseWithSubpath = nsSummaryEndpoint.getDiskUsage( + BUCKET_TWO_PATH, true, false); + DUResponse duBucketResponseWithFiles = + (DUResponse) bucketResponseWithSubpath.getEntity(); + assertEquals(2, duBucketResponseWithFiles.getCount()); + + assertEquals(BUCKET_TWO_DATA_SIZE, duBucketResponse.getSize()); + } + + @Test + public void testDiskUsageBucketThree() throws Exception { + // bucket level DU + Response bucketResponse = nsSummaryEndpoint.getDiskUsage(BUCKET_THREE_PATH, + false, false); + DUResponse duBucketResponse = (DUResponse) bucketResponse.getEntity(); + // There are no sub-paths under this Legacy bucket. 
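+    // (With ozone.om.enable.filesystem.paths=false, slashes in key names are
+    // not treated as directory separators, so this Legacy bucket reports the
+    // same flat listing as the OBS buckets above.)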
+ assertEquals(0, duBucketResponse.getCount()); + + Response bucketResponseWithSubpath = nsSummaryEndpoint.getDiskUsage( + BUCKET_THREE_PATH, true, false); + DUResponse duBucketResponseWithFiles = + (DUResponse) bucketResponseWithSubpath.getEntity(); + assertEquals(3, duBucketResponseWithFiles.getCount()); + + assertEquals(BUCKET_THREE_DATA_SIZE, duBucketResponse.getSize()); + } + @Test public void testDiskUsageKey() throws Exception { // key level DU @@ -531,7 +699,7 @@ public void testDataSizeUnderVolWithReplication() throws IOException { } @Test - public void testDataSizeUnderBucketWithReplication() throws IOException { + public void testDataSizeUnderBucketOneWithReplication() throws IOException { setUpMultiBlockReplicatedKeys(); Response bucketResponse = nsSummaryEndpoint.getDiskUsage(BUCKET_ONE_PATH, false, true); @@ -541,6 +709,17 @@ public void testDataSizeUnderBucketWithReplication() throws IOException { replicaDUResponse.getSizeWithReplica()); } + @Test + public void testDataSizeUnderBucketThreeWithReplication() throws IOException { + setUpMultiBlockReplicatedKeys(); + Response bucketResponse = nsSummaryEndpoint.getDiskUsage(BUCKET_THREE_PATH, + false, true); + DUResponse replicaDUResponse = (DUResponse) bucketResponse.getEntity(); + assertEquals(ResponseStatus.OK, replicaDUResponse.getStatus()); + assertEquals(MULTI_BLOCK_TOTAL_SIZE_WITH_REPLICA_UNDER_BUCKET3, + replicaDUResponse.getSizeWithReplica()); + } + @Test public void testDataSizeUnderKeyWithReplication() throws IOException { setUpMultiBlockReplicatedKeys(); @@ -579,6 +758,18 @@ public void testQuotaUsage() throws Exception { assertEquals(BUCKET_TWO_QUOTA, quBucketRes2.getQuota()); assertEquals(BUCKET_TWO_DATA_SIZE, quBucketRes2.getQuotaUsed()); + Response bucketRes3 = nsSummaryEndpoint.getQuotaUsage(BUCKET_THREE_PATH); + QuotaUsageResponse quBucketRes3 = + (QuotaUsageResponse) bucketRes3.getEntity(); + assertEquals(BUCKET_THREE_QUOTA, quBucketRes3.getQuota()); + assertEquals(BUCKET_THREE_DATA_SIZE, quBucketRes3.getQuotaUsed()); + + Response bucketRes4 = nsSummaryEndpoint.getQuotaUsage(BUCKET_FOUR_PATH); + QuotaUsageResponse quBucketRes4 = + (QuotaUsageResponse) bucketRes4.getEntity(); + assertEquals(BUCKET_FOUR_QUOTA, quBucketRes4.getQuota()); + assertEquals(BUCKET_FOUR_DATA_SIZE, quBucketRes4.getQuotaUsed()); + // other level not applicable Response naResponse2 = nsSummaryEndpoint.getQuotaUsage(KEY_PATH); QuotaUsageResponse quotaUsageResponse2 = @@ -619,24 +810,25 @@ public void checkFileSizeDist(String path, int bin0, /** * Testing the following case. - * ├── vol - * │ ├── bucket1 - * │ │ ├── file1 - * │ │ └── file2 - * │ │ └── file3 - * │ └── bucket2 - * │ ├── file4 - * │ └── file5 + * └── vol + * ├── bucket1 (OBS) + * │ ├── file1 + * │ ├── file2 + * │ └── file3 + * └── bucket2 (OBS) + * ├── file4 + * └── file5 * └── vol2 - * ├── bucket3 + * ├── bucket3 (Legacy) * │ ├── file8 * │ ├── file9 * │ └── file10 - * └── bucket4 + * └── bucket4 (Legacy) * └── file11 * * Write these keys to OM and * replicate them. 
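+   * Buckets 1 and 2 use the OBS layout and buckets 3 and 4 the LEGACY
+   * layout, so both handler code paths are exercised by the same data set.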
+ * @throws Exception */ @SuppressWarnings("checkstyle:MethodLength") private void populateOMDB() throws Exception { @@ -652,7 +844,7 @@ private void populateOMDB() throws Exception { BUCKET_ONE_OBJECT_ID, VOL_OBJECT_ID, FILE_ONE_SIZE, - getBucketLayout()); + getOBSBucketLayout()); writeKeyToOm(reconOMMetadataManager, KEY_TWO, BUCKET_ONE, @@ -663,7 +855,7 @@ private void populateOMDB() throws Exception { BUCKET_ONE_OBJECT_ID, VOL_OBJECT_ID, FILE_TWO_SIZE, - getBucketLayout()); + getOBSBucketLayout()); writeKeyToOm(reconOMMetadataManager, KEY_THREE, BUCKET_ONE, @@ -674,7 +866,7 @@ private void populateOMDB() throws Exception { BUCKET_ONE_OBJECT_ID, VOL_OBJECT_ID, FILE_THREE_SIZE, - getBucketLayout()); + getOBSBucketLayout()); writeKeyToOm(reconOMMetadataManager, KEY_FOUR, BUCKET_TWO, @@ -685,7 +877,7 @@ private void populateOMDB() throws Exception { BUCKET_TWO_OBJECT_ID, VOL_OBJECT_ID, FILE_FOUR_SIZE, - getBucketLayout()); + getOBSBucketLayout()); writeKeyToOm(reconOMMetadataManager, KEY_FIVE, BUCKET_TWO, @@ -696,7 +888,7 @@ private void populateOMDB() throws Exception { BUCKET_TWO_OBJECT_ID, VOL_OBJECT_ID, FILE_FIVE_SIZE, - getBucketLayout()); + getOBSBucketLayout()); writeKeyToOm(reconOMMetadataManager, KEY_EIGHT, @@ -708,7 +900,7 @@ private void populateOMDB() throws Exception { BUCKET_THREE_OBJECT_ID, VOL_TWO_OBJECT_ID, FILE_EIGHT_SIZE, - getBucketLayout()); + getLegacyBucketLayout()); writeKeyToOm(reconOMMetadataManager, KEY_NINE, BUCKET_THREE, @@ -719,7 +911,7 @@ private void populateOMDB() throws Exception { BUCKET_THREE_OBJECT_ID, VOL_TWO_OBJECT_ID, FILE_NINE_SIZE, - getBucketLayout()); + getLegacyBucketLayout()); writeKeyToOm(reconOMMetadataManager, KEY_TEN, BUCKET_THREE, @@ -730,7 +922,7 @@ private void populateOMDB() throws Exception { BUCKET_THREE_OBJECT_ID, VOL_TWO_OBJECT_ID, FILE_TEN_SIZE, - getBucketLayout()); + getLegacyBucketLayout()); writeKeyToOm(reconOMMetadataManager, KEY_ELEVEN, BUCKET_FOUR, @@ -741,7 +933,7 @@ private void populateOMDB() throws Exception { BUCKET_FOUR_OBJECT_ID, VOL_TWO_OBJECT_ID, FILE_ELEVEN_SIZE, - getBucketLayout()); + getLegacyBucketLayout()); } /** @@ -788,7 +980,7 @@ private static OMMetadataManager initializeNewOmMetadataManager( .setBucketName(BUCKET_ONE) .setObjectID(BUCKET_ONE_OBJECT_ID) .setQuotaInBytes(BUCKET_ONE_QUOTA) - .setBucketLayout(getBucketLayout()) + .setBucketLayout(getOBSBucketLayout()) .build(); OmBucketInfo bucketInfo2 = OmBucketInfo.newBuilder() @@ -796,7 +988,7 @@ private static OMMetadataManager initializeNewOmMetadataManager( .setBucketName(BUCKET_TWO) .setObjectID(BUCKET_TWO_OBJECT_ID) .setQuotaInBytes(BUCKET_TWO_QUOTA) - .setBucketLayout(getBucketLayout()) + .setBucketLayout(getOBSBucketLayout()) .build(); OmBucketInfo bucketInfo3 = OmBucketInfo.newBuilder() @@ -804,7 +996,7 @@ private static OMMetadataManager initializeNewOmMetadataManager( .setBucketName(BUCKET_THREE) .setObjectID(BUCKET_THREE_OBJECT_ID) .setQuotaInBytes(BUCKET_THREE_QUOTA) - .setBucketLayout(getBucketLayout()) + .setBucketLayout(getLegacyBucketLayout()) .build(); OmBucketInfo bucketInfo4 = OmBucketInfo.newBuilder() @@ -812,7 +1004,7 @@ private static OMMetadataManager initializeNewOmMetadataManager( .setBucketName(BUCKET_FOUR) .setObjectID(BUCKET_FOUR_OBJECT_ID) .setQuotaInBytes(BUCKET_FOUR_QUOTA) - .setBucketLayout(getBucketLayout()) + .setBucketLayout(getLegacyBucketLayout()) .build(); String bucketKey = omMetadataManager.getBucketKey( @@ -847,7 +1039,7 @@ private void setUpMultiBlockKey() throws IOException { BUCKET_ONE_OBJECT_ID, VOL_OBJECT_ID, 
Collections.singletonList(locationInfoGroup), - getBucketLayout(), + getOBSBucketLayout(), FILE_THREE_SIZE); } @@ -920,7 +1112,7 @@ private void setUpMultiBlockReplicatedKeys() throws IOException { BUCKET_ONE_OBJECT_ID, VOL_OBJECT_ID, Collections.singletonList(locationInfoGroup1), - getBucketLayout(), + getOBSBucketLayout(), FILE_ONE_SIZE); //vol/bucket1/file2 @@ -934,7 +1126,7 @@ private void setUpMultiBlockReplicatedKeys() throws IOException { BUCKET_ONE_OBJECT_ID, VOL_OBJECT_ID, Collections.singletonList(locationInfoGroup2), - getBucketLayout(), + getOBSBucketLayout(), FILE_TWO_SIZE); //vol/bucket1/file3 @@ -948,7 +1140,7 @@ private void setUpMultiBlockReplicatedKeys() throws IOException { BUCKET_ONE_OBJECT_ID, VOL_OBJECT_ID, Collections.singletonList(locationInfoGroup1), - getBucketLayout(), + getOBSBucketLayout(), FILE_THREE_SIZE); //vol/bucket2/file4 @@ -962,7 +1154,7 @@ private void setUpMultiBlockReplicatedKeys() throws IOException { BUCKET_TWO_OBJECT_ID, VOL_OBJECT_ID, Collections.singletonList(locationInfoGroup2), - getBucketLayout(), + getOBSBucketLayout(), FILE_FOUR_SIZE); //vol/bucket2/file5 @@ -976,7 +1168,7 @@ private void setUpMultiBlockReplicatedKeys() throws IOException { BUCKET_TWO_OBJECT_ID, VOL_OBJECT_ID, Collections.singletonList(locationInfoGroup1), - getBucketLayout(), + getOBSBucketLayout(), FILE_FIVE_SIZE); //vol2/bucket3/file8 @@ -990,7 +1182,7 @@ private void setUpMultiBlockReplicatedKeys() throws IOException { BUCKET_THREE_OBJECT_ID, VOL_TWO_OBJECT_ID, Collections.singletonList(locationInfoGroup2), - getBucketLayout(), + getLegacyBucketLayout(), FILE_EIGHT_SIZE); //vol2/bucket3/file9 @@ -1004,7 +1196,7 @@ private void setUpMultiBlockReplicatedKeys() throws IOException { BUCKET_THREE_OBJECT_ID, VOL_TWO_OBJECT_ID, Collections.singletonList(locationInfoGroup1), - getBucketLayout(), + getLegacyBucketLayout(), FILE_NINE_SIZE); //vol2/bucket3/file10 @@ -1018,7 +1210,7 @@ private void setUpMultiBlockReplicatedKeys() throws IOException { BUCKET_THREE_OBJECT_ID, VOL_TWO_OBJECT_ID, Collections.singletonList(locationInfoGroup2), - getBucketLayout(), + getLegacyBucketLayout(), FILE_TEN_SIZE); //vol2/bucket4/file11 @@ -1032,7 +1224,7 @@ private void setUpMultiBlockReplicatedKeys() throws IOException { BUCKET_FOUR_OBJECT_ID, VOL_TWO_OBJECT_ID, Collections.singletonList(locationInfoGroup1), - getBucketLayout(), + getLegacyBucketLayout(), FILE_ELEVEN_SIZE); } @@ -1115,10 +1307,14 @@ private static ReconStorageContainerManagerFacade getMockReconSCM() return reconSCM; } - private static BucketLayout getBucketLayout() { + private static BucketLayout getOBSBucketLayout() { return BucketLayout.OBJECT_STORE; } + private static BucketLayout getLegacyBucketLayout() { + return BucketLayout.LEGACY; + } + private static SCMNodeStat getMockSCMRootStat() { return new SCMNodeStat(ROOT_QUOTA, ROOT_DATA_SIZE, ROOT_QUOTA - ROOT_DATA_SIZE, 0L, 0L); diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithLegacyObjectStoreLayout.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithLegacyObjectStoreLayout.java index 8f8b605d31d4..df51a722fab4 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithLegacyObjectStoreLayout.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithLegacyObjectStoreLayout.java @@ -367,8 +367,8 @@ public void testProcessFileBucketSize() { /** * Populate OMDB with the following configs. 
- * vol - * / \ + * vol + * / \ * bucket1 bucket2 * / \ \ \ \ * key1 key2 key3 key4 key5 From 76c0262e0f0aac4e3f881c760947cfbc4c30b588 Mon Sep 17 00:00:00 2001 From: arafat Date: Thu, 7 Mar 2024 21:38:30 +0530 Subject: [PATCH 10/19] Fixed config flag for legacy buckets --- .../api/TestNSSummaryEndpointWithOBSAndLegacy.java | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithOBSAndLegacy.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithOBSAndLegacy.java index b57eef5e6ace..895d87efa588 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithOBSAndLegacy.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithOBSAndLegacy.java @@ -146,14 +146,14 @@ public class TestNSSummaryEndpointWithOBSAndLegacy { private static final String BUCKET_THREE = "bucket3"; private static final String BUCKET_FOUR = "bucket4"; private static final String KEY_ONE = "file1"; - private static final String KEY_TWO = "file2"; - private static final String KEY_THREE = "file3"; + private static final String KEY_TWO = "////file2"; + private static final String KEY_THREE = "file3///"; private static final String KEY_FOUR = "file4"; - private static final String KEY_FIVE = "file5"; + private static final String KEY_FIVE = "//////"; private static final String KEY_EIGHT = "file8"; - private static final String KEY_NINE = "file9"; - private static final String KEY_TEN = "file10"; - private static final String KEY_ELEVEN = "file11"; + private static final String KEY_NINE = "//////"; + private static final String KEY_TEN = "///__file10"; + private static final String KEY_ELEVEN = "////file11"; private static final String MULTI_BLOCK_FILE = KEY_THREE; private static final long PARENT_OBJECT_ID_ZERO = 0L; @@ -948,7 +948,7 @@ private static OMMetadataManager initializeNewOmMetadataManager( omConfiguration.set(OZONE_OM_DB_DIRS, omDbDir.getAbsolutePath()); omConfiguration.set(OMConfigKeys - .OZONE_OM_ENABLE_FILESYSTEM_PATHS, "true"); + .OZONE_OM_ENABLE_FILESYSTEM_PATHS, "false"); OMMetadataManager omMetadataManager = new OmMetadataManagerImpl( omConfiguration, null); From cb83bdaa704ec01bb34c70b62a6be34ae1688486 Mon Sep 17 00:00:00 2001 From: arafat Date: Thu, 7 Mar 2024 22:02:00 +0530 Subject: [PATCH 11/19] Added more unit tests for the key level --- ...TestNSSummaryEndpointWithOBSAndLegacy.java | 86 +++++++++++++++++-- 1 file changed, 77 insertions(+), 9 deletions(-) diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithOBSAndLegacy.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithOBSAndLegacy.java index 895d87efa588..c37d743bb4e3 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithOBSAndLegacy.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithOBSAndLegacy.java @@ -149,7 +149,7 @@ public class TestNSSummaryEndpointWithOBSAndLegacy { private static final String KEY_TWO = "////file2"; private static final String KEY_THREE = "file3///"; private static final String KEY_FOUR = "file4"; - private static final String KEY_FIVE = "//////"; + private static final String KEY_FIVE = "_//////"; private static final String KEY_EIGHT = "file8"; private static final String 
KEY_NINE = "//////"; private static final String KEY_TEN = "///__file10"; @@ -299,7 +299,25 @@ public class TestNSSummaryEndpointWithOBSAndLegacy { ROOT_PATH + VOL_TWO + ROOT_PATH + BUCKET_THREE; private static final String BUCKET_FOUR_PATH = ROOT_PATH + VOL_TWO + ROOT_PATH + BUCKET_FOUR; - private static final String KEY_PATH = + private static final String KEY_ONE_PATH = + ROOT_PATH + VOL + ROOT_PATH + BUCKET_ONE + ROOT_PATH + KEY_ONE; + private static final String KEY_TWO_PATH = + ROOT_PATH + VOL + ROOT_PATH + BUCKET_ONE + ROOT_PATH + KEY_TWO; + private static final String KEY_THREE_PATH = + ROOT_PATH + VOL + ROOT_PATH + BUCKET_ONE + ROOT_PATH + KEY_THREE; + private static final String KEY_FOUR_PATH = + ROOT_PATH + VOL + ROOT_PATH + BUCKET_TWO + ROOT_PATH + KEY_FOUR; + private static final String KEY_FIVE_PATH = + ROOT_PATH + VOL + ROOT_PATH + BUCKET_TWO + ROOT_PATH + KEY_FIVE; + private static final String KEY_EIGHT_PATH = + ROOT_PATH + VOL_TWO + ROOT_PATH + BUCKET_THREE + ROOT_PATH + KEY_EIGHT; + private static final String KEY_NINE_PATH = + ROOT_PATH + VOL_TWO + ROOT_PATH + BUCKET_THREE + ROOT_PATH + KEY_NINE; + private static final String KEY_TEN_PATH = + ROOT_PATH + VOL_TWO + ROOT_PATH + BUCKET_THREE + ROOT_PATH + KEY_TEN; + private static final String KEY_ELEVEN_PATH = + ROOT_PATH + VOL_TWO + ROOT_PATH + BUCKET_FOUR + ROOT_PATH + KEY_ELEVEN; + private static final String KEY4_PATH = ROOT_PATH + VOL + ROOT_PATH + BUCKET_TWO + ROOT_PATH + KEY_FOUR; private static final String MULTI_BLOCK_KEY_PATH = ROOT_PATH + VOL + ROOT_PATH + BUCKET_ONE + ROOT_PATH + KEY_THREE; @@ -640,13 +658,63 @@ public void testDiskUsageBucketThree() throws Exception { } @Test - public void testDiskUsageKey() throws Exception { + public void testDiskUsageKey1() throws Exception { + // key level DU + Response keyResponse = nsSummaryEndpoint.getDiskUsage(KEY_ONE_PATH, + false, false); + DUResponse duKeyResponse = (DUResponse) keyResponse.getEntity(); + assertEquals(0, duKeyResponse.getCount()); + assertEquals(FILE_ONE_SIZE, duKeyResponse.getSize()); + } + + @Test + public void testDiskUsageKey2() throws Exception { + // key level DU + Response keyResponse = nsSummaryEndpoint.getDiskUsage(KEY_TWO_PATH, + false, false); + DUResponse duKeyResponse = (DUResponse) keyResponse.getEntity(); + assertEquals(0, duKeyResponse.getCount()); + assertEquals(FILE_TWO_SIZE, duKeyResponse.getSize()); + } + + @Test + public void testDiskUsageKey4() throws Exception { + // key level DU + Response keyResponse = nsSummaryEndpoint.getDiskUsage(KEY4_PATH, + true, false); + DUResponse duKeyResponse = (DUResponse) keyResponse.getEntity(); + assertEquals(0, duKeyResponse.getCount()); + assertEquals(FILE_FOUR_SIZE, duKeyResponse.getSize()); + } + + @Test + public void testDiskUsageKey5() throws Exception { + // key level DU + Response keyResponse = nsSummaryEndpoint.getDiskUsage(KEY_FIVE_PATH, + false, false); + DUResponse duKeyResponse = (DUResponse) keyResponse.getEntity(); + assertEquals(0, duKeyResponse.getCount()); + assertEquals(FILE_FIVE_SIZE, duKeyResponse.getSize()); + } + + @Test + public void testDiskUsageKey8() throws Exception { + // key level DU + Response keyResponse = nsSummaryEndpoint.getDiskUsage(KEY_EIGHT_PATH, + false, false); + DUResponse duKeyResponse = (DUResponse) keyResponse.getEntity(); + assertEquals(0, duKeyResponse.getCount()); + assertEquals(FILE_EIGHT_SIZE, duKeyResponse.getSize()); + } + + @Test + public void testDiskUsageKey11() throws Exception { // key level DU - Response keyResponse = 
nsSummaryEndpoint.getDiskUsage(KEY_PATH, + Response keyResponse = nsSummaryEndpoint.getDiskUsage(KEY_ELEVEN_PATH, false, false); - DUResponse keyObj = (DUResponse) keyResponse.getEntity(); - assertEquals(0, keyObj.getCount()); - assertEquals(FILE_FOUR_SIZE, keyObj.getSize()); + DUResponse duKeyResponse = (DUResponse) keyResponse.getEntity(); + assertEquals(0, duKeyResponse.getCount()); + assertEquals(FILE_ELEVEN_SIZE, duKeyResponse.getSize()); } @Test @@ -723,7 +791,7 @@ public void testDataSizeUnderBucketThreeWithReplication() throws IOException { @Test public void testDataSizeUnderKeyWithReplication() throws IOException { setUpMultiBlockReplicatedKeys(); - Response keyResponse = nsSummaryEndpoint.getDiskUsage(KEY_PATH, + Response keyResponse = nsSummaryEndpoint.getDiskUsage(KEY4_PATH, false, true); DUResponse replicaDUResponse = (DUResponse) keyResponse.getEntity(); assertEquals(ResponseStatus.OK, replicaDUResponse.getStatus()); @@ -771,7 +839,7 @@ public void testQuotaUsage() throws Exception { assertEquals(BUCKET_FOUR_DATA_SIZE, quBucketRes4.getQuotaUsed()); // other level not applicable - Response naResponse2 = nsSummaryEndpoint.getQuotaUsage(KEY_PATH); + Response naResponse2 = nsSummaryEndpoint.getQuotaUsage(KEY4_PATH); QuotaUsageResponse quotaUsageResponse2 = (QuotaUsageResponse) naResponse2.getEntity(); assertEquals(ResponseStatus.TYPE_NOT_APPLICABLE, From d15231fb9df7a12f4f915a7cdc132a2d46acef25 Mon Sep 17 00:00:00 2001 From: arafat Date: Thu, 7 Mar 2024 22:21:37 +0530 Subject: [PATCH 12/19] Added licence header and fixed checkstyle issues --- ...TestNSSummaryTaskWithLegacyOBSLayout.java} | 31 ++++++++++++++++--- 1 file changed, 26 insertions(+), 5 deletions(-) rename hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/{TestNSSummaryTaskWithLegacyObjectStoreLayout.java => TestNSSummaryTaskWithLegacyOBSLayout.java} (93%) diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithLegacyObjectStoreLayout.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithLegacyOBSLayout.java similarity index 93% rename from hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithLegacyObjectStoreLayout.java rename to hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithLegacyOBSLayout.java index df51a722fab4..db4803676390 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithLegacyObjectStoreLayout.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithLegacyOBSLayout.java @@ -1,3 +1,21 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + package org.apache.hadoop.ozone.recon.tasks; import org.apache.hadoop.hdds.client.StandaloneReplicationConfig; @@ -30,14 +48,17 @@ import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_DB_DIRS; -import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.*; -import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.writeDirToOm; -import static org.junit.jupiter.api.Assertions.*; +import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.writeKeyToOm; +import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getMockOzoneManagerServiceProviderWithFSO; +import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getTestReconOmMetadataManager; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNull; /** * Test for NSSummaryTaskWithLegacy focusing on the OBS (Object Store) layout. */ -public final class TestNSSummaryTaskWithLegacyObjectStoreLayout { +public final class TestNSSummaryTaskWithLegacyOBSLayout { private static ReconNamespaceSummaryManager reconNamespaceSummaryManager; private static ReconOMMetadataManager reconOMMetadataManager; @@ -84,7 +105,7 @@ public final class TestNSSummaryTaskWithLegacyObjectStoreLayout { private static final long KEY_SIX_SIZE = 6000L; private static final long KEY_SEVEN_SIZE = 7000L; - private TestNSSummaryTaskWithLegacyObjectStoreLayout() { + private TestNSSummaryTaskWithLegacyOBSLayout() { } @BeforeAll From ab9cb00e382b8194e16cc8b13921d0730c132224 Mon Sep 17 00:00:00 2001 From: arafat Date: Thu, 7 Mar 2024 22:24:08 +0530 Subject: [PATCH 13/19] Renamed a variable --- .../hadoop/ozone/recon/tasks/NSSummaryTaskWithLegacy.java | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskWithLegacy.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskWithLegacy.java index 17ae2571681e..f537fd4ec370 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskWithLegacy.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskWithLegacy.java @@ -47,7 +47,7 @@ */ public class NSSummaryTaskWithLegacy extends NSSummaryTaskDbEventHandler { - private static final BucketLayout BUCKET_LAYOUT = BucketLayout.LEGACY; + private static final BucketLayout LEGACY_BUCKET_LAYOUT = BucketLayout.LEGACY; private static final Logger LOG = LoggerFactory.getLogger(NSSummaryTaskWithLegacy.class); @@ -242,7 +242,7 @@ public boolean reprocessWithLegacy(OMMetadataManager omMetadataManager) { try { Table keyTable = - omMetadataManager.getKeyTable(BUCKET_LAYOUT); + omMetadataManager.getKeyTable(LEGACY_BUCKET_LAYOUT); try (TableIterator> keyTableIter = keyTable.iterator()) { @@ -320,7 +320,7 @@ private void setParentDirectoryId(OmKeyInfo keyInfo) throws IOException { getReconOMMetadataManager().getOzoneKey(keyInfo.getVolumeName(), keyInfo.getBucketName(), parentKeyName); OmKeyInfo parentKeyInfo = getReconOMMetadataManager() - .getKeyTable(BUCKET_LAYOUT) + .getKeyTable(LEGACY_BUCKET_LAYOUT) .getSkipCache(fullParentKeyName); if (parentKeyInfo != null) { @@ -369,7 +369,7 @@ private boolean isBucketLayoutValid(ReconOMMetadataManager metadataManager, OmBucketInfo omBucketInfo = 
metadataManager.getBucketTable().getSkipCache(bucketDBKey); - if (omBucketInfo.getBucketLayout() != BUCKET_LAYOUT) { + if (omBucketInfo.getBucketLayout() != LEGACY_BUCKET_LAYOUT) { LOG.debug( "Skipping processing for bucket {} as bucket layout is not LEGACY", bucketName); From f637dd64e8616e4aa8a47ca671d557f21e13e6bf Mon Sep 17 00:00:00 2001 From: arafat Date: Fri, 8 Mar 2024 18:59:38 +0530 Subject: [PATCH 14/19] Fixed failing UTs --- .../recon/api/handlers/BucketHandler.java | 22 +++++++++++++++---- .../recovery/ReconOMMetadataManager.java | 8 +++++++ .../recovery/ReconOmMetadataManagerImpl.java | 5 +++++ .../recon/OMMetadataManagerTestUtils.java | 7 ++++-- .../api/TestNSSummaryEndpointWithLegacy.java | 2 ++ 5 files changed, 38 insertions(+), 6 deletions(-) diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/BucketHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/BucketHandler.java index 7ca0b2832e55..266caaa2d8e2 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/BucketHandler.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/BucketHandler.java @@ -165,10 +165,8 @@ public static BucketHandler getBucketHandler( ReconOMMetadataManager omMetadataManager, OzoneStorageContainerManager reconSCM, OmBucketInfo bucketInfo) throws IOException { - OzoneConfiguration configuration = new OzoneConfiguration(); - boolean enableFileSystemPaths = configuration - .getBoolean(OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS, - OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS_DEFAULT); + // Check if enableFileSystemPaths flag is set to true. + boolean enableFileSystemPaths = isEnableFileSystemPaths(omMetadataManager); // If bucketInfo is null then entity type is UNKNOWN if (Objects.isNull(bucketInfo)) { @@ -201,6 +199,22 @@ public static BucketHandler getBucketHandler( } } + /** + * Determines whether FileSystemPaths are enabled for Legacy Buckets + * based on the Ozone configuration. + * + * @param omMetadataManager the ReconOMMetadataManager instance + * @return True if FileSystemPaths are enabled, false otherwise.
+ */ + private static boolean isEnableFileSystemPaths(ReconOMMetadataManager omMetadataManager) { + OzoneConfiguration configuration = omMetadataManager.getOzoneConfiguration(); + if (configuration == null) { + configuration = new OzoneConfiguration(); + } + return configuration.getBoolean(OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS, + OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS_DEFAULT); + } + public static BucketHandler getBucketHandler( ReconNamespaceSummaryManager reconNamespaceSummaryManager, ReconOMMetadataManager omMetadataManager, diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/recovery/ReconOMMetadataManager.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/recovery/ReconOMMetadataManager.java index 2040b7b343d9..1fc114eabd75 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/recovery/ReconOMMetadataManager.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/recovery/ReconOMMetadataManager.java @@ -22,6 +22,7 @@ import java.io.IOException; import java.util.List; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; @@ -105,4 +106,11 @@ List listBucketsUnderVolume(String volumeName, */ List listBucketsUnderVolume( String volumeName) throws IOException; + + /** + * Return the OzoneConfiguration instance used by Recon. + * @return the OzoneConfiguration instance + */ + OzoneConfiguration getOzoneConfiguration(); + } diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/recovery/ReconOmMetadataManagerImpl.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/recovery/ReconOmMetadataManagerImpl.java index ad0526363df0..4b041f6511f6 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/recovery/ReconOmMetadataManagerImpl.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/recovery/ReconOmMetadataManagerImpl.java @@ -291,6 +291,11 @@ public List listBucketsUnderVolume(final String volumeName) Integer.MAX_VALUE); } + @Override + public OzoneConfiguration getOzoneConfiguration() { + return ozoneConfiguration; + } + private List listAllBuckets(final int maxNumberOfBuckets) throws IOException { List result = new ArrayList<>(); diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/OMMetadataManagerTestUtils.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/OMMetadataManagerTestUtils.java index b1aecc9a4f4e..1a3592f53d21 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/OMMetadataManagerTestUtils.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/OMMetadataManagerTestUtils.java @@ -22,6 +22,7 @@ import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE; import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.THREE; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_DB_DIRS; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.BUCKET_TABLE; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.DIRECTORY_TABLE; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.FILE_TABLE; @@ -65,6 +66,7 @@ */ public final class OMMetadataManagerTestUtils { + public static OzoneConfiguration configuration; private
OMMetadataManagerTestUtils() { } @@ -129,8 +131,9 @@ public static ReconOMMetadataManager getTestReconOmMetadataManager( DBCheckpoint checkpoint = omMetadataManager.getStore() .getCheckpoint(true); assertNotNull(checkpoint.getCheckpointLocation()); - - OzoneConfiguration configuration = new OzoneConfiguration(); + if (configuration == null) { + configuration = new OzoneConfiguration(); + } configuration.set(OZONE_RECON_OM_SNAPSHOT_DB_DIR, reconOmDbDir .getAbsolutePath()); diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithLegacy.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithLegacy.java index ba00f843f447..de7c7a1e9440 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithLegacy.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithLegacy.java @@ -83,6 +83,7 @@ import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.writeDirToOm; import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getMockOzoneManagerServiceProvider; import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getTestReconOmMetadataManager; +import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.configuration; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -875,6 +876,7 @@ private static OMMetadataManager initializeNewOmMetadataManager( omDbDir.getAbsolutePath()); omConfiguration.set(OMConfigKeys .OZONE_OM_ENABLE_FILESYSTEM_PATHS, "true"); + configuration = omConfiguration; OMMetadataManager omMetadataManager = new OmMetadataManagerImpl( omConfiguration, null); From 3e5d5e6d7ffb602235148f1ca3db52355e786d08 Mon Sep 17 00:00:00 2001 From: arafat Date: Fri, 8 Mar 2024 19:11:37 +0530 Subject: [PATCH 15/19] Fixed checkstyle issues --- .../ozone/recon/OMMetadataManagerTestUtils.java | 13 +++++++++++-- .../recon/api/TestNSSummaryEndpointWithLegacy.java | 13 +++++++------ 2 files changed, 18 insertions(+), 8 deletions(-) diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/OMMetadataManagerTestUtils.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/OMMetadataManagerTestUtils.java index 1a3592f53d21..a9ed342faad4 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/OMMetadataManagerTestUtils.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/OMMetadataManagerTestUtils.java @@ -22,7 +22,6 @@ import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE; import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.THREE; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_DB_DIRS; -import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.BUCKET_TABLE; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.DIRECTORY_TABLE; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.FILE_TABLE; @@ -66,7 +65,7 @@ */ public final class OMMetadataManagerTestUtils { - public static OzoneConfiguration configuration; + private static OzoneConfiguration configuration; private OMMetadataManagerTestUtils() { } @@ -504,4 +503,14 @@ public static OmKeyLocationInfo getOmKeyLocationInfo(BlockID blockID, public static BucketLayout getBucketLayout() { return BucketLayout.DEFAULT; } + + public static 
OzoneConfiguration getConfiguration() { + return configuration; + } + + public static void setConfiguration( + OzoneConfiguration configuration) { + OMMetadataManagerTestUtils.configuration = configuration; + } + } diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithLegacy.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithLegacy.java index de7c7a1e9440..765399f71e3a 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithLegacy.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithLegacy.java @@ -74,16 +74,17 @@ import java.util.Set; import java.util.HashSet; +import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getTestReconOmMetadataManager; +import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getMockOzoneManagerServiceProvider; +import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.writeDirToOm; +import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.writeKeyToOm; +import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.setConfiguration; + import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertArrayEquals; import static org.apache.hadoop.hdds.protocol.MockDatanodeDetails.randomDatanodeDetails; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_DB_DIRS; import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX; -import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.writeKeyToOm; -import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.writeDirToOm; -import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getMockOzoneManagerServiceProvider; -import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getTestReconOmMetadataManager; -import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.configuration; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -876,7 +877,7 @@ private static OMMetadataManager initializeNewOmMetadataManager( omDbDir.getAbsolutePath()); omConfiguration.set(OMConfigKeys .OZONE_OM_ENABLE_FILESYSTEM_PATHS, "true"); - configuration = omConfiguration; + setConfiguration(omConfiguration); OMMetadataManager omMetadataManager = new OmMetadataManagerImpl( omConfiguration, null); From 14a0382334077bd180512458ed85b221fe25f91b Mon Sep 17 00:00:00 2001 From: arafat Date: Tue, 26 Mar 2024 12:38:58 +0530 Subject: [PATCH 16/19] Added a test to test out the utility method normalizePathUptilBucket --- ...TestNSSummaryEndpointWithOBSAndLegacy.java | 29 +++++++++++++++++++ 1 file changed, 29 insertions(+) diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithOBSAndLegacy.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithOBSAndLegacy.java index c37d743bb4e3..d56e18c46456 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithOBSAndLegacy.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithOBSAndLegacy.java @@ -33,6 +33,7 @@ import org.apache.hadoop.hdds.scm.container.ContainerReplica; import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeStat; import org.apache.hadoop.hdds.scm.server.OzoneStorageContainerManager; 
+import org.apache.hadoop.ozone.OmUtils; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.om.OMConfigKeys; import org.apache.hadoop.ozone.om.OMMetadataManager; @@ -876,6 +877,34 @@ public void checkFileSizeDist(String path, int bin0, } } + @Test + public void testNormalizePathUptilBucket_AllScenarios() { + // Test null or empty path + assertEquals("/", OmUtils.normalizePathUptilBucket(null)); + assertEquals("/", OmUtils.normalizePathUptilBucket("")); + + // Test path with leading slashes + assertEquals("volume1/bucket1/key1/key2", + OmUtils.normalizePathUptilBucket("///volume1/bucket1/key1/key2")); + + // Test volume and bucket names + assertEquals("volume1/bucket1", + OmUtils.normalizePathUptilBucket("volume1/bucket1")); + + // Test with additional segments + assertEquals("volume1/bucket1/key1/key2", + OmUtils.normalizePathUptilBucket("volume1/bucket1/key1/key2")); + + // Test path with multiple slashes in key names. + assertEquals("volume1/bucket1/key1//key2", + OmUtils.normalizePathUptilBucket("volume1/bucket1/key1//key2")); + + // Test path with volume, bucket, and special characters in keys + assertEquals("volume/bucket/key$%#1/./////////key$%#2", + OmUtils.normalizePathUptilBucket("volume/bucket/key$%#1/./////////key$%#2")); + } + + /** * Testing the following case. * ├── vol From 0b1f99c64bb461fee6bf09252ad3838cc9f47d26 Mon Sep 17 00:00:00 2001 From: arafat Date: Wed, 27 Mar 2024 16:16:48 +0530 Subject: [PATCH 17/19] Fixed checkstyle issue --- .../ozone/recon/api/TestNSSummaryEndpointWithOBSAndLegacy.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithOBSAndLegacy.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithOBSAndLegacy.java index d56e18c46456..f0696127cdce 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithOBSAndLegacy.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithOBSAndLegacy.java @@ -878,7 +878,7 @@ public void checkFileSizeDist(String path, int bin0, } @Test - public void testNormalizePathUptilBucket_AllScenarios() { + public void testNormalizePathUptilBucket() { // Test null or empty path assertEquals("/", OmUtils.normalizePathUptilBucket(null)); assertEquals("/", OmUtils.normalizePathUptilBucket("")); From efe3ebf55532544579265a1f2a8725a783270cb0 Mon Sep 17 00:00:00 2001 From: arafat Date: Thu, 28 Mar 2024 18:02:58 +0530 Subject: [PATCH 18/19] Renamed setParentDirectoryId to setKeyParentID --- .../hadoop/ozone/recon/tasks/NSSummaryTaskWithLegacy.java | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskWithLegacy.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskWithLegacy.java index 17ae2571681e..57294e7f0a89 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskWithLegacy.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskWithLegacy.java @@ -136,7 +136,7 @@ private void processWithFileSystemLayout(OmKeyInfo updatedKeyInfo, OMDBUpdateEvent.OMDBUpdateAction action, Map nsSummaryMap) throws IOException { - setParentDirectoryId(updatedKeyInfo); + setKeyParentID(updatedKeyInfo); if (!updatedKeyInfo.getKeyName().endsWith(OM_KEY_PREFIX)) { switch (action) { @@
-150,7 +150,7 @@ private void processWithFileSystemLayout(OmKeyInfo updatedKeyInfo, case UPDATE: if (oldKeyInfo != null) { - setParentDirectoryId(oldKeyInfo); + setKeyParentID(oldKeyInfo); handleDeleteKeyEvent(oldKeyInfo, nsSummaryMap); } else { LOG.warn("Update event does not have the old keyInfo for {}.", @@ -261,7 +261,7 @@ public boolean reprocessWithLegacy(OMMetadataManager omMetadataManager) { if (enableFileSystemPaths) { // The LEGACY bucket is a file system bucket. - setParentDirectoryId(keyInfo); + setKeyParentID(keyInfo); if (keyInfo.getKeyName().endsWith(OM_KEY_PREFIX)) { OmDirectoryInfo directoryInfo = @@ -305,7 +305,7 @@ public boolean reprocessWithLegacy(OMMetadataManager omMetadataManager) { * @param keyInfo * @throws IOException */ - private void setParentDirectoryId(OmKeyInfo keyInfo) throws IOException { + private void setKeyParentID(OmKeyInfo keyInfo) throws IOException { String[] keyPath = keyInfo.getKeyName().split(OM_KEY_PREFIX); // If the path contains only one key then keyPath.length From b7df717748a91785bb51c2fde3d28bab1b51f3c4 Mon Sep 17 00:00:00 2001 From: arafat Date: Fri, 29 Mar 2024 20:00:49 +0530 Subject: [PATCH 19/19] Made review changes --- .../java/org/apache/hadoop/ozone/OmUtils.java | 13 +++++++------ .../ozone/recon/api/handlers/EntityHandler.java | 4 ++-- .../recon/tasks/NSSummaryTaskWithLegacy.java | 2 +- .../TestNSSummaryEndpointWithOBSAndLegacy.java | 16 ++++++++-------- 4 files changed, 18 insertions(+), 17 deletions(-) diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java index f5ed8ff20556..b4777c7a016b 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java @@ -755,15 +755,15 @@ public static String normalizeKey(String keyName, * @param path The path string to be normalized. * @return The normalized path string. */ - public static String normalizePathUptilBucket(String path) { + public static String normalizePathUptoBucket(String path) { if (path == null || path.isEmpty()) { - return "/"; // Handle empty path + return OM_KEY_PREFIX; // Handle empty path } // Remove leading slashes path = path.replaceAll("^/*", ""); - String[] segments = path.split("/", -1); + String[] segments = path.split(OM_KEY_PREFIX, -1); String volumeName = segments[0]; String bucketName = segments.length > 1 ? segments[1] : ""; @@ -771,13 +771,14 @@ public static String normalizePathUptilBucket(String path) { // Combine volume and bucket. 
StringBuilder normalizedPath = new StringBuilder(volumeName); if (!bucketName.isEmpty()) { - normalizedPath.append("/").append(bucketName); + normalizedPath.append(OM_KEY_PREFIX).append(bucketName); } // Add remaining segments as the key if (segments.length > 2) { - normalizedPath.append("/").append( - String.join("/", Arrays.copyOfRange(segments, 2, segments.length))); + normalizedPath.append(OM_KEY_PREFIX).append( + String.join(OM_KEY_PREFIX, + Arrays.copyOfRange(segments, 2, segments.length))); } return normalizedPath.toString(); diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/EntityHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/EntityHandler.java index 93c91c8e1d89..4f9e68ddff95 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/EntityHandler.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/EntityHandler.java @@ -312,7 +312,7 @@ public static String[] parseObjectStorePath(String path) { * * This method adjusts the path according to the bucket layout. * For {OBJECT_STORE Layout}, it normalizes the path up to the bucket level - * using OmUtils.normalizePathUptilBucket. For other layouts, it + * using OmUtils.normalizePathUptoBucket. For other layouts, it * normalizes the entire path, including the key, using * OmUtils.normalizeKey, and does not preserve any trailing slashes. * The normalized path will always be prefixed with OM_KEY_PREFIX to ensure it @@ -324,7 +324,7 @@ public static String[] parseObjectStorePath(String path) { */ private static String normalizePath(String path, BucketLayout bucketLayout) { if (bucketLayout == BucketLayout.OBJECT_STORE) { - return OM_KEY_PREFIX + OmUtils.normalizePathUptilBucket(path); + return OM_KEY_PREFIX + OmUtils.normalizePathUptoBucket(path); } return OM_KEY_PREFIX + OmUtils.normalizeKey(path, false); } diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskWithLegacy.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskWithLegacy.java index 57294e7f0a89..4555b976ffed 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskWithLegacy.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskWithLegacy.java @@ -160,7 +160,7 @@ private void processWithFileSystemLayout(OmKeyInfo updatedKeyInfo, break; default: - LOG.debug("Skipping DB update event fir Key: {}", action); + LOG.debug("Skipping DB update event for Key: {}", action); } } else { OmDirectoryInfo updatedDirectoryInfo = new OmDirectoryInfo.Builder() diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithOBSAndLegacy.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithOBSAndLegacy.java index f0696127cdce..8d8299aefc18 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithOBSAndLegacy.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithOBSAndLegacy.java @@ -878,30 +878,30 @@ public void checkFileSizeDist(String path, int bin0, } @Test - public void testNormalizePathUptilBucket() { + public void testNormalizePathUptoBucket() { // Test null or empty path - assertEquals("/", OmUtils.normalizePathUptilBucket(null)); - assertEquals("/", OmUtils.normalizePathUptilBucket("")); + assertEquals("/", 
OmUtils.normalizePathUptoBucket(null)); + assertEquals("/", OmUtils.normalizePathUptoBucket("")); // Test path with leading slashes assertEquals("volume1/bucket1/key1/key2", - OmUtils.normalizePathUptilBucket("///volume1/bucket1/key1/key2")); + OmUtils.normalizePathUptoBucket("///volume1/bucket1/key1/key2")); // Test volume and bucket names assertEquals("volume1/bucket1", - OmUtils.normalizePathUptilBucket("volume1/bucket1")); + OmUtils.normalizePathUptoBucket("volume1/bucket1")); // Test with additional segments assertEquals("volume1/bucket1/key1/key2", - OmUtils.normalizePathUptilBucket("volume1/bucket1/key1/key2")); + OmUtils.normalizePathUptoBucket("volume1/bucket1/key1/key2")); // Test path with multiple slashes in key names. assertEquals("volume1/bucket1/key1//key2", - OmUtils.normalizePathUptilBucket("volume1/bucket1/key1//key2")); + OmUtils.normalizePathUptoBucket("volume1/bucket1/key1//key2")); // Test path with volume, bucket, and special characters in keys assertEquals("volume/bucket/key$%#1/./////////key$%#2", - OmUtils.normalizePathUptilBucket("volume/bucket/key$%#1/./////////key$%#2")); + OmUtils.normalizePathUptoBucket("volume/bucket/key$%#1/./////////key$%#2")); }
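
Taken together, the series makes a LEGACY bucket behave like a flat object store whenever ozone.om.enable.filesystem.paths is false: PATCH 10 flips that flag in the test setup, and PATCH 14 resolves it from Recon's own configuration inside getBucketHandler(). The hunks above show how the flag is obtained but not the full branch that consumes it, so the following is a minimal standalone sketch of the selection rule as implied by this excerpt; handler names other than LegacyBucketHandler are assumptions, and the real getBucketHandler() also takes the Recon managers and OmBucketInfo shown in PATCH 14.

// Sketch only, not the actual Recon code: models the layout-plus-flag
// decision implied by the PATCH 14 change to getBucketHandler().
public final class HandlerSelectionSketch {

  enum BucketLayout { FILE_SYSTEM_OPTIMIZED, OBJECT_STORE, LEGACY }

  static String selectHandler(BucketLayout layout,
                              boolean enableFileSystemPaths) {
    switch (layout) {
    case FILE_SYSTEM_OPTIMIZED:
      return "FSOBucketHandler";   // assumed name, not shown in this excerpt
    case LEGACY:
      // A LEGACY bucket is treated as a file system bucket only when
      // ozone.om.enable.filesystem.paths=true; otherwise it takes the flat
      // object-store path, consistent with NSSummary.isObjectStore().
      return enableFileSystemPaths
          ? "LegacyBucketHandler"
          : "OBSBucketHandler";    // assumed name, not shown in this excerpt
    case OBJECT_STORE:
    default:
      return "OBSBucketHandler";
    }
  }

  public static void main(String[] args) {
    // With the flag set to "false" as in PATCH 10's test setup, LEGACY
    // buckets such as bucket3 and bucket4 take the object-store path:
    System.out.println(selectHandler(BucketLayout.LEGACY, false));
    System.out.println(selectHandler(BucketLayout.LEGACY, true));
  }
}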