From f66979fea827c360475aed20d4831618d9d5b248 Mon Sep 17 00:00:00 2001 From: arafat Date: Sun, 7 Apr 2024 20:39:27 +0530 Subject: [PATCH 01/21] Adding a new parameter to NSSummary and exposing it --- .../recon/api/handlers/BucketEntityHandler.java | 1 + .../api/handlers/DirectoryEntityHandler.java | 2 +- .../recon/api/handlers/VolumeEntityHandler.java | 1 + .../hadoop/ozone/recon/api/types/DUResponse.java | 15 +++++++++++++++ .../hadoop/ozone/recon/api/types/NSSummary.java | 15 +++++++++++++-- .../hadoop/ozone/recon/codec/NSSummaryCodec.java | 12 ++++++++++-- .../recon/tasks/NSSummaryTaskDbEventHandler.java | 3 +++ .../TestReconNamespaceSummaryManagerImpl.java | 6 +++--- 8 files changed, 47 insertions(+), 8 deletions(-) diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/BucketEntityHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/BucketEntityHandler.java index 7ad961195ee7..13846330f604 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/BucketEntityHandler.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/BucketEntityHandler.java @@ -143,6 +143,7 @@ public DUResponse getDuResponse( duResponse.setCount(dirDUData.size()); duResponse.setSize(bucketDataSize); duResponse.setDuData(dirDUData); + duResponse.setParentId(bucketNSSummary.getParentId()); return duResponse; } diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/DirectoryEntityHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/DirectoryEntityHandler.java index fc7022e2dab2..4712a13ba79a 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/DirectoryEntityHandler.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/DirectoryEntityHandler.java @@ -155,7 +155,7 @@ public DUResponse getDuResponse( 
duResponse.setCount(subdirDUData.size()); duResponse.setSize(dirDataSize); duResponse.setDuData(subdirDUData); - + duResponse.setParentId(dirNSSummary.getParentId()); return duResponse; } diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/VolumeEntityHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/VolumeEntityHandler.java index fae508a99c9d..367129691351 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/VolumeEntityHandler.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/VolumeEntityHandler.java @@ -132,6 +132,7 @@ public DUResponse getDuResponse( } duResponse.setSize(volDataSize); duResponse.setDuData(bucketDuData); + duResponse.setParentId(0L); return duResponse; } diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/DUResponse.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/DUResponse.java index b28d9d39c210..c04a60798e7b 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/DUResponse.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/DUResponse.java @@ -54,12 +54,16 @@ public class DUResponse { @JsonProperty("sizeDirectKey") private long keySize; + @JsonProperty("parentId") + private long parentId; + public DUResponse() { this.status = ResponseStatus.OK; this.duData = new ArrayList<>(); // by default, the replication feature is disabled this.sizeWithReplica = -1L; this.keySize = -1L; + this.parentId = -1L; } public ResponseStatus getStatus() { @@ -118,6 +122,17 @@ public void setKeySize(long keySize) { this.keySize = keySize; } + + // Existing getters and setters + + public long getParentId() { + return parentId; + } + + public void setParentId(long parentId) { + this.parentId = parentId; + } + /** * DU info for a path (path name, data size). 
*/ diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/NSSummary.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/NSSummary.java index c0f93aebe97d..c51769928356 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/NSSummary.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/NSSummary.java @@ -36,22 +36,25 @@ public class NSSummary { private int[] fileSizeBucket; private Set childDir; private String dirName; + private long parentId = -1; public NSSummary() { this(0, 0L, new int[ReconConstants.NUM_OF_FILE_SIZE_BINS], - new HashSet<>(), ""); + new HashSet<>(), "", -1); // -1 can be a default value indicating no parent } public NSSummary(int numOfFiles, long sizeOfFiles, int[] bucket, Set childDir, - String dirName) { + String dirName, + long parentId) { this.numOfFiles = numOfFiles; this.sizeOfFiles = sizeOfFiles; setFileSizeBucket(bucket); this.childDir = childDir; this.dirName = dirName; + this.parentId = parentId; } public int getNumOfFiles() { @@ -107,4 +110,12 @@ public void removeChildDir(long childId) { this.childDir.remove(childId); } } + + public long getParentId() { + return parentId; + } + + public void setParentId(long parentId) { + this.parentId = parentId; + } } diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/codec/NSSummaryCodec.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/codec/NSSummaryCodec.java index 09e0b2587934..63eddc34e8d1 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/codec/NSSummaryCodec.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/codec/NSSummaryCodec.java @@ -65,9 +65,10 @@ public byte[] toPersistedFormat(NSSummary object) throws IOException { int stringLen = dirName.getBytes(StandardCharsets.UTF_8).length; int numOfChildDirs = childDirs.size(); final int resSize = NUM_OF_INTS * Integer.BYTES - + 
(numOfChildDirs + 1) * Long.BYTES // 1 long field + list size + + (numOfChildDirs + 1) * Long.BYTES // 1 long field for parentId + list size + Short.BYTES // 2 dummy shorts to track length - + stringLen; // directory name length + + stringLen // directory name length + + Long.BYTES; // Added space for parentId serialization ByteArrayOutputStream out = new ByteArrayOutputStream(resSize); out.write(integerCodec.toPersistedFormat(object.getNumOfFiles())); @@ -84,6 +85,8 @@ public byte[] toPersistedFormat(NSSummary object) throws IOException { } out.write(integerCodec.toPersistedFormat(stringLen)); out.write(stringCodec.toPersistedFormat(dirName)); + out.write(longCodec.toPersistedFormat(object.getParentId())); + return out.toByteArray(); } @@ -110,6 +113,8 @@ public NSSummary fromPersistedFormat(byte[] rawData) throws IOException { int strLen = in.readInt(); if (strLen == 0) { + long parentId = in.readLong(); // Deserialize parentId + res.setParentId(parentId); return res; } byte[] buffer = new byte[strLen]; @@ -117,6 +122,8 @@ public NSSummary fromPersistedFormat(byte[] rawData) throws IOException { assert (bytesRead == strLen); String dirName = stringCodec.fromPersistedFormat(buffer); res.setDirName(dirName); + long parentId = in.readLong(); + res.setParentId(parentId); return res; } @@ -128,6 +135,7 @@ public NSSummary copyObject(NSSummary object) { copy.setFileSizeBucket(object.getFileSizeBucket()); copy.setChildDir(object.getChildDir()); copy.setDirName(object.getDirName()); + copy.setParentId(object.getParentId()); return copy; } } diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskDbEventHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskDbEventHandler.java index f00d83e64a52..5ca49a13c157 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskDbEventHandler.java +++ 
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskDbEventHandler.java @@ -132,6 +132,9 @@ protected void handlePutDirEvent(OmDirectoryInfo directoryInfo, curNSSummary = new NSSummary(); } curNSSummary.setDirName(dirName); + if (parentObjectId != -1) { + curNSSummary.setParentId(parentObjectId); + } nsSummaryMap.put(objectId, curNSSummary); // Write the child dir list to the parent directory diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/TestReconNamespaceSummaryManagerImpl.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/TestReconNamespaceSummaryManagerImpl.java index fbddd50ee4cb..2ed85aa5271c 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/TestReconNamespaceSummaryManagerImpl.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/TestReconNamespaceSummaryManagerImpl.java @@ -114,9 +114,9 @@ public void testInitNSSummaryTable() throws IOException { private void putThreeNSMetadata() throws IOException { HashMap hmap = new HashMap<>(); - hmap.put(1L, new NSSummary(1, 2, testBucket, TEST_CHILD_DIR, "dir1")); - hmap.put(2L, new NSSummary(3, 4, testBucket, TEST_CHILD_DIR, "dir2")); - hmap.put(3L, new NSSummary(5, 6, testBucket, TEST_CHILD_DIR, "dir3")); + hmap.put(1L, new NSSummary(1, 2, testBucket, TEST_CHILD_DIR, "dir1",-1)); + hmap.put(2L, new NSSummary(3, 4, testBucket, TEST_CHILD_DIR, "dir2", -1)); + hmap.put(3L, new NSSummary(5, 6, testBucket, TEST_CHILD_DIR, "dir3", -1)); RDBBatchOperation rdbBatchOperation = new RDBBatchOperation(); for (Map.Entry entry: hmap.entrySet()) { reconNamespaceSummaryManager.batchStoreNSSummaries(rdbBatchOperation, From 4dbab9a975354b886bb14fa3b1a85262893265dd Mon Sep 17 00:00:00 2001 From: arafat Date: Mon, 8 Apr 2024 00:38:52 +0530 Subject: [PATCH 02/21] HDDS-10608. Recon can't get full key when using Recon API. 
--- .../apache/hadoop/ozone/recon/ReconUtils.java | 57 +++++++++- .../ozone/recon/api/ContainerEndpoint.java | 102 ++++++++++-------- .../api/handlers/BucketEntityHandler.java | 1 - .../api/handlers/DirectoryEntityHandler.java | 1 - .../api/handlers/VolumeEntityHandler.java | 1 - .../ozone/recon/api/types/DUResponse.java | 11 -- .../ozone/recon/api/types/KeyMetadata.java | 11 ++ .../ozone/recon/api/types/KeysResponse.java | 9 +- .../tasks/NSSummaryTaskDbEventHandler.java | 1 + .../api/TestNSSummaryEndpointWithFSO.java | 79 ++++++++++++-- .../api/TestNSSummaryEndpointWithLegacy.java | 53 +++++++-- ...TestNSSummaryEndpointWithOBSAndLegacy.java | 77 ++++++++++--- .../recon/tasks/TestNSSummaryTaskWithFSO.java | 56 +++++++++- 13 files changed, 367 insertions(+), 92 deletions(-) diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java index 39d091ee03c8..a8a771e5fa19 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java @@ -54,12 +54,17 @@ import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_EVENT_THREAD_POOL_SIZE_DEFAULT; import static org.apache.hadoop.hdds.server.ServerUtils.getDirectoryFromConfig; import static org.apache.hadoop.hdds.server.ServerUtils.getOzoneMetaDirPath; +import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX; import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_SCM_DB_DIR; import static org.jooq.impl.DSL.currentTimestamp; import static org.jooq.impl.DSL.select; import static org.jooq.impl.DSL.using; +import org.apache.hadoop.ozone.OmUtils; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.recon.api.types.NSSummary; import org.apache.hadoop.ozone.recon.scm.ReconContainerReportQueue; +import 
org.apache.hadoop.ozone.recon.spi.ReconNamespaceSummaryManager; import org.apache.hadoop.security.authentication.client.AuthenticationException; import org.hadoop.ozone.recon.schema.tables.daos.GlobalStatsDao; import org.hadoop.ozone.recon.schema.tables.pojos.GlobalStats; @@ -244,25 +249,69 @@ public void untarCheckpointFile(File tarFile, Path destPath) } } + + /** + * Constructs the full path of a key from its OmKeyInfo using a bottom-up approach, starting from the leaf node. + *

+ * The method begins with the leaf node (the key itself) and recursively prepends parent directory names, fetched + * via NSSummary objects, until reaching the parent bucket (parentId is -1). It effectively builds the path from + * bottom to top, finally prepending the volume and bucket names to complete the full path. + * + * @param omKeyInfo The OmKeyInfo object for the key + * @return The constructed full path of the key as a String. + * @throws IOException + */ + public static String constructFullPath(OmKeyInfo omKeyInfo, + ReconNamespaceSummaryManager reconNamespaceSummaryManager) + throws IOException { + StringBuilder fullPath = new StringBuilder(omKeyInfo.getKeyName()); + long parentId = omKeyInfo.getParentObjectID(); + boolean isDirectoryPresent = false; + while (parentId != -1) { + NSSummary nsSummary = reconNamespaceSummaryManager.getNSSummary(parentId); + if (nsSummary == null) { + break; + } + // Prepend the directory name to the path + fullPath.insert(0, nsSummary.getDirName() + OM_KEY_PREFIX); + + // Move to the parent ID of the current directory + parentId = nsSummary.getParentId(); + isDirectoryPresent = true; + } + + // Prepend the volume and bucket to the constructed path + String volumeName = omKeyInfo.getVolumeName(); + String bucketName = omKeyInfo.getBucketName(); + fullPath.insert(0, volumeName + OM_KEY_PREFIX + bucketName + OM_KEY_PREFIX); + if (isDirectoryPresent) { + return OmUtils.normalizeKey(fullPath.toString(), true); + } + return fullPath.toString(); + } + + /** * Make HTTP GET call on the URL and return HttpURLConnection instance. + * * @param connectionFactory URLConnectionFactory to use. - * @param url url to call - * @param isSpnego is SPNEGO enabled + * @param url url to call + * @param isSpnego is SPNEGO enabled * @return HttpURLConnection instance of the HTTP call. * @throws IOException, AuthenticationException While reading the response. 
*/ public HttpURLConnection makeHttpCall(URLConnectionFactory connectionFactory, - String url, boolean isSpnego) + String url, boolean isSpnego) throws IOException, AuthenticationException { HttpURLConnection urlConnection = (HttpURLConnection) - connectionFactory.openConnection(new URL(url), isSpnego); + connectionFactory.openConnection(new URL(url), isSpnego); urlConnection.connect(); return urlConnection; } /** * Load last known DB in Recon. + * * @param reconDbDir * @param fileNamePrefix * @return diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/ContainerEndpoint.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/ContainerEndpoint.java index afc9c8a3239a..2d502608bb1d 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/ContainerEndpoint.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/ContainerEndpoint.java @@ -31,6 +31,8 @@ import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; +import org.apache.hadoop.ozone.recon.api.types.NSSummary; +import org.apache.hadoop.ozone.recon.ReconUtils; import org.apache.hadoop.ozone.recon.api.types.ContainerDiscrepancyInfo; import org.apache.hadoop.ozone.recon.api.types.ContainerKeyPrefix; import org.apache.hadoop.ozone.recon.api.types.ContainerMetadata; @@ -76,6 +78,7 @@ import java.util.HashMap; import java.util.stream.Collectors; +import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX; import static org.apache.hadoop.ozone.recon.ReconConstants.DEFAULT_FILTER_FOR_MISSING_CONTAINERS; import static org.apache.hadoop.ozone.recon.ReconConstants.RECON_QUERY_FILTER; import static org.apache.hadoop.ozone.recon.ReconConstants.DEFAULT_BATCH_NUMBER; @@ -144,8 +147,8 @@ public static DataFilter fromValue(String value) { @Inject public ContainerEndpoint(OzoneStorageContainerManager reconSCM, - 
ContainerHealthSchemaManager containerHealthSchemaManager, - ReconNamespaceSummaryManager reconNamespaceSummaryManager) { + ContainerHealthSchemaManager containerHealthSchemaManager, + ReconNamespaceSummaryManager reconNamespaceSummaryManager) { this.containerManager = (ReconContainerManager) reconSCM.getContainerManager(); this.pipelineManager = reconSCM.getPipelineManager(); @@ -158,19 +161,20 @@ public ContainerEndpoint(OzoneStorageContainerManager reconSCM, * Return @{@link org.apache.hadoop.hdds.scm.container} * for the containers starting from the given "prev-key" query param for the * given "limit". The given "prev-key" is skipped from the results returned. + * * @param prevKey the containerID after which results are returned. * start containerID, >=0, * start searching at the head if 0. - * @param limit max no. of containers to get. - * count must be >= 0 - * Usually the count will be replace with a very big - * value instead of being unlimited in case the db is very big. + * @param limit max no. of containers to get. + * count must be >= 0 + * Usually the count will be replace with a very big + * value instead of being unlimited in case the db is very big. * @return {@link Response} */ @GET public Response getContainers( @DefaultValue(DEFAULT_FETCH_COUNT) @QueryParam(RECON_QUERY_LIMIT) - int limit, + int limit, @DefaultValue(PREV_CONTAINER_ID_DEFAULT_VALUE) @QueryParam(RECON_QUERY_PREVKEY) long prevKey) { if (limit < 0 || prevKey < 0) { @@ -212,8 +216,8 @@ public Response getContainers( * starting from the given "prev-key" query param for the given "limit". * The given prevKeyPrefix is skipped from the results returned. * - * @param containerID the given containerID. - * @param limit max no. of keys to get. + * @param containerID the given containerID. + * @param limit max no. of keys to get. * @param prevKeyPrefix the key prefix after which results are returned. 
* @return {@link Response} */ @@ -226,7 +230,12 @@ public Response getKeysForContainer( @DefaultValue(StringUtils.EMPTY) @QueryParam(RECON_QUERY_PREVKEY) String prevKeyPrefix) { Map keyMetadataMap = new LinkedHashMap<>(); + + // Total count of keys in the container. long totalCount; + // Last key prefix to be used for pagination. It will be exposed in the response. + String lastKey = ""; + try { Map containerKeyPrefixMap = reconContainerMetadataManager.getKeyPrefixesForContainer(containerID, @@ -263,6 +272,7 @@ public Response getKeysForContainer( omKeyInfo.getVolumeName(), omKeyInfo.getBucketName(), omKeyInfo.getKeyName()); + lastKey = ozoneKey; if (keyMetadataMap.containsKey(ozoneKey)) { keyMetadataMap.get(ozoneKey).getVersions() .add(containerKeyPrefix.getKeyVersion()); @@ -278,6 +288,8 @@ public Response getKeysForContainer( keyMetadata.setBucket(omKeyInfo.getBucketName()); keyMetadata.setVolume(omKeyInfo.getVolumeName()); keyMetadata.setKey(omKeyInfo.getKeyName()); + keyMetadata.setCompletePath(ReconUtils.constructFullPath(omKeyInfo, + reconNamespaceSummaryManager)); keyMetadata.setCreationTime( Instant.ofEpochMilli(omKeyInfo.getCreationTime())); keyMetadata.setModificationTime( @@ -298,7 +310,7 @@ public Response getKeysForContainer( Response.Status.INTERNAL_SERVER_ERROR); } KeysResponse keysResponse = - new KeysResponse(totalCount, keyMetadataMap.values()); + new KeysResponse(totalCount, keyMetadataMap.values(), lastKey); return Response.ok(keysResponse).build(); } @@ -334,7 +346,7 @@ public Response getMissingContainers( ) { List missingContainers = new ArrayList<>(); containerHealthSchemaManager.getUnhealthyContainers( - UnHealthyContainerStates.MISSING, 0, limit) + UnHealthyContainerStates.MISSING, 0, limit) .forEach(container -> { long containerID = container.getContainerId(); try { @@ -364,10 +376,10 @@ public Response getMissingContainers( * {@link org.apache.hadoop.ozone.recon.api.types.UnhealthyContainerMetadata} * for all unhealthy containers. 
* - * @param state Return only containers matching the given unhealthy state, - * eg UNDER_REPLICATED, MIS_REPLICATED, OVER_REPLICATED or - * MISSING. Passing null returns all containers. - * @param limit The limit of unhealthy containers to return. + * @param state Return only containers matching the given unhealthy state, + * eg UNDER_REPLICATED, MIS_REPLICATED, OVER_REPLICATED or + * MISSING. Passing null returns all containers. + * @param limit The limit of unhealthy containers to return. * @param batchNum The batch number (like "page number") of results to return. * Passing 1, will return records 1 to limit. 2 will return * limit + 1 to 2 * limit, etc. @@ -378,7 +390,7 @@ public Response getMissingContainers( public Response getUnhealthyContainers( @PathParam("state") String state, @DefaultValue(DEFAULT_FETCH_COUNT) @QueryParam(RECON_QUERY_LIMIT) - int limit, + int limit, @DefaultValue(DEFAULT_BATCH_NUMBER) @QueryParam(RECON_QUERY_BATCH_PARAM) int batchNum) { int offset = Math.max(((batchNum - 1) * limit), 0); @@ -399,7 +411,8 @@ public Response getUnhealthyContainers( .getUnhealthyContainers(internalState, offset, limit); List emptyMissingFiltered = containers.stream() .filter( - container -> !container.getContainerState().equals(UnHealthyContainerStates.EMPTY_MISSING.toString())) + container -> !container.getContainerState() + .equals(UnHealthyContainerStates.EMPTY_MISSING.toString())) .collect( Collectors.toList()); for (UnhealthyContainers c : emptyMissingFiltered) { @@ -433,8 +446,8 @@ public Response getUnhealthyContainers( * Return * {@link org.apache.hadoop.ozone.recon.api.types.UnhealthyContainerMetadata} * for all unhealthy containers. - - * @param limit The limit of unhealthy containers to return. + * + * @param limit The limit of unhealthy containers to return. * @param batchNum The batch number (like "page number") of results to return. * Passing 1, will return records 1 to limit. 2 will return * limit + 1 to 2 * limit, etc. 
@@ -444,7 +457,7 @@ public Response getUnhealthyContainers( @Path("/unhealthy") public Response getUnhealthyContainers( @DefaultValue(DEFAULT_FETCH_COUNT) @QueryParam(RECON_QUERY_LIMIT) - int limit, + int limit, @DefaultValue(DEFAULT_BATCH_NUMBER) @QueryParam(RECON_QUERY_BATCH_PARAM) int batchNum) { return getUnhealthyContainers(null, limit, batchNum); @@ -455,22 +468,23 @@ public Response getUnhealthyContainers( * { * containers: [ * { - * containerId: 1, - * state: DELETED, - * pipelineId: "a10ffab6-8ed5-414a-aaf5-79890ff3e8a1", - * numOfKeys: 3, - * inStateSince: + * containerId: 1, + * state: DELETED, + * pipelineId: "a10ffab6-8ed5-414a-aaf5-79890ff3e8a1", + * numOfKeys: 3, + * inStateSince: * }, * { - * containerId: 2, - * state: DELETED, - * pipelineId: "a10ffab6-8ed5-414a-aaf5-79890ff3e8a1", - * numOfKeys: 6, - * inStateSince: + * containerId: 2, + * state: DELETED, + * pipelineId: "a10ffab6-8ed5-414a-aaf5-79890ff3e8a1", + * numOfKeys: 6, + * inStateSince: * } * ] * } - * @param limit limits the number of deleted containers + * + * @param limit limits the number of deleted containers * @param prevKey previous container Id to skip * @return Response of deleted containers. */ @@ -519,6 +533,7 @@ public Response getSCMDeletedContainers( /** * Helper function to extract the blocks for a given container from a given * OM Key. + * * @param matchedKeys List of OM Key Info locations * @param containerID containerId. * @return List of blocks. @@ -542,17 +557,17 @@ private List getBlocks( /** * Retrieves the container mismatch insights. - * + *

* This method returns a list of ContainerDiscrepancyInfo objects representing * the containers that are missing in either the Ozone Manager (OM) or the * Storage Container Manager (SCM), based on the provided filter parameter. * The returned list is paginated based on the provided limit and prevKey * parameters. * - * @param limit The maximum number of container discrepancies to return. - * @param prevKey The container ID after which the results are returned. - * @param missingIn The missing filter parameter to specify if it's - * "OM" or "SCM" missing containers to be returned. + * @param limit The maximum number of container discrepancies to return. + * @param prevKey The container ID after which the results are returned. + * @param missingIn The missing filter parameter to specify if it's + * "OM" or "SCM" missing containers to be returned. */ @GET @Path("/mismatch") @@ -703,19 +718,20 @@ public Response getContainerMisMatchInsights( } - /** This API retrieves set of deleted containers in SCM which are present + /** + * This API retrieves set of deleted containers in SCM which are present * in OM to find out list of keys mapped to such DELETED state containers. - * + *

* limit - limits the number of such SCM DELETED containers present in OM. * prevKey - Skip containers till it seeks correctly to the previous * containerId. * Sample API Response: * [ - * { - * "containerId": 2, - * "numberOfKeys": 2, - * "pipelines": [] - * } + * { + * "containerId": 2, + * "numberOfKeys": 2, + * "pipelines": [] + * } * ] */ @GET diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/BucketEntityHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/BucketEntityHandler.java index 13846330f604..7ad961195ee7 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/BucketEntityHandler.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/BucketEntityHandler.java @@ -143,7 +143,6 @@ public DUResponse getDuResponse( duResponse.setCount(dirDUData.size()); duResponse.setSize(bucketDataSize); duResponse.setDuData(dirDUData); - duResponse.setParentId(bucketNSSummary.getParentId()); return duResponse; } diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/DirectoryEntityHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/DirectoryEntityHandler.java index 4712a13ba79a..fbb496988fcf 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/DirectoryEntityHandler.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/DirectoryEntityHandler.java @@ -155,7 +155,6 @@ public DUResponse getDuResponse( duResponse.setCount(subdirDUData.size()); duResponse.setSize(dirDataSize); duResponse.setDuData(subdirDUData); - duResponse.setParentId(dirNSSummary.getParentId()); return duResponse; } diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/VolumeEntityHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/VolumeEntityHandler.java 
index 367129691351..fae508a99c9d 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/VolumeEntityHandler.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/VolumeEntityHandler.java @@ -132,7 +132,6 @@ public DUResponse getDuResponse( } duResponse.setSize(volDataSize); duResponse.setDuData(bucketDuData); - duResponse.setParentId(0L); return duResponse; } diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/DUResponse.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/DUResponse.java index c04a60798e7b..5c550c8834cf 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/DUResponse.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/DUResponse.java @@ -122,17 +122,6 @@ public void setKeySize(long keySize) { this.keySize = keySize; } - - // Existing getters and setters - - public long getParentId() { - return parentId; - } - - public void setParentId(long parentId) { - this.parentId = parentId; - } - /** * DU info for a path (path name, data size). 
*/ diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/KeyMetadata.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/KeyMetadata.java index c48e21d90f90..5094f47c24c2 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/KeyMetadata.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/KeyMetadata.java @@ -45,6 +45,9 @@ public class KeyMetadata { @XmlElement(name = "Key") private String key; + @XmlElement(name = "CompletePath") + private String completePath; + @XmlElement(name = "DataSize") private long dataSize; @@ -126,6 +129,14 @@ public void setBlockIds(Map> blockIds) { this.blockIds = blockIds; } + public String getCompletePath() { + return completePath; + } + + public void setCompletePath(String completePath) { + this.completePath = completePath; + } + /** * Class to hold ContainerID and BlockID. */ diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/KeysResponse.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/KeysResponse.java index 5b05975623c1..c09d28718e8b 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/KeysResponse.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/KeysResponse.java @@ -36,9 +36,13 @@ public class KeysResponse { @JsonProperty("keys") private Collection keys; - public KeysResponse(long totalCount, Collection keys) { + @JsonProperty("lastKey") + private String lastKey; + + public KeysResponse(long totalCount, Collection keys, String lastKey) { this.totalCount = totalCount; this.keys = keys; + this.lastKey = lastKey; } public long getTotalCount() { @@ -48,4 +52,7 @@ public long getTotalCount() { public Collection getKeys() { return keys; } + public String getLastKey() { + return lastKey; + } } diff --git 
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskDbEventHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskDbEventHandler.java index 5ca49a13c157..b979307019ff 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskDbEventHandler.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskDbEventHandler.java @@ -132,6 +132,7 @@ protected void handlePutDirEvent(OmDirectoryInfo directoryInfo, curNSSummary = new NSSummary(); } curNSSummary.setDirName(dirName); + // Set the parent directory ID if (parentObjectId != -1) { curNSSummary.setParentId(parentObjectId); } diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithFSO.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithFSO.java index cbe850b918f0..07ef7373d108 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithFSO.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithFSO.java @@ -35,13 +35,10 @@ import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; -import org.apache.hadoop.ozone.om.helpers.BucketLayout; -import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; -import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; -import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; -import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; +import org.apache.hadoop.ozone.om.helpers.*; import org.apache.hadoop.ozone.recon.ReconConstants; import org.apache.hadoop.ozone.recon.ReconTestInjector; +import org.apache.hadoop.ozone.recon.ReconUtils; import org.apache.hadoop.ozone.recon.api.handlers.BucketHandler; import 
org.apache.hadoop.ozone.recon.api.handlers.EntityHandler; import org.apache.hadoop.ozone.recon.api.types.DUResponse; @@ -57,6 +54,7 @@ import org.apache.hadoop.ozone.recon.spi.impl.OzoneManagerServiceProviderImpl; import org.apache.hadoop.ozone.recon.spi.impl.StorageContainerServiceProviderImpl; import org.apache.hadoop.ozone.recon.tasks.NSSummaryTaskWithFSO; +import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.io.TempDir; @@ -114,6 +112,7 @@ public class TestNSSummaryEndpointWithFSO { private Path temporaryFolder; private ReconOMMetadataManager reconOMMetadataManager; + private ReconNamespaceSummaryManager reconNamespaceSummaryManager; private NSSummaryEndpoint nsSummaryEndpoint; private OzoneConfiguration ozoneConfiguration; private CommonUtils commonUtils; @@ -375,7 +374,7 @@ public void setUp() throws Exception { mock(StorageContainerServiceProviderImpl.class)) .addBinding(NSSummaryEndpoint.class) .build(); - ReconNamespaceSummaryManager reconNamespaceSummaryManager = + this.reconNamespaceSummaryManager = reconTestInjector.getInstance(ReconNamespaceSummaryManager.class); nsSummaryEndpoint = reconTestInjector.getInstance(NSSummaryEndpoint.class); @@ -691,6 +690,74 @@ public void checkFileSizeDist(String path, int bin0, } } + @Test + public void testConstructFullPath() throws IOException { + OmKeyInfo keyInfo = new OmKeyInfo.Builder() + .setKeyName("file2") + .setVolumeName(VOL) + .setBucketName(BUCKET_ONE) + .setObjectID(KEY_TWO_OBJECT_ID) + .setParentObjectID(DIR_TWO_OBJECT_ID) + .build(); + // Call constructFullPath and verify the result + String fullPath = ReconUtils.constructFullPath(keyInfo, + reconNamespaceSummaryManager); + String expectedPath = "vol/bucket1/dir1/dir2/file2"; + Assertions.assertEquals(expectedPath, fullPath); + + // Create key info for file 3 + keyInfo = new OmKeyInfo.Builder() + .setKeyName("file3") + .setVolumeName(VOL) + 
.setBucketName(BUCKET_ONE) + .setObjectID(KEY_THREE_OBJECT_ID) + .setParentObjectID(DIR_THREE_OBJECT_ID) + .build(); + fullPath = ReconUtils.constructFullPath(keyInfo, + reconNamespaceSummaryManager); + expectedPath = "vol/bucket1/dir1/dir3/file3"; + Assertions.assertEquals(expectedPath, fullPath); + + // Create key info for file 6 + keyInfo = new OmKeyInfo.Builder() + .setKeyName("file6") + .setVolumeName(VOL) + .setBucketName(BUCKET_ONE) + .setObjectID(KEY_SIX_OBJECT_ID) + .setParentObjectID(DIR_FOUR_OBJECT_ID) + .build(); + fullPath = ReconUtils.constructFullPath(keyInfo, + reconNamespaceSummaryManager); + expectedPath = "vol/bucket1/dir1/dir4/file6"; + Assertions.assertEquals(expectedPath, fullPath); + + // Create key info for file 1 + keyInfo = new OmKeyInfo.Builder() + .setKeyName("file1") + .setVolumeName(VOL) + .setBucketName(BUCKET_ONE) + .setObjectID(KEY_ONE_OBJECT_ID) + .setParentObjectID(BUCKET_ONE_OBJECT_ID) + .build(); + fullPath = ReconUtils.constructFullPath(keyInfo, + reconNamespaceSummaryManager); + expectedPath = "vol/bucket1/file1"; + Assertions.assertEquals(expectedPath, fullPath); + + // Create key info for file 9 + keyInfo = new OmKeyInfo.Builder() + .setKeyName("file9") + .setVolumeName(VOL_TWO) + .setBucketName(BUCKET_THREE) + .setObjectID(KEY_NINE_OBJECT_ID) + .setParentObjectID(DIR_FIVE_OBJECT_ID) + .build(); + fullPath = ReconUtils.constructFullPath(keyInfo, + reconNamespaceSummaryManager); + expectedPath = "vol2/bucket3/dir5/file9"; + Assertions.assertEquals(expectedPath, fullPath); + } + /** * Write directories and keys info into OM DB. 
* @throws Exception diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithLegacy.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithLegacy.java index 765399f71e3a..9c6e6d2c2c39 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithLegacy.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithLegacy.java @@ -36,13 +36,10 @@ import org.apache.hadoop.ozone.om.OMConfigKeys; import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; -import org.apache.hadoop.ozone.om.helpers.BucketLayout; -import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; -import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; -import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; -import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; +import org.apache.hadoop.ozone.om.helpers.*; import org.apache.hadoop.ozone.recon.ReconConstants; import org.apache.hadoop.ozone.recon.ReconTestInjector; +import org.apache.hadoop.ozone.recon.ReconUtils; import org.apache.hadoop.ozone.recon.api.handlers.BucketHandler; import org.apache.hadoop.ozone.recon.api.handlers.EntityHandler; import org.apache.hadoop.ozone.recon.api.types.DUResponse; @@ -58,6 +55,7 @@ import org.apache.hadoop.ozone.recon.spi.impl.OzoneManagerServiceProviderImpl; import org.apache.hadoop.ozone.recon.spi.impl.StorageContainerServiceProviderImpl; import org.apache.hadoop.ozone.recon.tasks.NSSummaryTaskWithLegacy; +import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.io.TempDir; @@ -115,6 +113,7 @@ public class TestNSSummaryEndpointWithLegacy { @TempDir private Path temporaryFolder; + ReconNamespaceSummaryManager reconNamespaceSummaryManager; private ReconOMMetadataManager 
reconOMMetadataManager; private NSSummaryEndpoint nsSummaryEndpoint; private OzoneConfiguration conf; @@ -378,7 +377,7 @@ public void setUp() throws Exception { mock(StorageContainerServiceProviderImpl.class)) .addBinding(NSSummaryEndpoint.class) .build(); - ReconNamespaceSummaryManager reconNamespaceSummaryManager = + this.reconNamespaceSummaryManager = reconTestInjector.getInstance(ReconNamespaceSummaryManager.class); nsSummaryEndpoint = reconTestInjector.getInstance(NSSummaryEndpoint.class); @@ -694,6 +693,48 @@ public void checkFileSizeDist(String path, int bin0, } } + @Test + public void testConstructFullPath() throws IOException { + // For Key Tables the parent object ID is not set hence it + // will by default be set as -1 when the NSSummary object is created + OmKeyInfo keyInfo = new OmKeyInfo.Builder() + .setKeyName("dir1/dir2/file2") + .setVolumeName(VOL) + .setBucketName(BUCKET_ONE) + .setObjectID(KEY_TWO_OBJECT_ID) + .build(); + // Call constructFullPath and verify the result + String fullPath = ReconUtils.constructFullPath(keyInfo, + reconNamespaceSummaryManager); + String expectedPath = "vol/bucket1/dir1/dir2/file2"; + Assertions.assertEquals(expectedPath, fullPath); + + // Create key info for file 3 + keyInfo = new OmKeyInfo.Builder() + .setKeyName("dir1/dir2/") + .setVolumeName(VOL) + .setBucketName(BUCKET_ONE) + .setObjectID(DIR_TWO_OBJECT_ID) + .build(); + fullPath = ReconUtils.constructFullPath(keyInfo, + reconNamespaceSummaryManager); + expectedPath = "vol/bucket1/dir1/dir2/"; + Assertions.assertEquals(expectedPath, fullPath); + + // Create key info for file 6 + keyInfo = new OmKeyInfo.Builder() + .setKeyName("dir1/dir4/file6") + .setVolumeName(VOL) + .setBucketName(BUCKET_ONE) + .setObjectID(KEY_SIX_OBJECT_ID) + .build(); + fullPath = ReconUtils.constructFullPath(keyInfo, + reconNamespaceSummaryManager); + expectedPath = "vol/bucket1/dir1/dir4/file6"; + Assertions.assertEquals(expectedPath, fullPath); + } + + /** * Write directories and keys 
info into OM DB. * @throws Exception diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithOBSAndLegacy.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithOBSAndLegacy.java index 8d8299aefc18..fac8f8286d24 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithOBSAndLegacy.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithOBSAndLegacy.java @@ -38,13 +38,10 @@ import org.apache.hadoop.ozone.om.OMConfigKeys; import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; -import org.apache.hadoop.ozone.om.helpers.BucketLayout; -import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; -import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; -import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; -import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; +import org.apache.hadoop.ozone.om.helpers.*; import org.apache.hadoop.ozone.recon.ReconConstants; import org.apache.hadoop.ozone.recon.ReconTestInjector; +import org.apache.hadoop.ozone.recon.ReconUtils; import org.apache.hadoop.ozone.recon.api.handlers.BucketHandler; import org.apache.hadoop.ozone.recon.api.handlers.EntityHandler; import org.apache.hadoop.ozone.recon.api.types.BucketObjectDBInfo; @@ -65,6 +62,7 @@ import org.apache.hadoop.ozone.recon.spi.impl.StorageContainerServiceProviderImpl; import org.apache.hadoop.ozone.recon.tasks.NSSummaryTaskWithLegacy; import org.apache.hadoop.ozone.recon.tasks.NSSummaryTaskWithOBS; +import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.io.TempDir; @@ -109,25 +107,26 @@ * . 
* └── vol * ├── bucket1 (OBS) - * │ ├── file1 - * │ ├── file2 - * │ └── file3 + * │ ├── KEY_ONE + * │ ├── KEY_TWO + * │ └── KEY_THREE * └── bucket2 (OBS) - * ├── file4 - * └── file5 + * ├── KEY_FOUR + * └── KEY_FIVE * └── vol2 * ├── bucket3 (Legacy) - * │ ├── file8 - * │ ├── file9 - * │ └── file10 + * │ ├── KEY_EIGHT + * │ ├── KEY_NINE + * │ └── KEY_TEN * └── bucket4 (Legacy) - * └── file11 + * └── KEY_ELEVEN */ public class TestNSSummaryEndpointWithOBSAndLegacy { @TempDir private Path temporaryFolder; private ReconOMMetadataManager reconOMMetadataManager; + private ReconNamespaceSummaryManager reconNamespaceSummaryManager; private NSSummaryEndpoint nsSummaryEndpoint; private OzoneConfiguration conf; private CommonUtils commonUtils; @@ -374,7 +373,7 @@ public void setUp() throws Exception { mock(StorageContainerServiceProviderImpl.class)) .addBinding(NSSummaryEndpoint.class) .build(); - ReconNamespaceSummaryManager reconNamespaceSummaryManager = + reconNamespaceSummaryManager = reconTestInjector.getInstance(ReconNamespaceSummaryManager.class); nsSummaryEndpoint = reconTestInjector.getInstance(NSSummaryEndpoint.class); @@ -904,6 +903,54 @@ public void testNormalizePathUptoBucket() { OmUtils.normalizePathUptoBucket("volume/bucket/key$%#1/./////////key$%#2")); } + @Test + public void testConstructFullPath() throws IOException { + OmKeyInfo keyInfo = new OmKeyInfo.Builder() + .setKeyName(KEY_TWO) + .setVolumeName(VOL) + .setBucketName(BUCKET_ONE) + .setObjectID(KEY_TWO_OBJECT_ID) + .build(); + String fullPath = ReconUtils.constructFullPath(keyInfo, + reconNamespaceSummaryManager); + String expectedPath = "vol/bucket1/" + KEY_TWO; + Assertions.assertEquals(expectedPath, fullPath); + + keyInfo = new OmKeyInfo.Builder() + .setKeyName(KEY_FIVE) + .setVolumeName(VOL) + .setBucketName(BUCKET_TWO) + .setObjectID(KEY_FIVE_OBJECT_ID) + .build(); + fullPath = ReconUtils.constructFullPath(keyInfo, + reconNamespaceSummaryManager); + expectedPath = "vol/bucket2/" + KEY_FIVE; + 
Assertions.assertEquals(expectedPath, fullPath); + + keyInfo = new OmKeyInfo.Builder() + .setKeyName(KEY_EIGHT) + .setVolumeName(VOL_TWO) + .setBucketName(BUCKET_THREE) + .setObjectID(KEY_EIGHT_OBJECT_ID) + .build(); + fullPath = ReconUtils.constructFullPath(keyInfo, + reconNamespaceSummaryManager); + expectedPath = "vol2/bucket3/" + KEY_EIGHT; + Assertions.assertEquals(expectedPath, fullPath); + + + keyInfo = new OmKeyInfo.Builder() + .setKeyName(KEY_ELEVEN) + .setVolumeName(VOL_TWO) + .setBucketName(BUCKET_FOUR) + .setObjectID(KEY_ELEVEN_OBJECT_ID) + .build(); + fullPath = ReconUtils.constructFullPath(keyInfo, + reconNamespaceSummaryManager); + expectedPath = "vol2/bucket4/" + KEY_ELEVEN; + Assertions.assertEquals(expectedPath, fullPath); + } + /** * Testing the following case. diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithFSO.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithFSO.java index 66c522cb4d70..b7d8ea5e8c2b 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithFSO.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithFSO.java @@ -51,9 +51,7 @@ import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.writeDirToOm; import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.writeKeyToOm; import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_NSSUMMARY_FLUSH_TO_DB_MAX_THRESHOLD; -import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertNull; -import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.*; /** * Test for NSSummaryTaskWithFSO. 
@@ -270,6 +268,37 @@ public void testReprocessDirsUnderDir() throws Exception { assertEquals(DIR_ONE, nsSummaryInDir1.getDirName()); assertEquals(DIR_TWO, nsSummaryInDir2.getDirName()); } + + @Test + public void testDirectoryParentIdAssignment() throws Exception { + // Trigger reprocess to simulate reading from OM DB and processing into NSSummary. + nSSummaryTaskWithFso.reprocessWithFSO(reconOMMetadataManager); + + // Fetch NSSummary for DIR_ONE and verify its parent ID matches BUCKET_ONE_OBJECT_ID. + NSSummary nsSummaryDirOne = + reconNamespaceSummaryManager.getNSSummary(DIR_ONE_OBJECT_ID); + assertNotNull(nsSummaryDirOne, + "NSSummary for DIR_ONE should not be null."); + assertEquals(BUCKET_ONE_OBJECT_ID, nsSummaryDirOne.getParentId(), + "DIR_ONE's parent ID should match BUCKET_ONE_OBJECT_ID."); + + // Fetch NSSummary for DIR_TWO and verify its parent ID matches DIR_ONE_OBJECT_ID. + NSSummary nsSummaryDirTwo = + reconNamespaceSummaryManager.getNSSummary(DIR_TWO_OBJECT_ID); + assertNotNull(nsSummaryDirTwo, + "NSSummary for DIR_TWO should not be null."); + assertEquals(DIR_ONE_OBJECT_ID, nsSummaryDirTwo.getParentId(), + "DIR_TWO's parent ID should match DIR_ONE_OBJECT_ID."); + + // Fetch NSSummary for DIR_THREE and verify its parent ID matches DIR_ONE_OBJECT_ID. + NSSummary nsSummaryDirThree = + reconNamespaceSummaryManager.getNSSummary(DIR_THREE_OBJECT_ID); + assertNotNull(nsSummaryDirThree, + "NSSummary for DIR_THREE should not be null."); + assertEquals(DIR_ONE_OBJECT_ID, nsSummaryDirThree.getParentId(), + "DIR_THREE's parent ID should match DIR_ONE_OBJECT_ID."); + } + } /** @@ -462,6 +491,27 @@ public void testProcessDirDeleteRename() throws IOException { // after renaming dir1, check its new name assertEquals(DIR_ONE_RENAME, nsSummaryForDir1.getDirName()); } + + @Test + public void testParentIdAfterProcessEventBatch() throws IOException { + + // Verify the parent ID of DIR_FOUR after it's added under BUCKET_ONE. 
+ NSSummary nsSummaryDirFour = + reconNamespaceSummaryManager.getNSSummary(DIR_FOUR_OBJECT_ID); + assertNotNull(nsSummaryDirFour, + "NSSummary for DIR_FOUR should not be null."); + assertEquals(BUCKET_ONE_OBJECT_ID, nsSummaryDirFour.getParentId(), + "DIR_FOUR's parent ID should match BUCKET_ONE_OBJECT_ID."); + + // Verify the parent ID of DIR_FIVE after it's added under BUCKET_TWO. + NSSummary nsSummaryDirFive = + reconNamespaceSummaryManager.getNSSummary(DIR_FIVE_OBJECT_ID); + assertNotNull(nsSummaryDirFive, + "NSSummary for DIR_FIVE should not be null."); + assertEquals(BUCKET_TWO_OBJECT_ID, nsSummaryDirFive.getParentId(), + "DIR_FIVE's parent ID should match BUCKET_TWO_OBJECT_ID."); + } + } /** From f75aac29cf576bd54115e353667fce5e1d250ee5 Mon Sep 17 00:00:00 2001 From: arafat Date: Mon, 8 Apr 2024 01:19:57 +0530 Subject: [PATCH 03/21] Fixed bugs and checkstyle issues --- .../ozone/recon/api/ContainerEndpoint.java | 58 +++++++++---------- .../ozone/recon/api/types/DUResponse.java | 4 -- .../api/TestNSSummaryEndpointWithFSO.java | 7 ++- .../api/TestNSSummaryEndpointWithLegacy.java | 9 ++- ...TestNSSummaryEndpointWithOBSAndLegacy.java | 7 ++- .../TestReconNamespaceSummaryManagerImpl.java | 2 +- .../recon/tasks/TestNSSummaryTaskWithFSO.java | 4 +- 7 files changed, 50 insertions(+), 41 deletions(-) diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/ContainerEndpoint.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/ContainerEndpoint.java index 2d502608bb1d..c5c5170ba682 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/ContainerEndpoint.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/ContainerEndpoint.java @@ -31,7 +31,6 @@ import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; -import 
org.apache.hadoop.ozone.recon.api.types.NSSummary; import org.apache.hadoop.ozone.recon.ReconUtils; import org.apache.hadoop.ozone.recon.api.types.ContainerDiscrepancyInfo; import org.apache.hadoop.ozone.recon.api.types.ContainerKeyPrefix; @@ -78,7 +77,6 @@ import java.util.HashMap; import java.util.stream.Collectors; -import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX; import static org.apache.hadoop.ozone.recon.ReconConstants.DEFAULT_FILTER_FOR_MISSING_CONTAINERS; import static org.apache.hadoop.ozone.recon.ReconConstants.RECON_QUERY_FILTER; import static org.apache.hadoop.ozone.recon.ReconConstants.DEFAULT_BATCH_NUMBER; @@ -376,10 +374,10 @@ public Response getMissingContainers( * {@link org.apache.hadoop.ozone.recon.api.types.UnhealthyContainerMetadata} * for all unhealthy containers. * - * @param state Return only containers matching the given unhealthy state, - * eg UNDER_REPLICATED, MIS_REPLICATED, OVER_REPLICATED or - * MISSING. Passing null returns all containers. - * @param limit The limit of unhealthy containers to return. + * @param state Return only containers matching the given unhealthy state, + * eg UNDER_REPLICATED, MIS_REPLICATED, OVER_REPLICATED or + * MISSING. Passing null returns all containers. + * @param limit The limit of unhealthy containers to return. * @param batchNum The batch number (like "page number") of results to return. * Passing 1, will return records 1 to limit. 2 will return * limit + 1 to 2 * limit, etc. @@ -446,8 +444,7 @@ public Response getUnhealthyContainers( * Return * {@link org.apache.hadoop.ozone.recon.api.types.UnhealthyContainerMetadata} * for all unhealthy containers. - * - * @param limit The limit of unhealthy containers to return. + * @param limit The limit of unhealthy containers to return. * @param batchNum The batch number (like "page number") of results to return. * Passing 1, will return records 1 to limit. 2 will return * limit + 1 to 2 * limit, etc. 
@@ -468,23 +465,22 @@ public Response getUnhealthyContainers( * { * containers: [ * { - * containerId: 1, - * state: DELETED, - * pipelineId: "a10ffab6-8ed5-414a-aaf5-79890ff3e8a1", - * numOfKeys: 3, - * inStateSince: + * containerId: 1, + * state: DELETED, + * pipelineId: "a10ffab6-8ed5-414a-aaf5-79890ff3e8a1", + * numOfKeys: 3, + * inStateSince: * }, * { - * containerId: 2, - * state: DELETED, - * pipelineId: "a10ffab6-8ed5-414a-aaf5-79890ff3e8a1", - * numOfKeys: 6, - * inStateSince: + * containerId: 2, + * state: DELETED, + * pipelineId: "a10ffab6-8ed5-414a-aaf5-79890ff3e8a1", + * numOfKeys: 6, + * inStateSince: * } * ] * } - * - * @param limit limits the number of deleted containers + * @param limit limits the number of deleted containers * @param prevKey previous container Id to skip * @return Response of deleted containers. */ @@ -557,17 +553,17 @@ private List getBlocks( /** * Retrieves the container mismatch insights. - *

+ * * This method returns a list of ContainerDiscrepancyInfo objects representing * the containers that are missing in either the Ozone Manager (OM) or the * Storage Container Manager (SCM), based on the provided filter parameter. * The returned list is paginated based on the provided limit and prevKey * parameters. * - * @param limit The maximum number of container discrepancies to return. - * @param prevKey The container ID after which the results are returned. - * @param missingIn The missing filter parameter to specify if it's - * "OM" or "SCM" missing containers to be returned. + * @param limit The maximum number of container discrepancies to return. + * @param prevKey The container ID after which the results are returned. + * @param missingIn The missing filter parameter to specify if it's + * "OM" or "SCM" missing containers to be returned. */ @GET @Path("/mismatch") @@ -721,17 +717,17 @@ public Response getContainerMisMatchInsights( /** * This API retrieves set of deleted containers in SCM which are present * in OM to find out list of keys mapped to such DELETED state containers. - *

+ * * limit - limits the number of such SCM DELETED containers present in OM. * prevKey - Skip containers till it seeks correctly to the previous * containerId. * Sample API Response: * [ - * { - * "containerId": 2, - * "numberOfKeys": 2, - * "pipelines": [] - * } + * { + * "containerId": 2, + * "numberOfKeys": 2, + * "pipelines": [] + * } * ] */ @GET diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/DUResponse.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/DUResponse.java index 5c550c8834cf..b28d9d39c210 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/DUResponse.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/DUResponse.java @@ -54,16 +54,12 @@ public class DUResponse { @JsonProperty("sizeDirectKey") private long keySize; - @JsonProperty("parentId") - private long parentId; - public DUResponse() { this.status = ResponseStatus.OK; this.duData = new ArrayList<>(); // by default, the replication feature is disabled this.sizeWithReplica = -1L; this.keySize = -1L; - this.parentId = -1L; } public ResponseStatus getStatus() { diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithFSO.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithFSO.java index 07ef7373d108..53b46dbe6f47 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithFSO.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithFSO.java @@ -35,7 +35,12 @@ import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; -import org.apache.hadoop.ozone.om.helpers.*; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.BucketLayout; +import 
org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; +import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; import org.apache.hadoop.ozone.recon.ReconConstants; import org.apache.hadoop.ozone.recon.ReconTestInjector; import org.apache.hadoop.ozone.recon.ReconUtils; diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithLegacy.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithLegacy.java index 9c6e6d2c2c39..9723a30ed1df 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithLegacy.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithLegacy.java @@ -36,7 +36,12 @@ import org.apache.hadoop.ozone.om.OMConfigKeys; import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; -import org.apache.hadoop.ozone.om.helpers.*; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.BucketLayout; +import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; +import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; import org.apache.hadoop.ozone.recon.ReconConstants; import org.apache.hadoop.ozone.recon.ReconTestInjector; import org.apache.hadoop.ozone.recon.ReconUtils; @@ -113,7 +118,7 @@ public class TestNSSummaryEndpointWithLegacy { @TempDir private Path temporaryFolder; - ReconNamespaceSummaryManager reconNamespaceSummaryManager; + private ReconNamespaceSummaryManager reconNamespaceSummaryManager; private ReconOMMetadataManager reconOMMetadataManager; private NSSummaryEndpoint nsSummaryEndpoint; private OzoneConfiguration conf; diff --git 
a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithOBSAndLegacy.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithOBSAndLegacy.java index fac8f8286d24..ff97f808c52c 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithOBSAndLegacy.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithOBSAndLegacy.java @@ -38,7 +38,12 @@ import org.apache.hadoop.ozone.om.OMConfigKeys; import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; -import org.apache.hadoop.ozone.om.helpers.*; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; +import org.apache.hadoop.ozone.om.helpers.BucketLayout; +import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; import org.apache.hadoop.ozone.recon.ReconConstants; import org.apache.hadoop.ozone.recon.ReconTestInjector; import org.apache.hadoop.ozone.recon.ReconUtils; diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/TestReconNamespaceSummaryManagerImpl.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/TestReconNamespaceSummaryManagerImpl.java index 2ed85aa5271c..f0af066c46f3 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/TestReconNamespaceSummaryManagerImpl.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/TestReconNamespaceSummaryManagerImpl.java @@ -114,7 +114,7 @@ public void testInitNSSummaryTable() throws IOException { private void putThreeNSMetadata() throws IOException { HashMap hmap = new HashMap<>(); - hmap.put(1L, new NSSummary(1, 2, testBucket, TEST_CHILD_DIR, "dir1",-1)); + 
hmap.put(1L, new NSSummary(1, 2, testBucket, TEST_CHILD_DIR, "dir1", -1)); hmap.put(2L, new NSSummary(3, 4, testBucket, TEST_CHILD_DIR, "dir2", -1)); hmap.put(3L, new NSSummary(5, 6, testBucket, TEST_CHILD_DIR, "dir3", -1)); RDBBatchOperation rdbBatchOperation = new RDBBatchOperation(); diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithFSO.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithFSO.java index b7d8ea5e8c2b..ba2e7497417e 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithFSO.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithFSO.java @@ -51,7 +51,9 @@ import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.writeDirToOm; import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.writeKeyToOm; import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_NSSUMMARY_FLUSH_TO_DB_MAX_THRESHOLD; -import static org.junit.jupiter.api.Assertions.*; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNull; /** * Test for NSSummaryTaskWithFSO. 
From ee97fa60fead8161ab1c042b776d3308fd18c972 Mon Sep 17 00:00:00 2001 From: arafat Date: Wed, 17 Apr 2024 19:44:58 +0530 Subject: [PATCH 04/21] Added Integration Tests to ContainerEndpoint --- .../recon/TestReconContainerEndpoint.java | 208 ++++++++++++++++++ .../ozone/recon/api/ContainerEndpoint.java | 10 +- 2 files changed, 214 insertions(+), 4 deletions(-) create mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconContainerEndpoint.java diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconContainerEndpoint.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconContainerEndpoint.java new file mode 100644 index 000000000000..11aac69fe746 --- /dev/null +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconContainerEndpoint.java @@ -0,0 +1,208 @@ +package org.apache.hadoop.ozone.recon; + +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.scm.server.OzoneStorageContainerManager; +import org.apache.hadoop.ozone.MiniOzoneCluster; +import org.apache.hadoop.ozone.client.BucketArgs; +import org.apache.hadoop.ozone.client.ObjectStore; +import org.apache.hadoop.ozone.client.OzoneClient; +import org.apache.hadoop.ozone.client.OzoneVolume; +import org.apache.hadoop.ozone.client.io.OzoneOutputStream; +import org.apache.hadoop.ozone.om.OMConfigKeys; +import org.apache.hadoop.ozone.om.helpers.BucketLayout; +import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; +import org.apache.hadoop.ozone.recon.api.ContainerEndpoint; +import org.apache.hadoop.ozone.recon.api.types.KeyMetadata; +import org.apache.hadoop.ozone.recon.api.types.KeysResponse; +import org.apache.hadoop.ozone.recon.persistence.ContainerHealthSchemaManager; +import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager; +import org.apache.hadoop.ozone.recon.scm.ReconContainerManager; +import 
org.apache.hadoop.ozone.recon.spi.ReconNamespaceSummaryManager; +import org.apache.hadoop.ozone.recon.spi.impl.OzoneManagerServiceProviderImpl; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Test; + +import javax.ws.rs.core.Response; +import java.io.IOException; +import java.util.Collection; +import java.util.List; + +import static org.junit.jupiter.api.Assertions.*; + +public class TestReconContainerEndpoint { + + private static OzoneClient client; + private static MiniOzoneCluster cluster = null; + private static OzoneConfiguration conf; + private static ObjectStore store; + + @BeforeAll + public static void init() throws Exception { + conf = new OzoneConfiguration(); + conf.set(OMConfigKeys.OZONE_DEFAULT_BUCKET_LAYOUT, + OMConfigKeys.OZONE_BUCKET_LAYOUT_FILE_SYSTEM_OPTIMIZED); + cluster = MiniOzoneCluster.newBuilder(conf) + .setNumDatanodes(3) + .includeRecon(true) + .build(); + cluster.waitForClusterToBeReady(); + client = cluster.newClient(); + store = client.getObjectStore(); + } + + @AfterAll + public static void shutdown() throws IOException { + if (client != null) { + client.close(); + } + if (cluster != null) { + cluster.shutdown(); + } + } + + @Test + public void testContainerEndpointForFSOLayout() throws Exception { + // Setup: Create multiple volumes, buckets, and key hierarchies + String volName = "testvol"; + String bucketName = "fsobucket"; + // Scenario 1: Deeply nested directories + String nestedDirKey = "dir1/dir2/dir3/file1"; + // Scenario 2: Single file in a bucket + String singleFileKey = "file1"; + + // Create volume and bucket + store.createVolume(volName); + OzoneVolume volume = store.getVolume(volName); + volume.createBucket(bucketName, BucketArgs.newBuilder() + .setBucketLayout(BucketLayout.FILE_SYSTEM_OPTIMIZED).build()); + + // Write keys to the bucket + writeTestData(volName, bucketName, nestedDirKey, "data1"); + writeTestData(volName, bucketName, singleFileKey, "data2"); 
+ + // Synchronize data from OM to Recon + OzoneManagerServiceProviderImpl impl = (OzoneManagerServiceProviderImpl) + cluster.getReconServer().getOzoneManagerServiceProvider(); + impl.syncDataFromOM(); + + //Search for the bucket from the bucket table and verify its FSO + String buckKey = "/" + volName + "/" + bucketName; + OmBucketInfo bucketInfo = + cluster.getReconServer().getOzoneManagerServiceProvider() + .getOMMetadataManagerInstance().getBucketTable().get(buckKey); + assertNotNull(bucketInfo); + assertEquals(BucketLayout.FILE_SYSTEM_OPTIMIZED, + bucketInfo.getBucketLayout()); + + // Assuming a known container ID that these keys have been written into + long testContainerID = 1L; + + // Query the ContainerEndpoint for the keys in the specified container + Response response = getContainerEndpointResponse(testContainerID); + + assertNotNull(response, "Response should not be null."); + assertEquals(Response.Status.OK.getStatusCode(), response.getStatus(), + "Expected HTTP 200 OK response."); + + KeysResponse data = (KeysResponse) response.getEntity(); + Collection keyMetadataList = data.getKeys(); + + assertEquals(1, data.getTotalCount()); + assertEquals(1, keyMetadataList.size()); + + // Assert the file name and the complete path. + KeyMetadata keyMetadata = keyMetadataList.iterator().next(); + assertEquals("file1", keyMetadata.getKey()); + assertEquals("testvol/testbucket/dir1/dir2/dir3/file1", + keyMetadata.getCompletePath()); + + testContainerID = 2L; + response = getContainerEndpointResponse(testContainerID); + data = (KeysResponse) response.getEntity(); + keyMetadataList = data.getKeys(); + assertEquals(1, data.getTotalCount()); + assertEquals(1, keyMetadataList.size()); + + // Assert the file name and the complete path. 
+ keyMetadata = keyMetadataList.iterator().next(); + assertEquals("file1", keyMetadata.getKey()); + assertEquals("testvol/testbucket/file1", keyMetadata.getCompletePath()); + } + + @Test + public void testContainerEndpointForOBSBucket() throws Exception { + String volumeName = "testvol"; + String obsBucketName = "obsbucket"; + String obsSingleFileKey = "file1"; + + // Setup volume and OBS bucket + store.createVolume(volumeName); + OzoneVolume volume = store.getVolume(volumeName); + volume.createBucket(obsBucketName, + BucketArgs.newBuilder().setBucketLayout(BucketLayout.OBJECT_STORE) + .build()); + + // Write a single file to the OBS bucket + writeTestData(volumeName, obsBucketName, obsSingleFileKey, "Hello OBS!"); + + OzoneManagerServiceProviderImpl impl = + (OzoneManagerServiceProviderImpl) cluster.getReconServer() + .getOzoneManagerServiceProvider(); + impl.syncDataFromOM(); + + // Search for the bucket from the bucket table and verify its OBS + String bucketKey = "/" + volumeName + "/" + obsBucketName; + OmBucketInfo bucketInfo = + cluster.getReconServer().getOzoneManagerServiceProvider() + .getOMMetadataManagerInstance().getBucketTable().get(bucketKey); + assertNotNull(bucketInfo); + assertEquals(BucketLayout.OBJECT_STORE, bucketInfo.getBucketLayout()); + + // Initialize the ContainerEndpoint + long containerId = 1L; + Response response = getContainerEndpointResponse(containerId); + + assertNotNull(response, "Response should not be null."); + assertEquals(Response.Status.OK.getStatusCode(), response.getStatus(), + "Expected HTTP 200 OK response."); + KeysResponse data = (KeysResponse) response.getEntity(); + Collection keyMetadataList = data.getKeys(); + + assertEquals(1, data.getTotalCount()); + assertEquals(1, keyMetadataList.size()); + + KeyMetadata keyMetadata = keyMetadataList.iterator().next(); + assertEquals("file1", keyMetadata.getKey()); + assertEquals("testvol/obsbucket/file1", keyMetadata.getCompletePath()); + } + + private Response 
getContainerEndpointResponse(long containerId) { + OzoneStorageContainerManager reconSCM = + cluster.getReconServer().getReconStorageContainerManager(); + ReconContainerManager reconContainerManager = + (ReconContainerManager) reconSCM.getContainerManager(); + ContainerHealthSchemaManager containerHealthSchemaManager = + reconContainerManager.getContainerSchemaManager(); + ReconOMMetadataManager omMetadataManagerInstance = + (ReconOMMetadataManager) + cluster.getReconServer().getOzoneManagerServiceProvider() + .getOMMetadataManagerInstance(); + ContainerEndpoint containerEndpoint = + new ContainerEndpoint(reconSCM, containerHealthSchemaManager, + cluster.getReconServer().getReconNamespaceSummaryManager(), + cluster.getReconServer().getReconContainerMetadataManager(), + omMetadataManagerInstance); + return containerEndpoint.getKeysForContainer(containerId, 10, ""); + } + + private void writeTestData(String volumeName, String bucketName, + String keyPath, String data) throws Exception { + try (OzoneOutputStream out = client.getObjectStore().getVolume(volumeName) + .getBucket(bucketName) + .createKey(keyPath, data.length())) { + out.write(data.getBytes()); + } + } + +} diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/ContainerEndpoint.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/ContainerEndpoint.java index c5c5170ba682..8bb50e247ec4 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/ContainerEndpoint.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/ContainerEndpoint.java @@ -27,6 +27,7 @@ import org.apache.hadoop.hdds.scm.pipeline.PipelineManager; import org.apache.hadoop.hdds.scm.pipeline.PipelineNotFoundException; import org.apache.hadoop.hdds.scm.server.OzoneStorageContainerManager; +import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; 
import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; @@ -95,10 +96,7 @@ @AdminOnly public class ContainerEndpoint { - @Inject private ReconContainerMetadataManager reconContainerMetadataManager; - - @Inject private ReconOMMetadataManager omMetadataManager; private final ReconContainerManager containerManager; @@ -146,13 +144,17 @@ public static DataFilter fromValue(String value) { @Inject public ContainerEndpoint(OzoneStorageContainerManager reconSCM, ContainerHealthSchemaManager containerHealthSchemaManager, - ReconNamespaceSummaryManager reconNamespaceSummaryManager) { + ReconNamespaceSummaryManager reconNamespaceSummaryManager, + ReconContainerMetadataManager reconContainerMetadataManager, + ReconOMMetadataManager omMetadataManager) { this.containerManager = (ReconContainerManager) reconSCM.getContainerManager(); this.pipelineManager = reconSCM.getPipelineManager(); this.containerHealthSchemaManager = containerHealthSchemaManager; this.reconNamespaceSummaryManager = reconNamespaceSummaryManager; this.reconSCM = reconSCM; + this.reconContainerMetadataManager = reconContainerMetadataManager; + this.omMetadataManager = omMetadataManager; } /** From 7faa2e19fb9fd2994e7cbf2c5e71ded56b916390 Mon Sep 17 00:00:00 2001 From: arafat Date: Mon, 29 Apr 2024 14:00:38 +0530 Subject: [PATCH 05/21] Rebuilding tree to prevent backwards compatability --- .../apache/hadoop/ozone/recon/ReconUtils.java | 14 +++++++++++++ .../spi/ReconNamespaceSummaryManager.java | 3 +++ .../ReconNamespaceSummaryManagerImpl.java | 12 ++++++++++- .../tasks/NSSummaryTaskDbEventHandler.java | 21 ++++++++++++++----- 4 files changed, 44 insertions(+), 6 deletions(-) diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java index 174f2442ff7e..a576417baf93 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java +++ 
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java @@ -24,6 +24,7 @@ import java.io.FileInputStream; import java.io.FileOutputStream; import java.io.IOException; +import java.lang.reflect.Field; import java.net.HttpURLConnection; import java.net.URL; import java.nio.file.Path; @@ -292,6 +293,19 @@ public static String constructFullPath(OmKeyInfo omKeyInfo, return fullPath.toString(); } + public static boolean hasParentIdField(NSSummary nsSummary) { + Field field; + try { + // This line attempts to get the Field object representing the parentId + // field from the NSSummary class. If the parentId field does not exist, + // a NoSuchFieldException will be thrown. + field = NSSummary.class.getDeclaredField("parentId"); + return true; + } catch (NoSuchFieldException e) { + return false; // Field not present + } + } + /** * Make HTTP GET call on the URL and return HttpURLConnection instance. diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/ReconNamespaceSummaryManager.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/ReconNamespaceSummaryManager.java index 6cb93e7134a2..ea0ff6ed5df4 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/ReconNamespaceSummaryManager.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/ReconNamespaceSummaryManager.java @@ -21,6 +21,7 @@ import org.apache.hadoop.hdds.annotation.InterfaceStability; import org.apache.hadoop.hdds.utils.db.BatchOperation; import org.apache.hadoop.hdds.utils.db.RDBBatchOperation; +import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.recon.api.types.NSSummary; import java.io.IOException; @@ -45,4 +46,6 @@ void batchStoreNSSummaries(BatchOperation batch, long objectId, void commitBatchOperation(RDBBatchOperation rdbBatchOperation) throws IOException; + + void rebuildNSSummaryTree(OMMetadataManager omMetadataManager); } diff --git 
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ReconNamespaceSummaryManagerImpl.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ReconNamespaceSummaryManagerImpl.java index 42a30095f315..9167854a8263 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ReconNamespaceSummaryManagerImpl.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ReconNamespaceSummaryManagerImpl.java @@ -22,8 +22,11 @@ import org.apache.hadoop.hdds.utils.db.DBStore; import org.apache.hadoop.hdds.utils.db.RDBBatchOperation; import org.apache.hadoop.hdds.utils.db.Table; +import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.recon.api.types.NSSummary; import org.apache.hadoop.ozone.recon.spi.ReconNamespaceSummaryManager; +import org.apache.hadoop.ozone.recon.tasks.NSSummaryTask; + import static org.apache.hadoop.ozone.recon.spi.impl.ReconDBProvider.truncateTable; import javax.inject.Inject; @@ -39,12 +42,14 @@ public class ReconNamespaceSummaryManagerImpl private Table nsSummaryTable; private DBStore namespaceDbStore; + private NSSummaryTask nsSummaryTask; @Inject - public ReconNamespaceSummaryManagerImpl(ReconDBProvider reconDBProvider) + public ReconNamespaceSummaryManagerImpl(ReconDBProvider reconDBProvider, NSSummaryTask nsSummaryTask) throws IOException { namespaceDbStore = reconDBProvider.getDbStore(); this.nsSummaryTable = NAMESPACE_SUMMARY.getTable(namespaceDbStore); + this.nsSummaryTask = nsSummaryTask; } @Override @@ -81,6 +86,11 @@ public void commitBatchOperation(RDBBatchOperation rdbBatchOperation) this.namespaceDbStore.commitBatchOperation(rdbBatchOperation); } + @Override + public void rebuildNSSummaryTree(OMMetadataManager omMetadataManager) { + nsSummaryTask.reprocess(omMetadataManager); + } + public Table getNSSummaryTable() { return nsSummaryTable; } diff --git 
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskDbEventHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskDbEventHandler.java index b979307019ff..af1347439ea9 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskDbEventHandler.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskDbEventHandler.java @@ -30,10 +30,12 @@ import org.slf4j.LoggerFactory; import java.io.IOException; +import java.lang.reflect.Field; import java.util.Map; import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_NSSUMMARY_FLUSH_TO_DB_MAX_THRESHOLD; import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_NSSUMMARY_FLUSH_TO_DB_MAX_THRESHOLD_DEFAULT; +import static org.apache.hadoop.ozone.recon.ReconUtils.hasParentIdField; /** * Class for holding all NSSummaryTask methods @@ -125,11 +127,15 @@ protected void handlePutDirEvent(OmDirectoryInfo directoryInfo, if (curNSSummary == null) { // If we don't have it in this batch we try to get it from the DB curNSSummary = reconNamespaceSummaryManager.getNSSummary(objectId); - } - if (curNSSummary == null) { - // If we don't have it locally and in the DB we create a new instance - // as this is a new ID - curNSSummary = new NSSummary(); + if (curNSSummary == null) { + // If we don't have it locally and in the DB we create a new instance + // as this is a new ID + curNSSummary = new NSSummary(); + } else if (!hasParentIdField(curNSSummary)) { + // Call reprocess method if parentId is missing + reconNamespaceSummaryManager.rebuildNSSummaryTree(reconOMMetadataManager); + curNSSummary = reconNamespaceSummaryManager.getNSSummary(objectId); + } } curNSSummary.setDirName(dirName); // Set the parent directory ID @@ -145,6 +151,11 @@ protected void handlePutDirEvent(OmDirectoryInfo directoryInfo, // If we don't have it in this batch we try to get it from the 
DB nsSummary = reconNamespaceSummaryManager.getNSSummary(parentObjectId); } + if (nsSummary != null && !hasParentIdField(nsSummary)) { + // Call reprocess method if parentId is missing + reconNamespaceSummaryManager.rebuildNSSummaryTree(reconOMMetadataManager); + nsSummary = reconNamespaceSummaryManager.getNSSummary(objectId); + } if (nsSummary == null) { // If we don't have it locally and in the DB we create a new instance // as this is a new ID From 0d03da503aa67d207eba233d413dd33f951e29c8 Mon Sep 17 00:00:00 2001 From: arafat Date: Mon, 29 Apr 2024 16:04:58 +0530 Subject: [PATCH 06/21] Finished the changes for preventing compatability issues --- .../apache/hadoop/ozone/recon/ReconUtils.java | 9 ++++++-- .../ozone/recon/api/ContainerEndpoint.java | 2 +- .../tasks/NSSummaryTaskDbEventHandler.java | 23 +++++++++---------- .../api/TestNSSummaryEndpointWithFSO.java | 10 ++++---- .../api/TestNSSummaryEndpointWithLegacy.java | 6 ++--- ...TestNSSummaryEndpointWithOBSAndLegacy.java | 8 +++---- 6 files changed, 31 insertions(+), 27 deletions(-) diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java index a576417baf93..788d1d10c165 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java @@ -66,6 +66,7 @@ import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.recon.api.types.NSSummary; import org.apache.hadoop.ozone.recon.api.types.DUResponse; +import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager; import org.apache.hadoop.ozone.recon.scm.ReconContainerReportQueue; import org.apache.hadoop.ozone.recon.spi.ReconNamespaceSummaryManager; import org.apache.hadoop.security.authentication.client.AuthenticationException; @@ -265,7 +266,8 @@ public void untarCheckpointFile(File tarFile, Path destPath) * 
@throws IOException */ public static String constructFullPath(OmKeyInfo omKeyInfo, - ReconNamespaceSummaryManager reconNamespaceSummaryManager) + ReconNamespaceSummaryManager reconNamespaceSummaryManager, + ReconOMMetadataManager omMetadataManager) throws IOException { StringBuilder fullPath = new StringBuilder(omKeyInfo.getKeyName()); long parentId = omKeyInfo.getParentObjectID(); @@ -275,7 +277,10 @@ public static String constructFullPath(OmKeyInfo omKeyInfo, if (nsSummary == null) { break; } - // Prepend the directory name to the path + if (!hasParentIdField(nsSummary)) { + // Call reprocess method if parentId is missing + reconNamespaceSummaryManager.rebuildNSSummaryTree(omMetadataManager); + } fullPath.insert(0, nsSummary.getDirName() + OM_KEY_PREFIX); // Move to the parent ID of the current directory diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/ContainerEndpoint.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/ContainerEndpoint.java index 8bb50e247ec4..3e6642766696 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/ContainerEndpoint.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/ContainerEndpoint.java @@ -289,7 +289,7 @@ public Response getKeysForContainer( keyMetadata.setVolume(omKeyInfo.getVolumeName()); keyMetadata.setKey(omKeyInfo.getKeyName()); keyMetadata.setCompletePath(ReconUtils.constructFullPath(omKeyInfo, - reconNamespaceSummaryManager)); + reconNamespaceSummaryManager, omMetadataManager)); keyMetadata.setCreationTime( Instant.ofEpochMilli(omKeyInfo.getCreationTime())); keyMetadata.setModificationTime( diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskDbEventHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskDbEventHandler.java index af1347439ea9..fc121ad4fe06 100644 --- 
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskDbEventHandler.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskDbEventHandler.java @@ -127,15 +127,15 @@ protected void handlePutDirEvent(OmDirectoryInfo directoryInfo, if (curNSSummary == null) { // If we don't have it in this batch we try to get it from the DB curNSSummary = reconNamespaceSummaryManager.getNSSummary(objectId); - if (curNSSummary == null) { - // If we don't have it locally and in the DB we create a new instance - // as this is a new ID - curNSSummary = new NSSummary(); - } else if (!hasParentIdField(curNSSummary)) { - // Call reprocess method if parentId is missing - reconNamespaceSummaryManager.rebuildNSSummaryTree(reconOMMetadataManager); - curNSSummary = reconNamespaceSummaryManager.getNSSummary(objectId); - } + } + if (!hasParentIdField(curNSSummary)) { + reconNamespaceSummaryManager.rebuildNSSummaryTree(reconOMMetadataManager); + return; + } + if (curNSSummary == null) { + // If we don't have it locally and in the DB we create a new instance + // as this is a new ID + curNSSummary = new NSSummary(); } curNSSummary.setDirName(dirName); // Set the parent directory ID @@ -151,10 +151,9 @@ protected void handlePutDirEvent(OmDirectoryInfo directoryInfo, // If we don't have it in this batch we try to get it from the DB nsSummary = reconNamespaceSummaryManager.getNSSummary(parentObjectId); } - if (nsSummary != null && !hasParentIdField(nsSummary)) { - // Call reprocess method if parentId is missing + if (!hasParentIdField(nsSummary)) { reconNamespaceSummaryManager.rebuildNSSummaryTree(reconOMMetadataManager); - nsSummary = reconNamespaceSummaryManager.getNSSummary(objectId); + return; } if (nsSummary == null) { // If we don't have it locally and in the DB we create a new instance diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithFSO.java 
b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithFSO.java index 08f43770bf97..82669edf912d 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithFSO.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithFSO.java @@ -711,7 +711,7 @@ public void testConstructFullPath() throws IOException { .build(); // Call constructFullPath and verify the result String fullPath = ReconUtils.constructFullPath(keyInfo, - reconNamespaceSummaryManager); + reconNamespaceSummaryManager, reconOMMetadataManager); String expectedPath = "vol/bucket1/dir1/dir2/file2"; Assertions.assertEquals(expectedPath, fullPath); @@ -724,7 +724,7 @@ public void testConstructFullPath() throws IOException { .setParentObjectID(DIR_THREE_OBJECT_ID) .build(); fullPath = ReconUtils.constructFullPath(keyInfo, - reconNamespaceSummaryManager); + reconNamespaceSummaryManager, reconOMMetadataManager); expectedPath = "vol/bucket1/dir1/dir3/file3"; Assertions.assertEquals(expectedPath, fullPath); @@ -737,7 +737,7 @@ public void testConstructFullPath() throws IOException { .setParentObjectID(DIR_FOUR_OBJECT_ID) .build(); fullPath = ReconUtils.constructFullPath(keyInfo, - reconNamespaceSummaryManager); + reconNamespaceSummaryManager, reconOMMetadataManager); expectedPath = "vol/bucket1/dir1/dir4/file6"; Assertions.assertEquals(expectedPath, fullPath); @@ -750,7 +750,7 @@ public void testConstructFullPath() throws IOException { .setParentObjectID(BUCKET_ONE_OBJECT_ID) .build(); fullPath = ReconUtils.constructFullPath(keyInfo, - reconNamespaceSummaryManager); + reconNamespaceSummaryManager, reconOMMetadataManager); expectedPath = "vol/bucket1/file1"; Assertions.assertEquals(expectedPath, fullPath); @@ -763,7 +763,7 @@ public void testConstructFullPath() throws IOException { .setParentObjectID(DIR_FIVE_OBJECT_ID) .build(); fullPath = ReconUtils.constructFullPath(keyInfo, - 
reconNamespaceSummaryManager); + reconNamespaceSummaryManager, reconOMMetadataManager); expectedPath = "vol2/bucket3/dir5/file9"; Assertions.assertEquals(expectedPath, fullPath); } diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithLegacy.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithLegacy.java index 25ac2255bf77..dba245ce8b80 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithLegacy.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithLegacy.java @@ -710,7 +710,7 @@ public void testConstructFullPath() throws IOException { .build(); // Call constructFullPath and verify the result String fullPath = ReconUtils.constructFullPath(keyInfo, - reconNamespaceSummaryManager); + reconNamespaceSummaryManager, reconOMMetadataManager); String expectedPath = "vol/bucket1/dir1/dir2/file2"; Assertions.assertEquals(expectedPath, fullPath); @@ -722,7 +722,7 @@ public void testConstructFullPath() throws IOException { .setObjectID(DIR_TWO_OBJECT_ID) .build(); fullPath = ReconUtils.constructFullPath(keyInfo, - reconNamespaceSummaryManager); + reconNamespaceSummaryManager, reconOMMetadataManager); expectedPath = "vol/bucket1/dir1/dir2/"; Assertions.assertEquals(expectedPath, fullPath); @@ -734,7 +734,7 @@ public void testConstructFullPath() throws IOException { .setObjectID(KEY_SIX_OBJECT_ID) .build(); fullPath = ReconUtils.constructFullPath(keyInfo, - reconNamespaceSummaryManager); + reconNamespaceSummaryManager, reconOMMetadataManager); expectedPath = "vol/bucket1/dir1/dir4/file6"; Assertions.assertEquals(expectedPath, fullPath); } diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithOBSAndLegacy.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithOBSAndLegacy.java index 
9961fb2c8916..6a2f2c557db8 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithOBSAndLegacy.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithOBSAndLegacy.java @@ -917,7 +917,7 @@ public void testConstructFullPath() throws IOException { .setObjectID(KEY_TWO_OBJECT_ID) .build(); String fullPath = ReconUtils.constructFullPath(keyInfo, - reconNamespaceSummaryManager); + reconNamespaceSummaryManager, reconOMMetadataManager); String expectedPath = "vol/bucket1/" + KEY_TWO; Assertions.assertEquals(expectedPath, fullPath); @@ -928,7 +928,7 @@ public void testConstructFullPath() throws IOException { .setObjectID(KEY_FIVE_OBJECT_ID) .build(); fullPath = ReconUtils.constructFullPath(keyInfo, - reconNamespaceSummaryManager); + reconNamespaceSummaryManager, reconOMMetadataManager); expectedPath = "vol/bucket2/" + KEY_FIVE; Assertions.assertEquals(expectedPath, fullPath); @@ -939,7 +939,7 @@ public void testConstructFullPath() throws IOException { .setObjectID(KEY_EIGHT_OBJECT_ID) .build(); fullPath = ReconUtils.constructFullPath(keyInfo, - reconNamespaceSummaryManager); + reconNamespaceSummaryManager, reconOMMetadataManager); expectedPath = "vol2/bucket3/" + KEY_EIGHT; Assertions.assertEquals(expectedPath, fullPath); @@ -951,7 +951,7 @@ public void testConstructFullPath() throws IOException { .setObjectID(KEY_ELEVEN_OBJECT_ID) .build(); fullPath = ReconUtils.constructFullPath(keyInfo, - reconNamespaceSummaryManager); + reconNamespaceSummaryManager, reconOMMetadataManager); expectedPath = "vol2/bucket4/" + KEY_ELEVEN; Assertions.assertEquals(expectedPath, fullPath); } From dd1016d806f7a8bc928017942de11877b0d199a5 Mon Sep 17 00:00:00 2001 From: arafat Date: Mon, 29 Apr 2024 19:15:40 +0530 Subject: [PATCH 07/21] Revised the backporting strategy --- .../apache/hadoop/ozone/recon/ReconUtils.java | 20 ++++--------------- .../ozone/recon/api/types/NSSummary.java | 4 ++-- 
.../ozone/recon/codec/NSSummaryCodec.java | 8 ++++++-- .../tasks/NSSummaryTaskDbEventHandler.java | 10 ---------- 4 files changed, 12 insertions(+), 30 deletions(-) diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java index 788d1d10c165..2e0e5a78b460 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java @@ -277,9 +277,11 @@ public static String constructFullPath(OmKeyInfo omKeyInfo, if (nsSummary == null) { break; } - if (!hasParentIdField(nsSummary)) { - // Call reprocess method if parentId is missing + if (nsSummary.getParentId() == -1) { + // Call reprocess method if parentId is negative, as it has not been set for this yet. reconNamespaceSummaryManager.rebuildNSSummaryTree(omMetadataManager); + return constructFullPath(omKeyInfo, reconNamespaceSummaryManager, + omMetadataManager); } fullPath.insert(0, nsSummary.getDirName() + OM_KEY_PREFIX); @@ -298,20 +300,6 @@ public static String constructFullPath(OmKeyInfo omKeyInfo, return fullPath.toString(); } - public static boolean hasParentIdField(NSSummary nsSummary) { - Field field; - try { - // This line attempts to get the Field object representing the parentId - // field from the NSSummary class. If the parentId field does not exist, - // a NoSuchFieldException will be thrown. - field = NSSummary.class.getDeclaredField("parentId"); - return true; - } catch (NoSuchFieldException e) { - return false; // Field not present - } - } - - /** * Make HTTP GET call on the URL and return HttpURLConnection instance. 
* diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/NSSummary.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/NSSummary.java index c51769928356..0f774f01bf48 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/NSSummary.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/NSSummary.java @@ -36,11 +36,11 @@ public class NSSummary { private int[] fileSizeBucket; private Set childDir; private String dirName; - private long parentId = -1; + private long parentId = 0; public NSSummary() { this(0, 0L, new int[ReconConstants.NUM_OF_FILE_SIZE_BINS], - new HashSet<>(), "", -1); // -1 can be a default value indicating no parent + new HashSet<>(), "", 0); } public NSSummary(int numOfFiles, diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/codec/NSSummaryCodec.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/codec/NSSummaryCodec.java index 63eddc34e8d1..c8023ac9c234 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/codec/NSSummaryCodec.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/codec/NSSummaryCodec.java @@ -113,8 +113,6 @@ public NSSummary fromPersistedFormat(byte[] rawData) throws IOException { int strLen = in.readInt(); if (strLen == 0) { - long parentId = in.readLong(); // Deserialize parentId - res.setParentId(parentId); return res; } byte[] buffer = new byte[strLen]; @@ -123,6 +121,12 @@ public NSSummary fromPersistedFormat(byte[] rawData) throws IOException { String dirName = stringCodec.fromPersistedFormat(buffer); res.setDirName(dirName); long parentId = in.readLong(); + if (parentId == 0) { + // Set the parent ID to -1 to indicate that it is old data from + // the cluster and the value has not yet been set. 
+ res.setParentId(-1); + return res; + } res.setParentId(parentId); return res; } diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskDbEventHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskDbEventHandler.java index fc121ad4fe06..b979307019ff 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskDbEventHandler.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskDbEventHandler.java @@ -30,12 +30,10 @@ import org.slf4j.LoggerFactory; import java.io.IOException; -import java.lang.reflect.Field; import java.util.Map; import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_NSSUMMARY_FLUSH_TO_DB_MAX_THRESHOLD; import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_NSSUMMARY_FLUSH_TO_DB_MAX_THRESHOLD_DEFAULT; -import static org.apache.hadoop.ozone.recon.ReconUtils.hasParentIdField; /** * Class for holding all NSSummaryTask methods @@ -128,10 +126,6 @@ protected void handlePutDirEvent(OmDirectoryInfo directoryInfo, // If we don't have it in this batch we try to get it from the DB curNSSummary = reconNamespaceSummaryManager.getNSSummary(objectId); } - if (!hasParentIdField(curNSSummary)) { - reconNamespaceSummaryManager.rebuildNSSummaryTree(reconOMMetadataManager); - return; - } if (curNSSummary == null) { // If we don't have it locally and in the DB we create a new instance // as this is a new ID @@ -151,10 +145,6 @@ protected void handlePutDirEvent(OmDirectoryInfo directoryInfo, // If we don't have it in this batch we try to get it from the DB nsSummary = reconNamespaceSummaryManager.getNSSummary(parentObjectId); } - if (!hasParentIdField(nsSummary)) { - reconNamespaceSummaryManager.rebuildNSSummaryTree(reconOMMetadataManager); - return; - } if (nsSummary == null) { // If we don't have it locally and in the DB we create a new instance // as this is a new ID 
From 0e7baa8c8a944e2960d479446ea82c290768d255 Mon Sep 17 00:00:00 2001 From: arafat Date: Tue, 30 Apr 2024 00:11:13 +0530 Subject: [PATCH 08/21] Made review changes --- .../recon/TestReconContainerEndpoint.java | 23 +++++++++++++++--- .../apache/hadoop/ozone/recon/ReconUtils.java | 24 +++++++++++++------ .../ozone/recon/codec/NSSummaryCodec.java | 13 +++++----- 3 files changed, 44 insertions(+), 16 deletions(-) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconContainerEndpoint.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconContainerEndpoint.java index 11aac69fe746..f4e8fff1d58c 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconContainerEndpoint.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconContainerEndpoint.java @@ -1,3 +1,21 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + package org.apache.hadoop.ozone.recon; import org.apache.hadoop.hdds.conf.OzoneConfiguration; @@ -17,7 +35,7 @@ import org.apache.hadoop.ozone.recon.persistence.ContainerHealthSchemaManager; import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager; import org.apache.hadoop.ozone.recon.scm.ReconContainerManager; -import org.apache.hadoop.ozone.recon.spi.ReconNamespaceSummaryManager; +import java.nio.charset.StandardCharsets; import org.apache.hadoop.ozone.recon.spi.impl.OzoneManagerServiceProviderImpl; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeAll; @@ -26,7 +44,6 @@ import javax.ws.rs.core.Response; import java.io.IOException; import java.util.Collection; -import java.util.List; import static org.junit.jupiter.api.Assertions.*; @@ -201,7 +218,7 @@ private void writeTestData(String volumeName, String bucketName, try (OzoneOutputStream out = client.getObjectStore().getVolume(volumeName) .getBucket(bucketName) .createKey(keyPath, data.length())) { - out.write(data.getBytes()); + out.write(data.getBytes(StandardCharsets.UTF_8)); } } diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java index 2e0e5a78b460..5316bd35f129 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java @@ -33,6 +33,7 @@ import java.util.ArrayList; import java.util.List; import java.util.concurrent.BlockingQueue; +import java.util.concurrent.Executors; import java.util.stream.Collectors; import com.google.common.base.Preconditions; @@ -256,7 +257,7 @@ public void untarCheckpointFile(File tarFile, Path destPath) /** * Constructs the full path of a key from its OmKeyInfo using a bottom-up approach, starting from the leaf node. - *

+ * * The method begins with the leaf node (the key itself) and recursively prepends parent directory names, fetched * via NSSummary objects, until reaching the parent bucket (parentId is -1). It effectively builds the path from * bottom to top, finally prepending the volume and bucket names to complete the full path. @@ -272,16 +273,17 @@ public static String constructFullPath(OmKeyInfo omKeyInfo, StringBuilder fullPath = new StringBuilder(omKeyInfo.getKeyName()); long parentId = omKeyInfo.getParentObjectID(); boolean isDirectoryPresent = false; - while (parentId != -1) { + boolean rebuildTriggered = false; + + while (parentId != 0) { NSSummary nsSummary = reconNamespaceSummaryManager.getNSSummary(parentId); if (nsSummary == null) { break; } - if (nsSummary.getParentId() == -1) { - // Call reprocess method if parentId is negative, as it has not been set for this yet. - reconNamespaceSummaryManager.rebuildNSSummaryTree(omMetadataManager); - return constructFullPath(omKeyInfo, reconNamespaceSummaryManager, - omMetadataManager); + if (nsSummary.getParentId() == -1 && !rebuildTriggered) { + // Trigger rebuild asynchronously and continue path construction + triggerRebuild(reconNamespaceSummaryManager, omMetadataManager); + rebuildTriggered = true; // Prevent multiple rebuild triggers } fullPath.insert(0, nsSummary.getDirName() + OM_KEY_PREFIX); @@ -300,6 +302,14 @@ public static String constructFullPath(OmKeyInfo omKeyInfo, return fullPath.toString(); } + private static void triggerRebuild(ReconNamespaceSummaryManager reconNamespaceSummaryManager, + ReconOMMetadataManager omMetadataManager) { + // Run this method in a separate thread + Executors.newSingleThreadExecutor().submit(() -> { + reconNamespaceSummaryManager.rebuildNSSummaryTree(omMetadataManager); + }); + } + /** * Make HTTP GET call on the URL and return HttpURLConnection instance. 
* diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/codec/NSSummaryCodec.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/codec/NSSummaryCodec.java index c8023ac9c234..f3b273451a2d 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/codec/NSSummaryCodec.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/codec/NSSummaryCodec.java @@ -120,14 +120,15 @@ public NSSummary fromPersistedFormat(byte[] rawData) throws IOException { assert (bytesRead == strLen); String dirName = stringCodec.fromPersistedFormat(buffer); res.setDirName(dirName); - long parentId = in.readLong(); - if (parentId == 0) { - // Set the parent ID to -1 to indicate that it is old data from - // the cluster and the value has not yet been set. + + // Check if there is enough data available to read the parentId + if (in.available() >= Long.BYTES) { + long parentId = in.readLong(); + res.setParentId(parentId); + } else { + // Set default parentId to -1 indicating it's from old format res.setParentId(-1); - return res; } - res.setParentId(parentId); return res; } From 76f0e4143417e02f0a7c4d038984d1668243aea8 Mon Sep 17 00:00:00 2001 From: arafat Date: Fri, 3 May 2024 13:10:43 +0530 Subject: [PATCH 09/21] Made 2nd review changes --- .../apache/hadoop/ozone/recon/ReconUtils.java | 44 ++++++++++++++----- .../tasks/NSSummaryTaskDbEventHandler.java | 4 +- 2 files changed, 35 insertions(+), 13 deletions(-) diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java index 5316bd35f129..ca79aba1041b 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java @@ -91,6 +91,8 @@ public ReconUtils() { private static final Logger LOG = LoggerFactory.getLogger( ReconUtils.class); + private static 
volatile boolean rebuildTriggered = false; + private static volatile boolean isRebuilding = false; public static File getReconScmDbDir(ConfigurationSource conf) { return new ReconUtils().getReconDbDir(conf, OZONE_RECON_SCM_DB_DIR); @@ -260,20 +262,29 @@ public void untarCheckpointFile(File tarFile, Path destPath) * * The method begins with the leaf node (the key itself) and recursively prepends parent directory names, fetched * via NSSummary objects, until reaching the parent bucket (parentId is -1). It effectively builds the path from - * bottom to top, finally prepending the volume and bucket names to complete the full path. + * bottom to top, finally prepending the volume and bucket names to complete the full path. If the directory structure + * is currently being rebuilt (indicated by the isRebuilding flag), this method returns an empty string to signify + * that path construction is temporarily unavailable. * * @param omKeyInfo The OmKeyInfo object for the key - * @return The constructed full path of the key as a String. + * @return The constructed full path of the key as a String, or an empty string if a rebuild is in progress and + * the path cannot be constructed at this time. 
* @throws IOException */ + public static String constructFullPath(OmKeyInfo omKeyInfo, ReconNamespaceSummaryManager reconNamespaceSummaryManager, ReconOMMetadataManager omMetadataManager) throws IOException { + + // Return empty string to signify that path construction is temporarily unavailable + if (isRebuilding) { + return ""; + } + StringBuilder fullPath = new StringBuilder(omKeyInfo.getKeyName()); long parentId = omKeyInfo.getParentObjectID(); boolean isDirectoryPresent = false; - boolean rebuildTriggered = false; while (parentId != 0) { NSSummary nsSummary = reconNamespaceSummaryManager.getNSSummary(parentId); @@ -281,9 +292,12 @@ public static String constructFullPath(OmKeyInfo omKeyInfo, break; } if (nsSummary.getParentId() == -1 && !rebuildTriggered) { - // Trigger rebuild asynchronously and continue path construction - triggerRebuild(reconNamespaceSummaryManager, omMetadataManager); - rebuildTriggered = true; // Prevent multiple rebuild triggers + synchronized (ReconUtils.class) { + if (!rebuildTriggered) { + triggerRebuild(reconNamespaceSummaryManager, omMetadataManager); + rebuildTriggered = true; // Set the flag to true inside a synchronized block + } + } } fullPath.insert(0, nsSummary.getDirName() + OM_KEY_PREFIX); @@ -304,10 +318,20 @@ public static String constructFullPath(OmKeyInfo omKeyInfo, private static void triggerRebuild(ReconNamespaceSummaryManager reconNamespaceSummaryManager, ReconOMMetadataManager omMetadataManager) { - // Run this method in a separate thread - Executors.newSingleThreadExecutor().submit(() -> { - reconNamespaceSummaryManager.rebuildNSSummaryTree(omMetadataManager); - }); + synchronized (ReconUtils.class) { + if (!isRebuilding) { + isRebuilding = true; + Executors.newSingleThreadExecutor().submit(() -> { + try { + reconNamespaceSummaryManager.rebuildNSSummaryTree( + omMetadataManager); + } finally { + isRebuilding = false; + rebuildTriggered = false; + } + }); + } + } } /** diff --git 
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskDbEventHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskDbEventHandler.java index b979307019ff..888ec5319f2f 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskDbEventHandler.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskDbEventHandler.java @@ -133,9 +133,7 @@ protected void handlePutDirEvent(OmDirectoryInfo directoryInfo, } curNSSummary.setDirName(dirName); // Set the parent directory ID - if (parentObjectId != -1) { - curNSSummary.setParentId(parentObjectId); - } + curNSSummary.setParentId(parentObjectId); nsSummaryMap.put(objectId, curNSSummary); // Write the child dir list to the parent directory From 89f5d1e4361ab9335db8c29cb41796246474b104 Mon Sep 17 00:00:00 2001 From: arafat Date: Mon, 6 May 2024 13:40:57 +0530 Subject: [PATCH 10/21] Added command to terminate the thread --- .../org/apache/hadoop/ozone/recon/ReconUtils.java | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java index ca79aba1041b..6dbf41f0a187 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java @@ -33,6 +33,7 @@ import java.util.ArrayList; import java.util.List; import java.util.concurrent.BlockingQueue; +import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.stream.Collectors; @@ -321,13 +322,16 @@ private static void triggerRebuild(ReconNamespaceSummaryManager reconNamespaceSu synchronized (ReconUtils.class) { if (!isRebuilding) { isRebuilding = true; - Executors.newSingleThreadExecutor().submit(() -> { + 
ExecutorService executor = Executors.newSingleThreadExecutor(); + executor.submit(() -> { try { - reconNamespaceSummaryManager.rebuildNSSummaryTree( - omMetadataManager); + reconNamespaceSummaryManager.rebuildNSSummaryTree(omMetadataManager); } finally { - isRebuilding = false; - rebuildTriggered = false; + synchronized (ReconUtils.class) { + isRebuilding = false; + rebuildTriggered = false; + } + executor.shutdown(); } }); } From 1c41bc6711027efe6fc90d97ffeb7adfb2211207 Mon Sep 17 00:00:00 2001 From: arafat Date: Mon, 6 May 2024 13:46:37 +0530 Subject: [PATCH 11/21] Removed the unnecessary synchronised block --- .../java/org/apache/hadoop/ozone/recon/ReconUtils.java | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java index 6dbf41f0a187..e18d53b2396b 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java @@ -322,16 +322,14 @@ private static void triggerRebuild(ReconNamespaceSummaryManager reconNamespaceSu synchronized (ReconUtils.class) { if (!isRebuilding) { isRebuilding = true; - ExecutorService executor = Executors.newSingleThreadExecutor(); + ExecutorService executor = Executors.newSingleThreadExecutor(); // Create the executor executor.submit(() -> { try { reconNamespaceSummaryManager.rebuildNSSummaryTree(omMetadataManager); } finally { - synchronized (ReconUtils.class) { - isRebuilding = false; - rebuildTriggered = false; - } - executor.shutdown(); + isRebuilding = false; + rebuildTriggered = false; + executor.shutdown(); // Shutdown the executor here } }); } From 5773c187594be36591b888216921aa126786b9ea Mon Sep 17 00:00:00 2001 From: arafat Date: Tue, 7 May 2024 12:32:21 +0530 Subject: [PATCH 12/21] Made final review comments --- 
.../apache/hadoop/ozone/recon/ReconUtils.java | 48 +++++++++---------- 1 file changed, 23 insertions(+), 25 deletions(-) diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java index e18d53b2396b..a226773216ec 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java @@ -35,6 +35,7 @@ import java.util.concurrent.BlockingQueue; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; +import java.util.concurrent.atomic.AtomicBoolean; import java.util.stream.Collectors; import com.google.common.base.Preconditions; @@ -92,8 +93,9 @@ public ReconUtils() { private static final Logger LOG = LoggerFactory.getLogger( ReconUtils.class); - private static volatile boolean rebuildTriggered = false; - private static volatile boolean isRebuilding = false; + + private static AtomicBoolean rebuildTriggered = new AtomicBoolean(false); + private static AtomicBoolean isRebuilding = new AtomicBoolean(false); public static File getReconScmDbDir(ConfigurationSource conf) { return new ReconUtils().getReconDbDir(conf, OZONE_RECON_SCM_DB_DIR); @@ -278,8 +280,8 @@ public static String constructFullPath(OmKeyInfo omKeyInfo, ReconOMMetadataManager omMetadataManager) throws IOException { - // Return empty string to signify that path construction is temporarily unavailable - if (isRebuilding) { + // Return empty string if rebuild is triggered or still in progress + if (isRebuilding.get() || rebuildTriggered.get()) { return ""; } @@ -292,13 +294,8 @@ public static String constructFullPath(OmKeyInfo omKeyInfo, if (nsSummary == null) { break; } - if (nsSummary.getParentId() == -1 && !rebuildTriggered) { - synchronized (ReconUtils.class) { - if (!rebuildTriggered) { - triggerRebuild(reconNamespaceSummaryManager, omMetadataManager); - 
rebuildTriggered = true; // Set the flag to true inside a synchronized block - } - } + if (nsSummary.getParentId() == -1 && rebuildTriggered.compareAndSet(false, true)) { + triggerRebuild(reconNamespaceSummaryManager, omMetadataManager); } fullPath.insert(0, nsSummary.getDirName() + OM_KEY_PREFIX); @@ -319,20 +316,21 @@ public static String constructFullPath(OmKeyInfo omKeyInfo, private static void triggerRebuild(ReconNamespaceSummaryManager reconNamespaceSummaryManager, ReconOMMetadataManager omMetadataManager) { - synchronized (ReconUtils.class) { - if (!isRebuilding) { - isRebuilding = true; - ExecutorService executor = Executors.newSingleThreadExecutor(); // Create the executor - executor.submit(() -> { - try { - reconNamespaceSummaryManager.rebuildNSSummaryTree(omMetadataManager); - } finally { - isRebuilding = false; - rebuildTriggered = false; - executor.shutdown(); // Shutdown the executor here - } - }); - } + if (!isRebuilding.getAndSet(true)) { + ExecutorService executor = Executors.newSingleThreadExecutor(r -> { + Thread t = new Thread(r); + t.setName("RebuildNSSummaryThread"); // Setting a descriptive name for the thread + return t; + }); + + executor.submit(() -> { + try { + reconNamespaceSummaryManager.rebuildNSSummaryTree(omMetadataManager); + } finally { + isRebuilding.set(false); + } + }); + executor.shutdown(); } } From 415d3155cdbd85c6984b27b6673d55061b9ce060 Mon Sep 17 00:00:00 2001 From: arafat Date: Tue, 7 May 2024 17:44:51 +0530 Subject: [PATCH 13/21] Added more logs and removed the boolean flag --- .../apache/hadoop/ozone/recon/ReconUtils.java | 40 +++++++++---------- 1 file changed, 20 insertions(+), 20 deletions(-) diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java index a226773216ec..049048d0e03f 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java +++ 
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java @@ -274,17 +274,11 @@ public void untarCheckpointFile(File tarFile, Path destPath) * the path cannot be constructed at this time. * @throws IOException */ - public static String constructFullPath(OmKeyInfo omKeyInfo, ReconNamespaceSummaryManager reconNamespaceSummaryManager, ReconOMMetadataManager omMetadataManager) throws IOException { - // Return empty string if rebuild is triggered or still in progress - if (isRebuilding.get() || rebuildTriggered.get()) { - return ""; - } - StringBuilder fullPath = new StringBuilder(omKeyInfo.getKeyName()); long parentId = omKeyInfo.getParentObjectID(); boolean isDirectoryPresent = false; @@ -292,10 +286,15 @@ public static String constructFullPath(OmKeyInfo omKeyInfo, while (parentId != 0) { NSSummary nsSummary = reconNamespaceSummaryManager.getNSSummary(parentId); if (nsSummary == null) { - break; + LOG.warn("NSSummary tree is currently being rebuilt, returning empty string for path construction."); + return ""; } - if (nsSummary.getParentId() == -1 && rebuildTriggered.compareAndSet(false, true)) { - triggerRebuild(reconNamespaceSummaryManager, omMetadataManager); + if (nsSummary.getParentId() == -1) { + if (rebuildTriggered.compareAndSet(false, true)){ + triggerRebuild(reconNamespaceSummaryManager, omMetadataManager); + } + LOG.warn("NSSummary tree is currently being rebuilt, returning empty string for path construction."); + return ""; } fullPath.insert(0, nsSummary.getDirName() + OM_KEY_PREFIX); @@ -316,22 +315,23 @@ public static String constructFullPath(OmKeyInfo omKeyInfo, private static void triggerRebuild(ReconNamespaceSummaryManager reconNamespaceSummaryManager, ReconOMMetadataManager omMetadataManager) { - if (!isRebuilding.getAndSet(true)) { ExecutorService executor = Executors.newSingleThreadExecutor(r -> { Thread t = new Thread(r); - t.setName("RebuildNSSummaryThread"); // Setting a descriptive name for the thread + 
t.setName("RebuildNSSummaryThread"); return t; }); - executor.submit(() -> { - try { - reconNamespaceSummaryManager.rebuildNSSummaryTree(omMetadataManager); - } finally { - isRebuilding.set(false); - } - }); - executor.shutdown(); - } + executor.submit(() -> { + long startTime = System.currentTimeMillis(); + LOG.info("Rebuilding NSSummary tree..."); + try { + reconNamespaceSummaryManager.rebuildNSSummaryTree(omMetadataManager); + } finally { + long endTime = System.currentTimeMillis(); + LOG.info("NSSummary tree rebuild completed in {} ms.", endTime - startTime); + } + }); + executor.shutdown(); } /** From f2e8511afd4b31c34f57f3ee039764b2d9ca24f8 Mon Sep 17 00:00:00 2001 From: arafat Date: Tue, 7 May 2024 17:50:59 +0530 Subject: [PATCH 14/21] Fixed checkstyle issues --- .../ozone/recon/TestReconContainerEndpoint.java | 6 +++++- .../org/apache/hadoop/ozone/recon/ReconUtils.java | 13 ++++++------- .../hadoop/ozone/recon/api/ContainerEndpoint.java | 1 - 3 files changed, 11 insertions(+), 9 deletions(-) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconContainerEndpoint.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconContainerEndpoint.java index f4e8fff1d58c..b9a30890b0e3 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconContainerEndpoint.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconContainerEndpoint.java @@ -45,8 +45,12 @@ import java.io.IOException; import java.util.Collection; -import static org.junit.jupiter.api.Assertions.*; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertEquals; +/** + * Integration test for recon container endpoint. 
+ */ public class TestReconContainerEndpoint { private static OzoneClient client; diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java index 049048d0e03f..b8c30fb57f54 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java @@ -24,7 +24,6 @@ import java.io.FileInputStream; import java.io.FileOutputStream; import java.io.IOException; -import java.lang.reflect.Field; import java.net.HttpURLConnection; import java.net.URL; import java.nio.file.Path; @@ -290,7 +289,7 @@ public static String constructFullPath(OmKeyInfo omKeyInfo, return ""; } if (nsSummary.getParentId() == -1) { - if (rebuildTriggered.compareAndSet(false, true)){ + if (rebuildTriggered.compareAndSet(false, true)) { triggerRebuild(reconNamespaceSummaryManager, omMetadataManager); } LOG.warn("NSSummary tree is currently being rebuilt, returning empty string for path construction."); @@ -315,11 +314,11 @@ public static String constructFullPath(OmKeyInfo omKeyInfo, private static void triggerRebuild(ReconNamespaceSummaryManager reconNamespaceSummaryManager, ReconOMMetadataManager omMetadataManager) { - ExecutorService executor = Executors.newSingleThreadExecutor(r -> { - Thread t = new Thread(r); - t.setName("RebuildNSSummaryThread"); - return t; - }); + ExecutorService executor = Executors.newSingleThreadExecutor(r -> { + Thread t = new Thread(r); + t.setName("RebuildNSSummaryThread"); + return t; + }); executor.submit(() -> { long startTime = System.currentTimeMillis(); diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/ContainerEndpoint.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/ContainerEndpoint.java index 3e6642766696..86ef6c022d57 100644 --- 
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/ContainerEndpoint.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/ContainerEndpoint.java @@ -27,7 +27,6 @@ import org.apache.hadoop.hdds.scm.pipeline.PipelineManager; import org.apache.hadoop.hdds.scm.pipeline.PipelineNotFoundException; import org.apache.hadoop.hdds.scm.server.OzoneStorageContainerManager; -import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; From 5cb1cc53c922acf2ab57bfb756b270f5eb9d2d58 Mon Sep 17 00:00:00 2001 From: arafat Date: Wed, 8 May 2024 11:18:57 +0530 Subject: [PATCH 15/21] Fixed failing integration test --- .../ozone/recon/TestContainerEndpoint2.java | 4 +++ .../recon/TestReconContainerEndpoint.java | 31 +++++++------------ 2 files changed, 15 insertions(+), 20 deletions(-) create mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestContainerEndpoint2.java diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestContainerEndpoint2.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestContainerEndpoint2.java new file mode 100644 index 000000000000..37bb8889531f --- /dev/null +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestContainerEndpoint2.java @@ -0,0 +1,4 @@ +package org.apache.hadoop.ozone.recon; + +public class TestContainerEndpoint2 { +} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconContainerEndpoint.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconContainerEndpoint.java index b9a30890b0e3..da933b278b95 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconContainerEndpoint.java +++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconContainerEndpoint.java @@ -37,9 +37,7 @@ import org.apache.hadoop.ozone.recon.scm.ReconContainerManager; import java.nio.charset.StandardCharsets; import org.apache.hadoop.ozone.recon.spi.impl.OzoneManagerServiceProviderImpl; -import org.junit.jupiter.api.AfterAll; -import org.junit.jupiter.api.BeforeAll; -import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.*; import javax.ws.rs.core.Response; import java.io.IOException; @@ -58,8 +56,8 @@ public class TestReconContainerEndpoint { private static OzoneConfiguration conf; private static ObjectStore store; - @BeforeAll - public static void init() throws Exception { + @BeforeEach + public void init() throws Exception { conf = new OzoneConfiguration(); conf.set(OMConfigKeys.OZONE_DEFAULT_BUCKET_LAYOUT, OMConfigKeys.OZONE_BUCKET_LAYOUT_FILE_SYSTEM_OPTIMIZED); @@ -72,8 +70,8 @@ public static void init() throws Exception { store = client.getObjectStore(); } - @AfterAll - public static void shutdown() throws IOException { + @AfterEach + public void shutdown() throws IOException { if (client != null) { client.close(); } @@ -108,10 +106,7 @@ public void testContainerEndpointForFSOLayout() throws Exception { impl.syncDataFromOM(); //Search for the bucket from the bucket table and verify its FSO - String buckKey = "/" + volName + "/" + bucketName; - OmBucketInfo bucketInfo = - cluster.getReconServer().getOzoneManagerServiceProvider() - .getOMMetadataManagerInstance().getBucketTable().get(buckKey); + OmBucketInfo bucketInfo = cluster.getOzoneManager().getBucketInfo(volName, bucketName); assertNotNull(bucketInfo); assertEquals(BucketLayout.FILE_SYSTEM_OPTIMIZED, bucketInfo.getBucketLayout()); @@ -135,8 +130,7 @@ public void testContainerEndpointForFSOLayout() throws Exception { // Assert the file name and the complete path. 
KeyMetadata keyMetadata = keyMetadataList.iterator().next(); assertEquals("file1", keyMetadata.getKey()); - assertEquals("testvol/testbucket/dir1/dir2/dir3/file1", - keyMetadata.getCompletePath()); + assertEquals("testvol/fsobucket/dir1/dir2/dir3/file1", keyMetadata.getCompletePath()); testContainerID = 2L; response = getContainerEndpointResponse(testContainerID); @@ -148,12 +142,12 @@ public void testContainerEndpointForFSOLayout() throws Exception { // Assert the file name and the complete path. keyMetadata = keyMetadataList.iterator().next(); assertEquals("file1", keyMetadata.getKey()); - assertEquals("testvol/testbucket/file1", keyMetadata.getCompletePath()); + assertEquals("testvol/fsobucket/file1", keyMetadata.getCompletePath()); } @Test public void testContainerEndpointForOBSBucket() throws Exception { - String volumeName = "testvol"; + String volumeName = "testvol2"; String obsBucketName = "obsbucket"; String obsSingleFileKey = "file1"; @@ -173,10 +167,7 @@ public void testContainerEndpointForOBSBucket() throws Exception { impl.syncDataFromOM(); // Search for the bucket from the bucket table and verify its OBS - String bucketKey = "/" + volumeName + "/" + obsBucketName; - OmBucketInfo bucketInfo = - cluster.getReconServer().getOzoneManagerServiceProvider() - .getOMMetadataManagerInstance().getBucketTable().get(bucketKey); + OmBucketInfo bucketInfo = cluster.getOzoneManager().getBucketInfo(volumeName, obsBucketName); assertNotNull(bucketInfo); assertEquals(BucketLayout.OBJECT_STORE, bucketInfo.getBucketLayout()); @@ -195,7 +186,7 @@ public void testContainerEndpointForOBSBucket() throws Exception { KeyMetadata keyMetadata = keyMetadataList.iterator().next(); assertEquals("file1", keyMetadata.getKey()); - assertEquals("testvol/obsbucket/file1", keyMetadata.getCompletePath()); + assertEquals("testvol2/obsbucket/file1", keyMetadata.getCompletePath()); } private Response getContainerEndpointResponse(long containerId) { From 
248a375ea6e8346ea9034c96de1336323fe7ec3e Mon Sep 17 00:00:00 2001 From: arafat Date: Wed, 8 May 2024 12:32:46 +0530 Subject: [PATCH 16/21] Added tests to check for backporting --- .../ozone/recon/TestContainerEndpoint2.java | 4 - .../apache/hadoop/ozone/recon/ReconUtils.java | 8 +- .../api/TestNSSummaryEndpointWithFSO.java | 92 +++++++++++++++++-- 3 files changed, 90 insertions(+), 14 deletions(-) delete mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestContainerEndpoint2.java diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestContainerEndpoint2.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestContainerEndpoint2.java deleted file mode 100644 index 37bb8889531f..000000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestContainerEndpoint2.java +++ /dev/null @@ -1,4 +0,0 @@ -package org.apache.hadoop.ozone.recon; - -public class TestContainerEndpoint2 { -} diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java index b8c30fb57f54..3c709fcc64d9 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java @@ -75,6 +75,7 @@ import org.hadoop.ozone.recon.schema.tables.daos.GlobalStatsDao; import org.hadoop.ozone.recon.schema.tables.pojos.GlobalStats; import jakarta.annotation.Nonnull; +import org.jetbrains.annotations.TestOnly; import org.jooq.Configuration; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -90,7 +91,7 @@ public class ReconUtils { public ReconUtils() { } - private static final Logger LOG = LoggerFactory.getLogger( + private static Logger LOG = LoggerFactory.getLogger( ReconUtils.class); private static AtomicBoolean rebuildTriggered = new AtomicBoolean(false); @@ 
-503,4 +504,9 @@ public SCMNodeDetails getReconNodeDetails(OzoneConfiguration conf) { HddsServerUtil.getReconDataNodeBindAddress(conf)); return builder.build(); } + + @TestOnly + public static void setLogger(Logger logger) { + LOG = logger; + } } diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithFSO.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithFSO.java index 82669edf912d..c0a9bfdfcec0 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithFSO.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithFSO.java @@ -46,10 +46,7 @@ import org.apache.hadoop.ozone.recon.ReconUtils; import org.apache.hadoop.ozone.recon.api.handlers.BucketHandler; import org.apache.hadoop.ozone.recon.api.handlers.EntityHandler; -import org.apache.hadoop.ozone.recon.api.types.DUResponse; -import org.apache.hadoop.ozone.recon.api.types.FileSizeDistributionResponse; -import org.apache.hadoop.ozone.recon.api.types.ResponseStatus; -import org.apache.hadoop.ozone.recon.api.types.QuotaUsageResponse; +import org.apache.hadoop.ozone.recon.api.types.*; import org.apache.hadoop.ozone.recon.common.CommonUtils; import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager; import org.apache.hadoop.ozone.recon.scm.ReconNodeManager; @@ -63,6 +60,8 @@ import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.io.TempDir; +import org.mockito.ArgumentCaptor; +import org.slf4j.Logger; import javax.ws.rs.core.Response; @@ -77,8 +76,6 @@ import java.util.Set; import java.util.HashSet; -import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertArrayEquals; import static org.apache.hadoop.hdds.protocol.MockDatanodeDetails.randomDatanodeDetails; import static 
org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_DB_DIRS; import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.writeDirToOm; @@ -86,8 +83,8 @@ import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getTestReconOmMetadataManager; import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getMockOzoneManagerServiceProviderWithFSO; import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_NSSUMMARY_FLUSH_TO_DB_MAX_THRESHOLD; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; +import static org.junit.jupiter.api.Assertions.*; +import static org.mockito.Mockito.*; /** * Test for NSSummary REST APIs with FSO. @@ -766,8 +763,85 @@ public void testConstructFullPath() throws IOException { reconNamespaceSummaryManager, reconOMMetadataManager); expectedPath = "vol2/bucket3/dir5/file9"; Assertions.assertEquals(expectedPath, fullPath); + + // Check for when we encounter an NSSummary with parentId -1 + // Fetch NSSummary for dir1 and immediately update its parentId. 
+ NSSummary dir1Summary = reconNamespaceSummaryManager.getNSSummary(DIR_ONE_OBJECT_ID); + dir1Summary.setParentId(-1); // Update parentId to -1 + + reconNamespaceSummaryManager.deleteNSSummary(DIR_ONE_OBJECT_ID); + reconNamespaceSummaryManager.storeNSSummary(DIR_ONE_OBJECT_ID, dir1Summary); + + NSSummary changedDir1Summary = reconNamespaceSummaryManager.getNSSummary(DIR_ONE_OBJECT_ID); + Assertions.assertEquals(-1, changedDir1Summary.getParentId(), "The parentId should be updated to -1"); + + keyInfo = new OmKeyInfo.Builder() + .setKeyName("file2") + .setVolumeName(VOL) + .setBucketName(BUCKET_ONE) + .setObjectID(KEY_TWO_OBJECT_ID) + .setParentObjectID(DIR_TWO_OBJECT_ID) + .build(); + // Call constructFullPath and verify the result + fullPath = ReconUtils.constructFullPath(keyInfo, + reconNamespaceSummaryManager, reconOMMetadataManager); } + @Test + public void testConstructFullPathWithNegativeParentIdTriggersRebuild() throws IOException { + // Setup + long dirOneObjectId = 1L; // Sample object ID for the directory + ReconNamespaceSummaryManager mockSummaryManager = mock(ReconNamespaceSummaryManager.class); + ReconOMMetadataManager mockMetadataManager = mock(ReconOMMetadataManager.class); + NSSummary dir1Summary = new NSSummary(); + dir1Summary.setParentId(-1); // Simulate directory at the top of the tree + when(mockSummaryManager.getNSSummary(dirOneObjectId)).thenReturn(dir1Summary); + + OmKeyInfo keyInfo = new OmKeyInfo.Builder() + .setKeyName("file2") + .setVolumeName("vol") + .setBucketName("bucket1") + .setObjectID(2L) + .setParentObjectID(dirOneObjectId) + .build(); + + String result = ReconUtils.constructFullPath(keyInfo, mockSummaryManager, mockMetadataManager); + assertEquals("", result, "Expected an empty string return due to rebuild trigger"); + } + + @Test + public void testLoggingWhenParentIdIsNegative() throws IOException { + ReconNamespaceSummaryManager mockManager = + mock(ReconNamespaceSummaryManager.class); + Logger mockLogger = 
mock(Logger.class); + ReconUtils.setLogger(mockLogger); + + NSSummary mockSummary = new NSSummary(); + mockSummary.setParentId(-1); + when(mockManager.getNSSummary(anyLong())).thenReturn(mockSummary); + + OmKeyInfo keyInfo = new OmKeyInfo.Builder() + .setKeyName("testKey") + .setVolumeName("vol") + .setBucketName("bucket") + .setObjectID(1L) + .setParentObjectID(1L) + .build(); + + ReconUtils.constructFullPath(keyInfo, mockManager, null); + + // Assert + ArgumentCaptor logCaptor = ArgumentCaptor.forClass(String.class); + verify(mockLogger).warn(logCaptor.capture()); + String loggedMessage = logCaptor.getValue(); + + // Here we can assert the exact message we expect to see in the logs. + assertEquals( + "NSSummary tree is currently being rebuilt, returning empty string " + + "for path construction.", loggedMessage); + } + + /** * Write directories and keys info into OM DB. * @throws Exception @@ -1324,7 +1398,7 @@ private static BucketLayout getBucketLayout() { } private static SCMNodeStat getMockSCMRootStat() { - return new SCMNodeStat(ROOT_QUOTA, ROOT_DATA_SIZE, + return new SCMNodeStat(ROOT_QUOTA, ROOT_DATA_SIZE, ROOT_QUOTA - ROOT_DATA_SIZE, 0, ROOT_QUOTA - ROOT_DATA_SIZE - 1); } } From 047ac8b399378d7877aa54a85a2953a1e93776e9 Mon Sep 17 00:00:00 2001 From: arafat Date: Wed, 8 May 2024 13:09:23 +0530 Subject: [PATCH 17/21] Fixed checkstyle issues --- .../ozone/recon/TestReconContainerEndpoint.java | 4 +++- .../recon/api/TestNSSummaryEndpointWithFSO.java | 14 +++++++++++--- 2 files changed, 14 insertions(+), 4 deletions(-) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconContainerEndpoint.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconContainerEndpoint.java index da933b278b95..95a5c0c19927 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconContainerEndpoint.java +++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconContainerEndpoint.java @@ -37,7 +37,9 @@ import org.apache.hadoop.ozone.recon.scm.ReconContainerManager; import java.nio.charset.StandardCharsets; import org.apache.hadoop.ozone.recon.spi.impl.OzoneManagerServiceProviderImpl; -import org.junit.jupiter.api.*; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; import javax.ws.rs.core.Response; import java.io.IOException; diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithFSO.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithFSO.java index c0a9bfdfcec0..54da926601e5 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithFSO.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithFSO.java @@ -46,7 +46,11 @@ import org.apache.hadoop.ozone.recon.ReconUtils; import org.apache.hadoop.ozone.recon.api.handlers.BucketHandler; import org.apache.hadoop.ozone.recon.api.handlers.EntityHandler; -import org.apache.hadoop.ozone.recon.api.types.*; +import org.apache.hadoop.ozone.recon.api.types.DUResponse; +import org.apache.hadoop.ozone.recon.api.types.NSSummary; +import org.apache.hadoop.ozone.recon.api.types.QuotaUsageResponse; +import org.apache.hadoop.ozone.recon.api.types.ResponseStatus; +import org.apache.hadoop.ozone.recon.api.types.FileSizeDistributionResponse; import org.apache.hadoop.ozone.recon.common.CommonUtils; import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager; import org.apache.hadoop.ozone.recon.scm.ReconNodeManager; @@ -83,8 +87,12 @@ import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getTestReconOmMetadataManager; import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getMockOzoneManagerServiceProviderWithFSO; 
import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_NSSUMMARY_FLUSH_TO_DB_MAX_THRESHOLD; -import static org.junit.jupiter.api.Assertions.*; -import static org.mockito.Mockito.*; +import static org.junit.jupiter.api.Assertions.assertArrayEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; +import static org.mockito.Mockito.anyLong; +import static org.mockito.Mockito.verify; /** * Test for NSSummary REST APIs with FSO. From fafc2e3d4632d01990f5f37cf0e99009d2f29543 Mon Sep 17 00:00:00 2001 From: arafat Date: Wed, 8 May 2024 13:40:02 +0530 Subject: [PATCH 18/21] Fixed the missed checkstyle --- .../apache/hadoop/ozone/recon/ReconUtils.java | 20 +++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java index 3c709fcc64d9..5442a1536e23 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java @@ -91,7 +91,7 @@ public class ReconUtils { public ReconUtils() { } - private static Logger LOG = LoggerFactory.getLogger( + private static Logger log = LoggerFactory.getLogger( ReconUtils.class); private static AtomicBoolean rebuildTriggered = new AtomicBoolean(false); @@ -136,7 +136,7 @@ public File getReconDbDir(ConfigurationSource conf, String dirConfigKey) { return metadataDir; } - LOG.warn("{} is not configured. We recommend adding this setting. " + + log.warn("{} is not configured. We recommend adding this setting. 
" + "Falling back to {} instead.", dirConfigKey, HddsConfigKeys.OZONE_METADATA_DIRS); return getOzoneMetaDirPath(conf); @@ -171,7 +171,7 @@ public static File createTarFile(Path sourcePath) throws IOException { org.apache.hadoop.io.IOUtils.closeStream(tarOs); org.apache.hadoop.io.IOUtils.closeStream(fileOutputStream); } catch (Exception e) { - LOG.error("Exception encountered when closing " + + log.error("Exception encountered when closing " + "TAR file output stream: " + e); } } @@ -236,7 +236,7 @@ public void untarCheckpointFile(File tarFile, Path destPath) if (entry.isDirectory()) { boolean success = f.mkdirs(); if (!success) { - LOG.error("Unable to create directory found in tar."); + log.error("Unable to create directory found in tar."); } } else { //Write contents of file in archive to a new file. @@ -286,14 +286,14 @@ public static String constructFullPath(OmKeyInfo omKeyInfo, while (parentId != 0) { NSSummary nsSummary = reconNamespaceSummaryManager.getNSSummary(parentId); if (nsSummary == null) { - LOG.warn("NSSummary tree is currently being rebuilt, returning empty string for path construction."); + log.warn("NSSummary tree is currently being rebuilt, returning empty string for path construction."); return ""; } if (nsSummary.getParentId() == -1) { if (rebuildTriggered.compareAndSet(false, true)) { triggerRebuild(reconNamespaceSummaryManager, omMetadataManager); } - LOG.warn("NSSummary tree is currently being rebuilt, returning empty string for path construction."); + log.warn("NSSummary tree is currently being rebuilt, returning empty string for path construction."); return ""; } fullPath.insert(0, nsSummary.getDirName() + OM_KEY_PREFIX); @@ -323,12 +323,12 @@ private static void triggerRebuild(ReconNamespaceSummaryManager reconNamespaceSu executor.submit(() -> { long startTime = System.currentTimeMillis(); - LOG.info("Rebuilding NSSummary tree..."); + log.info("Rebuilding NSSummary tree..."); try { 
reconNamespaceSummaryManager.rebuildNSSummaryTree(omMetadataManager); } finally { long endTime = System.currentTimeMillis(); - LOG.info("NSSummary tree rebuild completed in {} ms.", endTime - startTime); + log.info("NSSummary tree rebuild completed in {} ms.", endTime - startTime); } }); executor.shutdown(); @@ -379,7 +379,7 @@ public File getLastKnownDB(File reconDbDir, String fileNamePrefix) { lastKnownSnapshotFileName = fileName; } } catch (NumberFormatException nfEx) { - LOG.warn("Unknown file found in Recon DB dir : {}", fileName); + log.warn("Unknown file found in Recon DB dir : {}", fileName); } } } @@ -507,6 +507,6 @@ public SCMNodeDetails getReconNodeDetails(OzoneConfiguration conf) { @TestOnly public static void setLogger(Logger logger) { - LOG = logger; + log = logger; } } From 50ac78f77804feadf585407a31f0f2843dfdfa7c Mon Sep 17 00:00:00 2001 From: arafat Date: Wed, 8 May 2024 14:36:31 +0530 Subject: [PATCH 19/21] Fixed the final review comments --- .../java/org/apache/hadoop/ozone/recon/ReconUtils.java | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java index 5442a1536e23..b8a9ce5a2bfb 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java @@ -75,7 +75,7 @@ import org.hadoop.ozone.recon.schema.tables.daos.GlobalStatsDao; import org.hadoop.ozone.recon.schema.tables.pojos.GlobalStats; import jakarta.annotation.Nonnull; -import org.jetbrains.annotations.TestOnly; +import com.google.common.annotations.VisibleForTesting; import org.jooq.Configuration; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -95,7 +95,6 @@ public ReconUtils() { ReconUtils.class); private static AtomicBoolean rebuildTriggered = new AtomicBoolean(false); - private static 
AtomicBoolean isRebuilding = new AtomicBoolean(false); public static File getReconScmDbDir(ConfigurationSource conf) { return new ReconUtils().getReconDbDir(conf, OZONE_RECON_SCM_DB_DIR); @@ -286,7 +285,8 @@ public static String constructFullPath(OmKeyInfo omKeyInfo, while (parentId != 0) { NSSummary nsSummary = reconNamespaceSummaryManager.getNSSummary(parentId); if (nsSummary == null) { - log.warn("NSSummary tree is currently being rebuilt, returning empty string for path construction."); + log.warn("NSSummary tree is currently being rebuilt or the directory could be in the progress of " + + "deletion, returning empty string for path construction."); return ""; } if (nsSummary.getParentId() == -1) { @@ -505,7 +505,7 @@ public SCMNodeDetails getReconNodeDetails(OzoneConfiguration conf) { return builder.build(); } - @TestOnly + @VisibleForTesting public static void setLogger(Logger logger) { log = logger; } From d802e67199234feb0e2cad5d85b00f4310d9dfe5 Mon Sep 17 00:00:00 2001 From: arafat Date: Wed, 8 May 2024 14:38:07 +0530 Subject: [PATCH 20/21] Changed the java doc as well --- .../src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java index b8a9ce5a2bfb..76b601b1c0eb 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java @@ -265,7 +265,7 @@ public void untarCheckpointFile(File tarFile, Path destPath) * The method begins with the leaf node (the key itself) and recursively prepends parent directory names, fetched * via NSSummary objects, until reaching the parent bucket (parentId is -1). It effectively builds the path from * bottom to top, finally prepending the volume and bucket names to complete the full path. 
If the directory structure - * is currently being rebuilt (indicated by the isRebuilding flag), this method returns an empty string to signify + * is currently being rebuilt (indicated by the rebuildTriggered flag), this method returns an empty string to signify * that path construction is temporarily unavailable. * * @param omKeyInfo The OmKeyInfo object for the key From 4c9322166c2a4f19b028782aca55528a288d5025 Mon Sep 17 00:00:00 2001 From: arafat Date: Wed, 8 May 2024 18:03:32 +0530 Subject: [PATCH 21/21] Fixed find bugs --- .../hadoop/ozone/recon/TestReconContainerEndpoint.java | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconContainerEndpoint.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconContainerEndpoint.java index 95a5c0c19927..8c334780d94f 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconContainerEndpoint.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconContainerEndpoint.java @@ -53,10 +53,10 @@ */ public class TestReconContainerEndpoint { - private static OzoneClient client; - private static MiniOzoneCluster cluster = null; - private static OzoneConfiguration conf; - private static ObjectStore store; + private OzoneConfiguration conf; + private MiniOzoneCluster cluster; + private OzoneClient client; + private ObjectStore store; @BeforeEach public void init() throws Exception {