Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
33 commits
Select commit Hold shift + click to select a range
49ef9e8
Rebased the PR
ArafatKhan2198 Oct 23, 2023
a47e09f
Rebased the PR
ArafatKhan2198 Oct 23, 2023
0c2005f
New changes
ArafatKhan2198 Oct 25, 2023
6031ad5
Fixed NSSummaryTaskWithOBS reprocess method
ArafatKhan2198 Oct 29, 2023
4c28bc9
Refactored NSSummaryTaskWithOBS and removed unnecessary methods
ArafatKhan2198 Oct 29, 2023
fd81752
Added process() method to NSSummaryTaskWithOBS
ArafatKhan2198 Oct 29, 2023
606044a
Removed unnecessary debugging code
ArafatKhan2198 Oct 29, 2023
4fe5ef8
Added Unit test for reprocess() for NSSummaryTaskWithOBS
ArafatKhan2198 Oct 29, 2023
17df34d
Added UT's for the process() method in NSSummaryWithOBS
ArafatKhan2198 Oct 30, 2023
da7f4e1
Fixed first set of review comments
ArafatKhan2198 Dec 7, 2023
37c0438
Took care of the review comments and also fixed the failing UT's
ArafatKhan2198 Dec 11, 2023
e9f0c4b
Merge branch 'master' into HDDS-7810
ArafatKhan2198 Dec 11, 2023
2655425
Migrated junit for TestNSSummaryTaskWithOBS to jUnit5
ArafatKhan2198 Dec 11, 2023
f610de0
Removed unnecessary changes
ArafatKhan2198 Dec 11, 2023
9c53399
Made the latest review changes
ArafatKhan2198 Jan 29, 2024
c5d3b27
Made review changes
ArafatKhan2198 Jan 29, 2024
68ab58f
Fixed failing CI checks in fork
ArafatKhan2198 Jan 30, 2024
ddd1a3b
Added Apache license for class
ArafatKhan2198 Jan 30, 2024
76597a4
Fixed checkstyle issues for HDDS-7810
ArafatKhan2198 Jan 31, 2024
5ab09d3
Potential fix for serialization error
ArafatKhan2198 Jan 31, 2024
ac2f003
HDDS-7810 fixed the find bug problem
ArafatKhan2198 Jan 31, 2024
297be0c
Fixed checkstyle issues
ArafatKhan2198 Jan 31, 2024
01f817b
Fixed failing UT
ArafatKhan2198 Feb 7, 2024
913662e
Fixed failing UT's and also added tests for the NSSummaryEndpoint for…
ArafatKhan2198 Feb 11, 2024
57801b2
Fixed checkstyle
ArafatKhan2198 Feb 11, 2024
3e8ec2a
Added license
ArafatKhan2198 Feb 11, 2024
576b4c0
Merge branch 'master' into HDDS-7810
ArafatKhan2198 Feb 13, 2024
76158c9
Made changes for code review
ArafatKhan2198 Feb 15, 2024
cf04f83
Fixed review comments
ArafatKhan2198 Feb 27, 2024
dca31d3
Fixed more review comments
ArafatKhan2198 Feb 27, 2024
1d2d0b5
Made changes to the TestNSSummaryTaskWithOBS
ArafatKhan2198 Feb 28, 2024
e1120ae
Change the names of variable constants
ArafatKhan2198 Feb 29, 2024
0555a7f
Fixed potential NPE being thrown by value
ArafatKhan2198 Feb 29, 2024
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -178,9 +178,7 @@ public static BucketHandler getBucketHandler(
omMetadataManager, reconSCM, bucketInfo);
} else if (bucketInfo.getBucketLayout()
.equals(BucketLayout.OBJECT_STORE)) {
// TODO: HDDS-7810 Write a handler for object store bucket
// We can use LegacyBucketHandler for OBS bucket for now.
return new LegacyBucketHandler(reconNamespaceSummaryManager,
return new OBSBucketHandler(reconNamespaceSummaryManager,
omMetadataManager, reconSCM, bucketInfo);
} else {
LOG.error("Unsupported bucket layout: " +
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -42,7 +42,7 @@
import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX;

/**
* Class for handling FSO buckets.
* Class for handling FSO buckets NameSpaceSummaries.
*/
public class FSOBucketHandler extends BucketHandler {
private static final Logger LOG =
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -41,7 +41,7 @@
import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX;

/**
* Class for handling Legacy buckets.
* Class for handling Legacy buckets NameSpaceSummaries.
*/
public class LegacyBucketHandler extends BucketHandler {

Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,268 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ozone.recon.api.handlers;


import org.apache.hadoop.hdds.scm.server.OzoneStorageContainerManager;
import org.apache.hadoop.hdds.utils.db.Table;
import org.apache.hadoop.hdds.utils.db.TableIterator;
import org.apache.hadoop.ozone.om.helpers.BucketLayout;
import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
import org.apache.hadoop.ozone.recon.api.types.DUResponse;
import org.apache.hadoop.ozone.recon.api.types.EntityType;
import org.apache.hadoop.ozone.recon.api.types.NSSummary;
import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager;
import org.apache.hadoop.ozone.recon.spi.ReconNamespaceSummaryManager;

import java.io.IOException;
import java.util.List;

import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX;

/**
* Class for handling OBS buckets NameSpaceSummaries.
*/
public class OBSBucketHandler extends BucketHandler {

private final String vol;
private final String bucket;
private final OmBucketInfo omBucketInfo;

public OBSBucketHandler(
ReconNamespaceSummaryManager reconNamespaceSummaryManager,
ReconOMMetadataManager omMetadataManager,
OzoneStorageContainerManager reconSCM,
OmBucketInfo bucketInfo) {
super(reconNamespaceSummaryManager, omMetadataManager,
reconSCM);
this.omBucketInfo = bucketInfo;
this.vol = omBucketInfo.getVolumeName();
this.bucket = omBucketInfo.getBucketName();
}

/**
* Helper function to check if a path is a key, or invalid.
*
* @param keyName key name
* @return KEY, or UNKNOWN
* @throws IOException
*/
@Override
public EntityType determineKeyPath(String keyName) throws IOException {
String key = OM_KEY_PREFIX + vol +
OM_KEY_PREFIX + bucket +
OM_KEY_PREFIX + keyName;

Table<String, OmKeyInfo> keyTable = getKeyTable();

try (
TableIterator<String, ? extends Table.KeyValue<String, OmKeyInfo>>
iterator = keyTable.iterator()) {
iterator.seek(key);
if (iterator.hasNext()) {
Table.KeyValue<String, OmKeyInfo> kv = iterator.next();
String dbKey = kv.getKey();
if (dbKey.equals(key)) {
return EntityType.KEY;
}
}
}
return EntityType.UNKNOWN;
}

/**
* This method handles disk usage of direct keys.
*
* @param parentId The identifier for the parent bucket.
* @param withReplica if withReplica is enabled, set sizeWithReplica
* for each direct key's DU
* @param listFile if listFile is enabled, append key DU as a children
* keys
* @param duData the current DU data
* @param normalizedPath the normalized path request
* @return the total DU of all direct keys
* @throws IOException IOE
*/
@Override
public long handleDirectKeys(long parentId, boolean withReplica,
boolean listFile,
List<DUResponse.DiskUsage> duData,
String normalizedPath) throws IOException {

NSSummary nsSummary = getReconNamespaceSummaryManager()
.getNSSummary(parentId);
// Handle the case of an empty bucket.
if (nsSummary == null) {
return 0;
}
Comment on lines +109 to +114
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Check at the start of the method.

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Thank you for the suggestion. I have implemented the recommended changes.


Table<String, OmKeyInfo> keyTable = getKeyTable();
long keyDataSizeWithReplica = 0L;

try (
TableIterator<String, ? extends Table.KeyValue<String, OmKeyInfo>>
iterator = keyTable.iterator()) {

String seekPrefix = OM_KEY_PREFIX +
vol +
OM_KEY_PREFIX +
bucket +
OM_KEY_PREFIX;

iterator.seek(seekPrefix);

while (iterator.hasNext()) {
// KeyName : OmKeyInfo-Object
Table.KeyValue<String, OmKeyInfo> kv = iterator.next();
String dbKey = kv.getKey();

// Exit loop if the key doesn't match the seekPrefix.
if (!dbKey.startsWith(seekPrefix)) {
break;
}

OmKeyInfo keyInfo = kv.getValue();
if (keyInfo != null) {
DUResponse.DiskUsage diskUsage = new DUResponse.DiskUsage();
String objectName = keyInfo.getKeyName();
diskUsage.setSubpath(objectName);
diskUsage.setKey(true);
diskUsage.setSize(keyInfo.getDataSize());

if (withReplica) {
long keyDU = keyInfo.getReplicatedSize();
keyDataSizeWithReplica += keyDU;
diskUsage.setSizeWithReplica(keyDU);
}
// List all the keys for the OBS bucket if requested.
if (listFile) {
duData.add(diskUsage);
}
}
}
}

return keyDataSizeWithReplica;
}

/**
* Calculates the total disk usage (DU) for an Object Store Bucket (OBS) by
* summing the sizes of all keys contained within the bucket.
* Since OBS buckets operate on a flat hierarchy, this method iterates through
* all the keys in the bucket without the need to traverse directories.
*
* @param parentId The identifier for the parent bucket.
* @return The total disk usage of all keys within the specified OBS bucket.
* @throws IOException
*/
@Override
public long calculateDUUnderObject(long parentId) throws IOException {
// Initialize the total disk usage variable.
long totalDU = 0L;

// Access the key table for the bucket.
Table<String, OmKeyInfo> keyTable = getKeyTable();

try (
TableIterator<String, ? extends Table.KeyValue<String, OmKeyInfo>>
iterator = keyTable.iterator()) {
// Construct the seek prefix to filter keys under this bucket.
String seekPrefix =
OM_KEY_PREFIX + vol + OM_KEY_PREFIX + bucket + OM_KEY_PREFIX;
iterator.seek(seekPrefix);

// Iterate over keys in the bucket.
while (iterator.hasNext()) {
Table.KeyValue<String, OmKeyInfo> kv = iterator.next();
String keyName = kv.getKey();

// Break the loop if the current key does not start with the seekPrefix.
if (!keyName.startsWith(seekPrefix)) {
break;
}

// Sum the size of each key to the total disk usage.
OmKeyInfo keyInfo = kv.getValue();
if (keyInfo != null) {
totalDU += keyInfo.getDataSize();
}
}
}

// Return the total disk usage of all keys in the bucket.
return totalDU;
}

/**
* Object stores do not support directories.
*
* @throws UnsupportedOperationException
*/
@Override
public long getDirObjectId(String[] names)
throws UnsupportedOperationException {
throw new UnsupportedOperationException(
"Object stores do not support directories.");
}

/**
* Object stores do not support directories.
*
* @throws UnsupportedOperationException
*/
@Override
public long getDirObjectId(String[] names, int cutoff)
throws UnsupportedOperationException {
throw new UnsupportedOperationException(
"Object stores do not support directories.");
}

/**
* Returns the keyInfo object from the KEY table.
* @return OmKeyInfo
*/
@Override
public OmKeyInfo getKeyInfo(String[] names) throws IOException {
String ozoneKey = OM_KEY_PREFIX;
ozoneKey += String.join(OM_KEY_PREFIX, names);
Comment on lines +243 to +244
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

nit:

Suggested change
String ozoneKey = OM_KEY_PREFIX;
ozoneKey += String.join(OM_KEY_PREFIX, names);
String ozoneKey = OM_KEY_PREFIX + String.join(OM_KEY_PREFIX, names);

I'm guessing there must be some utility method for this.

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I have checked the utility classes in recon, and currently there is no utility method available for this specific line. All other handlers within the codebase also utilize the same join method.


return getKeyTable().getSkipCache(ozoneKey);
}

/**
* Object stores do not support directories.
*
* @throws UnsupportedOperationException
*/
@Override
public OmDirectoryInfo getDirInfo(String[] names) throws IOException {
throw new UnsupportedOperationException(
"Object stores do not support directories.");
}

public Table<String, OmKeyInfo> getKeyTable() {
return getOmMetadataManager().getKeyTable(getBucketLayout());
}

public BucketLayout getBucketLayout() {
return BucketLayout.OBJECT_STORE;
}

}
Original file line number Diff line number Diff line change
Expand Up @@ -38,6 +38,7 @@
import java.util.concurrent.Callable;
import java.util.concurrent.Future;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.TimeUnit;

/**
* Task to query data from OMDB and write into Recon RocksDB.
Expand Down Expand Up @@ -68,6 +69,7 @@ public class NSSummaryTask implements ReconOmTask {
private final ReconOMMetadataManager reconOMMetadataManager;
private final NSSummaryTaskWithFSO nsSummaryTaskWithFSO;
private final NSSummaryTaskWithLegacy nsSummaryTaskWithLegacy;
private final NSSummaryTaskWithOBS nsSummaryTaskWithOBS;
private final OzoneConfiguration ozoneConfiguration;

@Inject
Expand All @@ -86,6 +88,9 @@ public NSSummaryTask(ReconNamespaceSummaryManager
this.nsSummaryTaskWithLegacy = new NSSummaryTaskWithLegacy(
reconNamespaceSummaryManager,
reconOMMetadataManager, ozoneConfiguration);
this.nsSummaryTaskWithOBS = new NSSummaryTaskWithOBS(
reconNamespaceSummaryManager,
reconOMMetadataManager, ozoneConfiguration);
}

@Override
Expand All @@ -95,20 +100,28 @@ public String getTaskName() {

@Override
public Pair<String, Boolean> process(OMUpdateEventBatch events) {
boolean success;
success = nsSummaryTaskWithFSO.processWithFSO(events);
if (success) {
success = nsSummaryTaskWithLegacy.processWithLegacy(events);
} else {
boolean success = nsSummaryTaskWithFSO.processWithFSO(events);
if (!success) {
LOG.error("processWithFSO failed.");
}
success = nsSummaryTaskWithLegacy.processWithLegacy(events);
if (!success) {
LOG.error("processWithLegacy failed.");
}
success = nsSummaryTaskWithOBS.processWithOBS(events);
if (!success) {
LOG.error("processWithOBS failed.");
}
return new ImmutablePair<>(getTaskName(), success);
}

@Override
public Pair<String, Boolean> reprocess(OMMetadataManager omMetadataManager) {
// Initialize a list of tasks to run in parallel
Collection<Callable<Boolean>> tasks = new ArrayList<>();

long startTime = System.nanoTime(); // Record start time

try {
// reinit Recon RocksDB's namespace CF.
reconNamespaceSummaryManager.clearNSSummaryTable();
Expand All @@ -122,6 +135,8 @@ public Pair<String, Boolean> reprocess(OMMetadataManager omMetadataManager) {
.reprocessWithFSO(omMetadataManager));
tasks.add(() -> nsSummaryTaskWithLegacy
.reprocessWithLegacy(reconOMMetadataManager));
tasks.add(() -> nsSummaryTaskWithOBS
.reprocessWithOBS(reconOMMetadataManager));

List<Future<Boolean>> results;
ThreadFactory threadFactory = new ThreadFactoryBuilder()
Expand All @@ -137,17 +152,25 @@ public Pair<String, Boolean> reprocess(OMMetadataManager omMetadataManager) {
}
}
} catch (InterruptedException ex) {
LOG.error("Error while reprocessing NSSummary " +
"table in Recon DB. ", ex);
LOG.error("Error while reprocessing NSSummary table in Recon DB.", ex);
return new ImmutablePair<>(getTaskName(), false);
} catch (ExecutionException ex2) {
LOG.error("Error while reprocessing NSSummary " +
"table in Recon DB. ", ex2);
LOG.error("Error while reprocessing NSSummary table in Recon DB.", ex2);
return new ImmutablePair<>(getTaskName(), false);
} finally {
executorService.shutdown();

long endTime = System.nanoTime();
// Convert to milliseconds
long durationInMillis =
TimeUnit.NANOSECONDS.toMillis(endTime - startTime);

// Log performance metrics
LOG.info("Task execution time: {} milliseconds", durationInMillis);
}

return new ImmutablePair<>(getTaskName(), true);
}

}

Loading