Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
46 commits
Select commit Hold shift + click to select a range
8c626c6
HDDS-13021. AbstractKeyDeletingService should have unique call id for…
swamirishi May 12, 2025
50b1a23
HDDS-13022. Segregate Exclusive Dir Size and ExclusiveKeySize in Snap…
swamirishi May 12, 2025
685aac2
HDDS-13022. Add Test OzoneSnapshot client code
swamirishi May 12, 2025
c29d3d8
HDDS-13022. Add hashcode for OzoneSnapshots
swamirishi May 12, 2025
49184f6
HDDS-13022. Add hashcode for OzoneSnapshots
swamirishi May 12, 2025
2014b2a
Merge remote-tracking branch 'apache/master' into HEAD
swamirishi May 13, 2025
ea94417
HDDS-13022. Address review comments
swamirishi May 13, 2025
7bcbda6
HDDS-13022. Address review comments
swamirishi May 13, 2025
b86ec7e
HDDS-13031. Remove Lock set bits for SNAPSHOT_GC_LOCK
swamirishi May 13, 2025
617cf00
HDDS-13026. KeyDeleting service should also delete RenameEntries
swamirishi May 14, 2025
33435cb
Merge remote-tracking branch 'origin/HDDS-13031' into HEAD
swamirishi May 14, 2025
40b6a83
HDDS-13031. Fix Locking setbit issue
swamirishi May 14, 2025
c962507
HDDS-13031. Fix checkstyle
swamirishi May 14, 2025
29b1c6d
Merge remote-tracking branch 'origin/HDDS-13031' into HEAD
swamirishi May 14, 2025
ef854c6
HDDS-13025. Refactor KeyDeleting Service to use ReclaimableKeyFilter
swamirishi May 14, 2025
d97755f
HDDS-13025. Call Pending Deletion Function for AOS
swamirishi May 14, 2025
bf59795
HDDS-13025. Revert java doc changes
swamirishi May 15, 2025
5dc9edc
Merge remote-tracking branch 'apache/master' into HEAD
swamirishi May 16, 2025
6f12aae
HDDS-13031. Implement a Flat Lock resource in OzoneManagerLock
swamirishi May 16, 2025
c8ea2e6
Merge remote-tracking branch 'apache/master' into HEAD
swamirishi May 16, 2025
588dab3
HDDS-13031. Fix pmd
swamirishi May 16, 2025
2cad0f1
HDDS-13031. Fix lock details code
swamirishi May 16, 2025
6dcf230
Merge remote-tracking branch 'apache/master' into HEAD
swamirishi May 17, 2025
75c4764
HDDS-13031. Fix alignment
swamirishi May 17, 2025
37b1d30
Merge remote-tracking branch 'apache/master' into HEAD
swamirishi May 18, 2025
41aeccb
Merge remote-tracking branch 'apache/master' into HEAD
swamirishi May 18, 2025
2d92095
HDDS-13025. Add bootstrap lock
swamirishi May 19, 2025
4e76337
HDDS-13076. Refactor OzoneManagerLock class to rename Resource class …
swamirishi May 19, 2025
f1144dd
Merge remote-tracking branch 'origin/HDDS-13076' into HEAD
swamirishi May 19, 2025
7f0c0d2
Merge remote-tracking branch 'apache/master' into HEAD
swamirishi May 20, 2025
82400c0
HDDS-13031. Address review comments
swamirishi May 20, 2025
6b9f7e0
Merge remote-tracking branch 'origin/HDDS-13031' into HEAD
swamirishi May 20, 2025
15a3777
Merge remote-tracking branch 'apache/master' into HEAD
swamirishi May 20, 2025
3884018
HDDS-13025. Fix javadoc
swamirishi May 20, 2025
7f4387a
HDDS-13025. Add delete key metrics
swamirishi May 21, 2025
be419bd
HDDS-13025. Make KeydeletingService run parallel deep cleaning for sn…
swamirishi May 21, 2025
42c8d3d
HDDS-13025. Fix pmd check
swamirishi May 21, 2025
62e1bf2
Merge remote-tracking branch 'apache/master' into HEAD
swamirishi May 21, 2025
690330b
HDDS-13025. Merge with master
swamirishi May 21, 2025
9b4c6a5
HDDS-13025. Fix tests
swamirishi May 22, 2025
5fb37d5
HDDS-13025. Fix tests
swamirishi May 22, 2025
1767e38
HDDS-13025. Address review comments
swamirishi May 22, 2025
82d36ab
HDDS-13025. Fix checkstyle
swamirishi May 22, 2025
061d35f
HDDS-13025. Fix comments
swamirishi May 22, 2025
3c71d1d
HDDS-13025. Fix comments
swamirishi May 22, 2025
20adb44
HDDS-13025. remove clone keyInfo list
swamirishi May 23, 2025
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -399,8 +399,13 @@ public final class OMConfigKeys {
// Config key: number of threads used by the directory deleting service.
public static final String OZONE_THREAD_NUMBER_DIR_DELETION =
    "ozone.thread.number.dir.deletion";

// Config key: number of threads used by the key deleting service.
// Read in KeyManagerImpl.start(); values <= 0 are clamped to 1 there.
public static final String OZONE_THREAD_NUMBER_KEY_DELETION =
    "ozone.thread.number.key.deletion";

// Default thread-pool sizes for the two deletion services above.
public static final int OZONE_THREAD_NUMBER_DIR_DELETION_DEFAULT = 10;

public static final int OZONE_THREAD_NUMBER_KEY_DELETION_DEFAULT = 10;

// Cap on SST files handled per snapshot-filtering task.
// NOTE(review): the property name says "filtering" while the constant says
// "DELETING" — presumably historical; confirm before renaming either side.
public static final String SNAPSHOT_SST_DELETING_LIMIT_PER_TASK =
    "ozone.snapshot.filtering.limit.per.task";
public static final int SNAPSHOT_SST_DELETING_LIMIT_PER_TASK_DEFAULT = 2;
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -123,6 +123,7 @@ private void addPropertiesNotInXml() {
OMConfigKeys.OZONE_OM_RANGER_HTTPS_ADMIN_API_USER,
OMConfigKeys.OZONE_OM_RANGER_HTTPS_ADMIN_API_PASSWD,
OMConfigKeys.OZONE_THREAD_NUMBER_DIR_DELETION,
OMConfigKeys.OZONE_THREAD_NUMBER_KEY_DELETION,
ScmConfigKeys.OZONE_SCM_PIPELINE_PLACEMENT_IMPL_KEY,
ScmConfigKeys.OZONE_SCM_HA_PREFIX,
S3GatewayConfigKeys.OZONE_S3G_FSO_DIRECTORY_CREATION_ENABLED,
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -126,7 +126,7 @@ public void testKeysPurgingByKeyDeletingService() throws Exception {
GenericTestUtils.waitFor(
() -> {
try {
return keyManager.getPendingDeletionKeys(Integer.MAX_VALUE)
return keyManager.getPendingDeletionKeys((kv) -> true, Integer.MAX_VALUE)
.getKeyBlocksList().isEmpty();
} catch (IOException e) {
return false;
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -25,6 +25,7 @@
import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_SNAPSHOT_DEEP_CLEANING_ENABLED;
import static org.junit.jupiter.api.Assertions.assertDoesNotThrow;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.ArgumentMatchers.anyBoolean;
import static org.mockito.ArgumentMatchers.anyInt;
import static org.mockito.ArgumentMatchers.anyString;
Expand Down Expand Up @@ -127,6 +128,7 @@ public void setup() throws Exception {
conf.setBoolean(OZONE_SNAPSHOT_DEEP_CLEANING_ENABLED, true);
conf.setTimeDuration(OZONE_SNAPSHOT_DELETING_SERVICE_TIMEOUT,
10000, TimeUnit.MILLISECONDS);
conf.setInt(OMConfigKeys.OZONE_SNAPSHOT_DIRECTORY_SERVICE_INTERVAL, 500);
conf.setInt(OMConfigKeys.OZONE_DIR_DELETING_SERVICE_INTERVAL, 500);
conf.setTimeDuration(OZONE_BLOCK_DELETING_SERVICE_INTERVAL, 500,
TimeUnit.MILLISECONDS);
Expand Down Expand Up @@ -493,12 +495,12 @@ private KeyDeletingService getMockedKeyDeletingService(AtomicBoolean keyDeletion
KeyManager keyManager = Mockito.spy(om.getKeyManager());
when(ozoneManager.getKeyManager()).thenReturn(keyManager);
KeyDeletingService keyDeletingService = Mockito.spy(new KeyDeletingService(ozoneManager,
ozoneManager.getScmClient().getBlockClient(), keyManager, 10000,
100000, cluster.getConf(), false));
ozoneManager.getScmClient().getBlockClient(), 10000,
100000, cluster.getConf(), 10, false));
keyDeletingService.shutdown();
GenericTestUtils.waitFor(() -> keyDeletingService.getThreadCount() == 0, 1000,
100000);
when(keyManager.getPendingDeletionKeys(anyInt())).thenAnswer(i -> {
when(keyManager.getPendingDeletionKeys(any(), anyInt())).thenAnswer(i -> {
// wait for SDS to reach the KDS wait block before processing any key.
GenericTestUtils.waitFor(keyDeletionWaitStarted::get, 1000, 100000);
keyDeletionStarted.set(true);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -240,7 +240,6 @@ public void testExclusiveSizeWithDirectoryDeepClean() throws Exception {
Table.KeyValue<String, SnapshotInfo> snapshotEntry = iterator.next();
String snapshotName = snapshotEntry.getValue().getName();
SnapshotInfo snapshotInfo = snapshotInfoTable.get(snapshotEntry.getKey());
System.out.println(snapshotInfo.getName() + " " + snapshotInfo.getDeepCleanedDeletedDir());
assertEquals(expectedSize.get(snapshotName),
snapshotInfo.getExclusiveSize() + snapshotInfo.getExclusiveSizeDeltaFromDirDeepCleaning());
// Since for the test we are using RATIS/THREE
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -26,7 +26,6 @@
import org.apache.hadoop.hdds.utils.BackgroundService;
import org.apache.hadoop.hdds.utils.db.Table;
import org.apache.hadoop.hdds.utils.db.TableIterator;
import org.apache.hadoop.ozone.common.BlockGroup;
import org.apache.hadoop.ozone.om.exceptions.OMException;
import org.apache.hadoop.ozone.om.fs.OzoneManagerFS;
import org.apache.hadoop.ozone.om.helpers.BucketLayout;
Expand Down Expand Up @@ -113,17 +112,38 @@ ListKeysResult listKeys(String volumeName, String bucketName, String startKey,
throws IOException;

/**
 * Retrieves pending deletion keys that match a given filter function,
 * scanning the whole deletedTable (no volume/bucket restriction).
 *
 * @param filter the filter condition to apply to each candidate. It receives
 *               a KeyValue pair of the deletedTable key (String) and one
 *               OmKeyInfo version, and returns {@code true} when that version
 *               may be reclaimed. A {@code null} filter accepts everything.
 * @param count the maximum number of key versions to retrieve.
 * @return a PendingKeysDeletion holding the reclaimable keys (as block
 *         groups), up to the specified count.
 * @throws IOException if an I/O error occurs while fetching the keys.
 */
PendingKeysDeletion getPendingDeletionKeys(
    CheckedFunction<Table.KeyValue<String, OmKeyInfo>, Boolean, IOException> filter, int count)
    throws IOException;

/**
 * Retrieves the keys that are pending deletion in a specified bucket and volume.
 *
 * @param volume the name of the volume that contains the bucket.
 * @param bucket the name of the bucket within the volume where keys are located.
 * @param startKey the deletedTable key from which to start retrieving pending
 *                 deletions; implementations may treat {@code null} as
 *                 "start from the beginning of the bucket prefix".
 * @param filter a filter function to determine which keys should be included
 *               in the pending deletion list.
 * @param count the maximum number of keys to retrieve that are pending deletion.
 * @return a PendingKeysDeletion object containing the list of keys
 *         pending deletion based on the specified parameters.
 * @throws IOException if an I/O error occurs during the operation.
 */
PendingKeysDeletion getPendingDeletionKeys(
    String volume, String bucket, String startKey,
    CheckedFunction<Table.KeyValue<String, OmKeyInfo>, Boolean, IOException> filter, int count)
    throws IOException;

/**
* Returns a list rename entries from the snapshotRenamedTable.
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -66,6 +66,8 @@
import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_SNAPSHOT_SST_FILTERING_SERVICE_INTERVAL_DEFAULT;
import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_THREAD_NUMBER_DIR_DELETION;
import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_THREAD_NUMBER_DIR_DELETION_DEFAULT;
import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_THREAD_NUMBER_KEY_DELETION;
import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_THREAD_NUMBER_KEY_DELETION_DEFAULT;
import static org.apache.hadoop.ozone.om.OzoneManagerUtils.getBucketLayout;
import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.BUCKET_NOT_FOUND;
import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.FILE_NOT_FOUND;
Expand Down Expand Up @@ -113,6 +115,7 @@
import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension.EncryptedKeyVersion;
import org.apache.hadoop.fs.FileEncryptionInfo;
import org.apache.hadoop.hdds.HddsConfigKeys;
import org.apache.hadoop.hdds.client.BlockID;
import org.apache.hadoop.hdds.client.ReplicationConfig;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
Expand All @@ -136,6 +139,7 @@
import org.apache.hadoop.net.TableMapping;
import org.apache.hadoop.ozone.OmUtils;
import org.apache.hadoop.ozone.OzoneAcl;
import org.apache.hadoop.ozone.common.BlockGroup;
import org.apache.hadoop.ozone.om.exceptions.OMException;
import org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes;
import org.apache.hadoop.ozone.om.helpers.BucketEncryptionKeyInfo;
Expand Down Expand Up @@ -172,6 +176,7 @@
import org.apache.hadoop.ozone.security.acl.OzoneObj;
import org.apache.hadoop.ozone.security.acl.RequestContext;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.util.Lists;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.hadoop.util.Time;
import org.apache.ratis.util.function.CheckedFunction;
Expand Down Expand Up @@ -254,9 +259,15 @@ public void start(OzoneConfiguration configuration) {
OZONE_BLOCK_DELETING_SERVICE_TIMEOUT,
OZONE_BLOCK_DELETING_SERVICE_TIMEOUT_DEFAULT,
TimeUnit.MILLISECONDS);
int keyDeletingServiceCorePoolSize =
configuration.getInt(OZONE_THREAD_NUMBER_KEY_DELETION,
OZONE_THREAD_NUMBER_KEY_DELETION_DEFAULT);
if (keyDeletingServiceCorePoolSize <= 0) {
keyDeletingServiceCorePoolSize = 1;
}
keyDeletingService = new KeyDeletingService(ozoneManager,
scmClient.getBlockClient(), this, blockDeleteInterval,
serviceTimeout, configuration, isSnapshotDeepCleaningEnabled);
scmClient.getBlockClient(), blockDeleteInterval,
serviceTimeout, configuration, keyDeletingServiceCorePoolSize, isSnapshotDeepCleaningEnabled);
keyDeletingService.start();
}

Expand Down Expand Up @@ -722,12 +733,66 @@ public ListKeysResult listKeys(String volumeName, String bucketName,
}

@Override
public PendingKeysDeletion getPendingDeletionKeys(final int count)
public PendingKeysDeletion getPendingDeletionKeys(
final CheckedFunction<Table.KeyValue<String, OmKeyInfo>, Boolean, IOException> filter, final int count)
throws IOException {
OmMetadataManagerImpl omMetadataManager =
(OmMetadataManagerImpl) metadataManager;
return omMetadataManager
.getPendingDeletionKeys(count, ozoneManager.getOmSnapshotManager());
return getPendingDeletionKeys(null, null, null, filter, count);
}

/**
 * Collects reclaimable deleted-key versions from the deletedTable, optionally
 * restricted to one volume/bucket prefix and starting at {@code startKey}.
 *
 * <p>For each deletedTable entry (which can hold several versions of the same
 * key path), versions accepted by {@code filter} are turned into BlockGroups
 * for block deletion; rejected versions are kept aside. If an entry is only
 * partially reclaimable, the remaining versions are recorded in
 * {@code keysToModify} so the DB entry is rewritten rather than removed.
 *
 * @param volume volume name; null/"" means no bucket prefix restriction.
 * @param bucket bucket name within the volume.
 * @param startKey deletedTable key to seek to before iterating; may be null.
 * @param filter per-version reclaimability predicate; null accepts everything.
 * @param count maximum number of key versions to collect.
 * @return the reclaimable BlockGroups plus the entries needing modification.
 * @throws IOException if table iteration or the filter fails.
 */
@Override
public PendingKeysDeletion getPendingDeletionKeys(
    String volume, String bucket, String startKey,
    CheckedFunction<Table.KeyValue<String, OmKeyInfo>, Boolean, IOException> filter,
    int count) throws IOException {
  List<BlockGroup> keyBlocksList = Lists.newArrayList();
  Map<String, RepeatedOmKeyInfo> keysToModify = new HashMap<>();
  // Bucket prefix would be empty if volume is empty i.e. either null or "".
  Optional<String> bucketPrefix = getBucketPrefix(volume, bucket, false);
  try (TableIterator<String, ? extends Table.KeyValue<String, RepeatedOmKeyInfo>>
      delKeyIter = metadataManager.getDeletedTable().iterator(bucketPrefix.orElse(""))) {

    /* Seek to the start key if it is not null. The next key picked up is
    guaranteed to start with the bucket prefix —
    {@link org.apache.hadoop.hdds.utils.db.Table#iterator(bucketPrefix)} ensures this.
    */
    if (startKey != null) {
      delKeyIter.seek(startKey);
    }
    // Counts accepted VERSIONS, not DB entries; the last entry processed may
    // push the total past `count` since an entry is handled atomically.
    int currentCount = 0;
    while (delKeyIter.hasNext() && currentCount < count) {
      RepeatedOmKeyInfo notReclaimableKeyInfo = new RepeatedOmKeyInfo();
      Table.KeyValue<String, RepeatedOmKeyInfo> kv = delKeyIter.next();
      if (kv != null) {
        List<BlockGroup> blockGroupList = Lists.newArrayList();
        // Multiple keys with the same path can be queued in one DB entry
        RepeatedOmKeyInfo infoList = kv.getValue();
        for (OmKeyInfo info : infoList.getOmKeyInfoList()) {

          // Skip the key if the filter doesn't allow the file to be deleted.
          if (filter == null || filter.apply(Table.newKeyValue(kv.getKey(), info))) {
            // Flatten every block of every location version into one group
            // keyed by the deletedTable key name.
            List<BlockID> blockIDS = info.getKeyLocationVersions().stream()
                .flatMap(versionLocations -> versionLocations.getLocationList().stream()
                    .map(b -> new BlockID(b.getContainerID(), b.getLocalID()))).collect(Collectors.toList());
            BlockGroup keyBlocks = BlockGroup.newBuilder().setKeyName(kv.getKey())
                .addAllBlockIDs(blockIDS).build();
            blockGroupList.add(keyBlocks);
            currentCount++;
          } else {
            notReclaimableKeyInfo.addOmKeyInfo(info);
          }
        }

        List<OmKeyInfo> notReclaimableKeyInfoList = notReclaimableKeyInfo.getOmKeyInfoList();

        // Only partially reclaimable entries are scheduled for modification:
        // the entry is rewritten to keep just the not-yet-reclaimable versions.
        // (If NO version was reclaimable, the entry is left untouched — no
        // blocks were emitted for it either.)
        if (!notReclaimableKeyInfoList.isEmpty() &&
            notReclaimableKeyInfoList.size() != infoList.getOmKeyInfoList().size()) {
          keysToModify.put(kv.getKey(), notReclaimableKeyInfo);
        }
        keyBlocksList.addAll(blockGroupList);
      }
    }
  }
  return new PendingKeysDeletion(keyBlocksList, keysToModify);
}

private <V, R> List<Table.KeyValue<String, R>> getTableEntries(String startKey,
Expand Down
Loading