Merged
Commits (25)
8c626c6 HDDS-13021. AbstractKeyDeletingService should have unique call id for… (swamirishi, May 12, 2025)
50b1a23 HDDS-13022. Segragate Exclusive Dir Size and ExclusiveKeySize in Snap… (swamirishi, May 12, 2025)
685aac2 HDDS-13022. Add Test OzoneSnapshot client code (swamirishi, May 12, 2025)
c29d3d8 HDDS-13022. Add hashcode for OzoneSnapshots (swamirishi, May 12, 2025)
49184f6 HDDS-13022. Add hashcode for OzoneSnapshots (swamirishi, May 12, 2025)
2014b2a Merge remote-tracking branch 'apache/master' into HEAD (swamirishi, May 13, 2025)
ea94417 HDDS-13022. Address review comments (swamirishi, May 13, 2025)
7bcbda6 HDDS-13022. Address review comments (swamirishi, May 13, 2025)
b86ec7e HDDS-13031. Remove Lock set bits for SNAPSHOT_GC_LOCK (swamirishi, May 13, 2025)
617cf00 HDDS-13026. KeyDeleting service should also delete RenameEntries (swamirishi, May 14, 2025)
33435cb Merge remote-tracking branch 'origin/HDDS-13031' into HEAD (swamirishi, May 14, 2025)
40b6a83 HDDS-13031. Fix Locking setbit issue (swamirishi, May 14, 2025)
c962507 HDDS-13031. Fix checkstyle (swamirishi, May 14, 2025)
29b1c6d Merge remote-tracking branch 'origin/HDDS-13031' into HEAD (swamirishi, May 14, 2025)
0373c1b Merge remote-tracking branch 'apache/master' into HEAD (swamirishi, May 20, 2025)
4b84307 HDDS-13026. Fix checkstyle (swamirishi, May 20, 2025)
644a931 Merge remote-tracking branch 'apache/master' into HEAD (swamirishi, May 24, 2025)
6116cba HDDS-13026. Merge with master (swamirishi, May 24, 2025)
3af0ac6 HDDS-13026. Merge with master (swamirishi, May 24, 2025)
6249698 HDDS-13026. Add OmKeyPurgeRequest changes (swamirishi, May 24, 2025)
84dcb6b HDDS-13026. Add test case (swamirishi, May 24, 2025)
ea8a3bc HDDS-13026. Add test case (swamirishi, May 24, 2025)
fba3c21 HDDS-13026. Add rename entries purged in metrics (swamirishi, May 26, 2025)
4f89ca6 HDDS-13026. Fix test (swamirishi, May 27, 2025)
7786270 HDDS-13026. Fix alignment (swamirishi, May 27, 2025)
@@ -126,7 +126,7 @@ public void testKeysPurgingByKeyDeletingService() throws Exception {
GenericTestUtils.waitFor(
() -> {
try {
return keyManager.getPendingDeletionKeys(Integer.MAX_VALUE)
return keyManager.getPendingDeletionKeys((kv) -> true, Integer.MAX_VALUE)
.getKeyBlocksList().isEmpty();
} catch (IOException e) {
return false;
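The pass-through filter (kv) -> true above keeps the test behaviour unchanged. A minimal caller sketch with a selective filter, assuming a KeyManager instance named keyManager; the bucket check is purely illustrative and not part of this change:

    // Sketch only: reclaim pending-deletion keys from a single bucket by
    // filtering on OmKeyInfo#getBucketName; keyManager and the bucket name
    // are assumptions for illustration.
    PendingKeysDeletion pending = keyManager.getPendingDeletionKeys(
        kv -> "bucket1".equals(kv.getValue().getBucketName()), Integer.MAX_VALUE);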
@@ -25,6 +25,7 @@
import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_SNAPSHOT_DEEP_CLEANING_ENABLED;
import static org.junit.jupiter.api.Assertions.assertDoesNotThrow;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.ArgumentMatchers.anyBoolean;
import static org.mockito.ArgumentMatchers.anyInt;
import static org.mockito.ArgumentMatchers.anyString;
@@ -497,7 +498,7 @@ private KeyDeletingService getMockedKeyDeletingService(AtomicBoolean keyDeletion
keyDeletingService.shutdown();
GenericTestUtils.waitFor(() -> keyDeletingService.getThreadCount() == 0, 1000,
100000);
when(keyManager.getPendingDeletionKeys(anyInt())).thenAnswer(i -> {
when(keyManager.getPendingDeletionKeys(any(), anyInt())).thenAnswer(i -> {
// wait for SDS to reach the KDS wait block before processing any key.
GenericTestUtils.waitFor(keyDeletionWaitStarted::get, 1000, 100000);
keyDeletionStarted.set(true);
@@ -616,9 +617,9 @@ public void testParallelExcecutionOfKeyDeletionAndSnapshotDeletion() throws Exce
try (ReferenceCounted<OmSnapshot> snapshot = om.getOmSnapshotManager().getSnapshot(testBucket.getVolumeName(),
testBucket.getName(), testBucket.getName() + "snap2")) {
renamesKeyEntries = snapshot.get().getKeyManager().getRenamesKeyEntries(testBucket.getVolumeName(),
testBucket.getName(), "", 1000);
testBucket.getName(), "", (kv) -> true, 1000);
deletedKeyEntries = snapshot.get().getKeyManager().getDeletedKeyEntries(testBucket.getVolumeName(),
testBucket.getName(), "", 1000);
testBucket.getName(), "", (kv) -> true, 1000);
deletedDirEntries = snapshot.get().getKeyManager().getDeletedDirEntries(testBucket.getVolumeName(),
testBucket.getName(), 1000);
}
@@ -653,20 +654,20 @@ public void testParallelExcecutionOfKeyDeletionAndSnapshotDeletion() throws Exce
testBucket.getName(), testBucket.getName() + "snap2")) {
Assertions.assertEquals(Collections.emptyList(),
snapshot.get().getKeyManager().getRenamesKeyEntries(testBucket.getVolumeName(),
testBucket.getName(), "", 1000));
testBucket.getName(), "", (kv) -> true, 1000));
Assertions.assertEquals(Collections.emptyList(),
snapshot.get().getKeyManager().getDeletedKeyEntries(testBucket.getVolumeName(),
testBucket.getName(), "", 1000));
testBucket.getName(), "", (kv) -> true, 1000));
Assertions.assertEquals(Collections.emptyList(),
snapshot.get().getKeyManager().getDeletedDirEntries(testBucket.getVolumeName(),
testBucket.getName(), 1000));
}
List<Table.KeyValue<String, String>> aosRenamesKeyEntries =
om.getKeyManager().getRenamesKeyEntries(testBucket.getVolumeName(),
testBucket.getName(), "", 1000);
testBucket.getName(), "", (kv) -> true, 1000);
List<Table.KeyValue<String, List<OmKeyInfo>>> aosDeletedKeyEntries =
om.getKeyManager().getDeletedKeyEntries(testBucket.getVolumeName(),
testBucket.getName(), "", 1000);
testBucket.getName(), "", (kv) -> true, 1000);
List<Table.KeyValue<String, OmKeyInfo>> aosDeletedDirEntries =
om.getKeyManager().getDeletedDirEntries(testBucket.getVolumeName(),
testBucket.getName(), 1000);
@@ -240,7 +240,6 @@ public void testExclusiveSizeWithDirectoryDeepClean() throws Exception {
Table.KeyValue<String, SnapshotInfo> snapshotEntry = iterator.next();
String snapshotName = snapshotEntry.getValue().getName();
SnapshotInfo snapshotInfo = snapshotInfoTable.get(snapshotEntry.getKey());
System.out.println(snapshotInfo.getName() + " " + snapshotInfo.getDeepCleanedDeletedDir());
assertEquals(expectedSize.get(snapshotName),
snapshotInfo.getExclusiveSize() + snapshotInfo.getExclusiveSizeDeltaFromDirDeepCleaning());
// Since for the test we are using RATIS/THREE
@@ -1392,6 +1392,7 @@ message PurgeKeysRequest {
repeated SnapshotMoveKeyInfos keysToUpdate = 3;
// previous snapshotID can also be null & this field would be absent in older requests.
optional NullableUUID expectedPreviousSnapshotID = 4;
repeated string renamedKeys = 5;
}

message PurgeKeysResponse {
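The new repeated field lets a purge request name the snapshotRenamedTable entries to drop alongside the deleted keys. A sketch of populating it, assuming the standard protoc-generated builder; OzoneManagerProtocolProtos and the renamedTableKeys list are assumptions not shown in this diff:

    // Sketch only: attach renamed-table keys to the purge request.
    OzoneManagerProtocolProtos.PurgeKeysRequest purgeRequest =
        OzoneManagerProtocolProtos.PurgeKeysRequest.newBuilder()
            .addAllRenamedKeys(renamedTableKeys)
            .build();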
@@ -37,6 +37,7 @@
import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadList;
import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadListParts;
import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo;
import org.apache.hadoop.ozone.om.service.CompactionService;
import org.apache.hadoop.ozone.om.service.DirectoryDeletingService;
import org.apache.hadoop.ozone.om.service.KeyDeletingService;
@@ -123,18 +124,39 @@ ListKeysResult listKeys(String volumeName, String bucketName, String startKey,
* and a hashmap for key-value pair to be updated in the deletedTable.
* @throws IOException
*/
PendingKeysDeletion getPendingDeletionKeys(int count) throws IOException;
PendingKeysDeletion getPendingDeletionKeys(
CheckedFunction<Table.KeyValue<String, OmKeyInfo>, Boolean, IOException> filter, int count)
throws IOException;

/**
* Returns a PendingKeysDeletion. It has a list of pending deletion key info
* up to the given count. Each entry is a {@link BlockGroup}, which
* contains the info about the key name and all its associated block IDs.
* Second is a Mapping of Key-Value pair which is updated in the deletedTable.
*
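* @param volume volume to scan; an empty or null volume means the whole deletedTable is iterated.
* @param bucket bucket used, together with the volume, to build the iteration prefix.
* @param startKey deletedTable key to seek to before iterating, or null to start at the prefix.
* @param filter filter to apply on the entries; only matching versions are reclaimed.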
* @param count max number of keys to return.
* @return a Pair of list of {@link BlockGroup} representing keys and blocks,
* and a hashmap for key-value pair to be updated in the deletedTable.
* @throws IOException
*/
PendingKeysDeletion getPendingDeletionKeys(
String volume, String bucket, String startKey,
CheckedFunction<Table.KeyValue<String, OmKeyInfo>, Boolean, IOException> filter, int count)
throws IOException;

/**
* Returns a list of rename entries from the snapshotRenamedTable.
*
* @param size max number of keys to return.
* @param count max number of keys to return.
* @param filter filter to apply on the entries.
* @return a Pair of list of {@link org.apache.hadoop.hdds.utils.db.Table.KeyValue} representing the keys in the
* underlying metadataManager.
* @throws IOException
*/
List<Table.KeyValue<String, String>> getRenamesKeyEntries(
String volume, String bucket, String startKey, int size) throws IOException;
String volume, String bucket, String startKey,
CheckedFunction<Table.KeyValue<String, String>, Boolean, IOException> filter, int count)
throws IOException;


/**
@@ -158,13 +180,16 @@ CheckedFunction<KeyManager, OmKeyInfo, IOException> getPreviousSnapshotOzoneKeyI
/**
* Returns a list of deleted entries from the deletedTable.
*
* @param size max number of keys to return.
* @param count max number of keys to return.
* @param filter filter to apply on the entries.
* @return a Pair of list of {@link org.apache.hadoop.hdds.utils.db.Table.KeyValue} representing the keys in the
* underlying metadataManager.
* @throws IOException
*/
List<Table.KeyValue<String, List<OmKeyInfo>>> getDeletedKeyEntries(
String volume, String bucket, String startKey, int size) throws IOException;
String volume, String bucket, String startKey,
CheckedFunction<Table.KeyValue<String, RepeatedOmKeyInfo>, Boolean, IOException> filter,
int count) throws IOException;

/**
* Returns the names of up to {@code count} open keys whose age is
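The second getPendingDeletionKeys overload also takes a volume, bucket, and start key, which lets a caller resume iteration over one bucket's deletedTable. A hedged sketch of such a call; the volume, bucket, and lastSeenKey values are placeholders:

    // Sketch only: fetch the next batch of reclaimable keys for one bucket,
    // resuming from the last deletedTable key processed in the previous batch.
    PendingKeysDeletion batch = keyManager.getPendingDeletionKeys(
        "vol1", "bucket1", lastSeenKey, kv -> true, 1000);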
@@ -113,6 +113,7 @@
import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension.EncryptedKeyVersion;
import org.apache.hadoop.fs.FileEncryptionInfo;
import org.apache.hadoop.hdds.HddsConfigKeys;
import org.apache.hadoop.hdds.client.BlockID;
import org.apache.hadoop.hdds.client.ReplicationConfig;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
@@ -136,6 +137,7 @@
import org.apache.hadoop.net.TableMapping;
import org.apache.hadoop.ozone.OmUtils;
import org.apache.hadoop.ozone.OzoneAcl;
import org.apache.hadoop.ozone.common.BlockGroup;
import org.apache.hadoop.ozone.om.exceptions.OMException;
import org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes;
import org.apache.hadoop.ozone.om.helpers.BucketEncryptionKeyInfo;
@@ -172,6 +174,7 @@
import org.apache.hadoop.ozone.security.acl.OzoneObj;
import org.apache.hadoop.ozone.security.acl.RequestContext;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.util.Lists;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.hadoop.util.Time;
import org.apache.ratis.util.function.CheckedFunction;
@@ -722,17 +725,73 @@ public ListKeysResult listKeys(String volumeName, String bucketName,
}

@Override
public PendingKeysDeletion getPendingDeletionKeys(final int count)
public PendingKeysDeletion getPendingDeletionKeys(
final CheckedFunction<Table.KeyValue<String, OmKeyInfo>, Boolean, IOException> filter, final int count)
throws IOException {
OmMetadataManagerImpl omMetadataManager =
(OmMetadataManagerImpl) metadataManager;
return omMetadataManager
.getPendingDeletionKeys(count, ozoneManager.getOmSnapshotManager());
return getPendingDeletionKeys(null, null, null, filter, count);
}

@Override
public PendingKeysDeletion getPendingDeletionKeys(
String volume, String bucket, String startKey,
CheckedFunction<Table.KeyValue<String, OmKeyInfo>, Boolean, IOException> filter,
int count) throws IOException {
List<BlockGroup> keyBlocksList = Lists.newArrayList();
Map<String, RepeatedOmKeyInfo> keysToModify = new HashMap<>();
// Bucket prefix would be empty if volume is empty i.e. either null or "".
Optional<String> bucketPrefix = getBucketPrefix(volume, bucket, false);
try (TableIterator<String, ? extends Table.KeyValue<String, RepeatedOmKeyInfo>>
delKeyIter = metadataManager.getDeletedTable().iterator(bucketPrefix.orElse(""))) {

/* Seek to the start key if it is not null. The next key picked up is guaranteed to start with the bucket
prefix, {@link org.apache.hadoop.hdds.utils.db.Table#iterator(bucketPrefix)} would ensure this.
*/
if (startKey != null) {
delKeyIter.seek(startKey);
}
int currentCount = 0;
while (delKeyIter.hasNext() && currentCount < count) {
RepeatedOmKeyInfo notReclaimableKeyInfo = new RepeatedOmKeyInfo();
Table.KeyValue<String, RepeatedOmKeyInfo> kv = delKeyIter.next();
if (kv != null) {
List<BlockGroup> blockGroupList = Lists.newArrayList();
// Multiple keys with the same path can be queued in one DB entry
RepeatedOmKeyInfo infoList = kv.getValue();
for (OmKeyInfo info : infoList.cloneOmKeyInfoList()) {

// Skip the key if the filter doesn't allow the file to be deleted.
if (filter == null || filter.apply(Table.newKeyValue(kv.getKey(), info))) {
List<BlockID> blockIDS = info.getKeyLocationVersions().stream()
.flatMap(versionLocations -> versionLocations.getLocationList().stream()
.map(b -> new BlockID(b.getContainerID(), b.getLocalID()))).collect(Collectors.toList());
BlockGroup keyBlocks = BlockGroup.newBuilder().setKeyName(kv.getKey())
.addAllBlockIDs(blockIDS).build();
blockGroupList.add(keyBlocks);
currentCount++;
} else {
notReclaimableKeyInfo.addOmKeyInfo(info);
}
}

List<OmKeyInfo> notReclaimableKeyInfoList = notReclaimableKeyInfo.getOmKeyInfoList();

// If only some of the versions are reclaimable, rewrite the entry so it keeps just the versions that cannot be purged yet.
if (notReclaimableKeyInfoList.size() > 0 &&
notReclaimableKeyInfoList.size() != infoList.getOmKeyInfoList().size()) {
keysToModify.put(kv.getKey(), notReclaimableKeyInfo);
}
keyBlocksList.addAll(blockGroupList);
}
}
}
return new PendingKeysDeletion(keyBlocksList, keysToModify);
}

private <V, R> List<Table.KeyValue<String, R>> getTableEntries(String startKey,
TableIterator<String, ? extends Table.KeyValue<String, V>> tableIterator,
Function<V, R> valueFunction, int size) throws IOException {
Function<V, R> valueFunction,
CheckedFunction<Table.KeyValue<String, V>, Boolean, IOException> filter,
int size) throws IOException {
List<Table.KeyValue<String, R>> entries = new ArrayList<>();
/* Seek to the start key if it's not null. The next key in queue is ensured to start with the bucket
prefix, {@link org.apache.hadoop.hdds.utils.db.Table#iterator(bucketPrefix)} would ensure this.
@@ -745,7 +804,7 @@ private <V, R> List<Table.KeyValue<String, R>> getTableEntries(String startKey,
int currentCount = 0;
while (tableIterator.hasNext() && currentCount < size) {
Table.KeyValue<String, V> kv = tableIterator.next();
if (kv != null) {
if (kv != null && filter.apply(kv)) {
entries.add(Table.newKeyValue(kv.getKey(), valueFunction.apply(kv.getValue())));
currentCount++;
}
@@ -767,11 +826,12 @@ private Optional<String> getBucketPrefix(String volumeName, String bucketName, b

@Override
public List<Table.KeyValue<String, String>> getRenamesKeyEntries(
String volume, String bucket, String startKey, int size) throws IOException {
String volume, String bucket, String startKey,
CheckedFunction<Table.KeyValue<String, String>, Boolean, IOException> filter, int size) throws IOException {
Optional<String> bucketPrefix = getBucketPrefix(volume, bucket, false);
try (TableIterator<String, ? extends Table.KeyValue<String, String>>
renamedKeyIter = metadataManager.getSnapshotRenamedTable().iterator(bucketPrefix.orElse(""))) {
return getTableEntries(startKey, renamedKeyIter, Function.identity(), size);
return getTableEntries(startKey, renamedKeyIter, Function.identity(), filter, size);
}
}

@@ -815,11 +875,13 @@ private <T> CheckedFunction<KeyManager, T, IOException> getPreviousSnapshotOzone

@Override
public List<Table.KeyValue<String, List<OmKeyInfo>>> getDeletedKeyEntries(
String volume, String bucket, String startKey, int size) throws IOException {
String volume, String bucket, String startKey,
CheckedFunction<Table.KeyValue<String, RepeatedOmKeyInfo>, Boolean, IOException> filter,
int size) throws IOException {
Optional<String> bucketPrefix = getBucketPrefix(volume, bucket, false);
try (TableIterator<String, ? extends Table.KeyValue<String, RepeatedOmKeyInfo>>
delKeyIter = metadataManager.getDeletedTable().iterator(bucketPrefix.orElse(""))) {
return getTableEntries(startKey, delKeyIter, RepeatedOmKeyInfo::cloneOmKeyInfoList, size);
return getTableEntries(startKey, delKeyIter, RepeatedOmKeyInfo::cloneOmKeyInfoList, filter, size);
}
}

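The two filter conventions above differ slightly: getPendingDeletionKeys treats a null filter as match-all (filter == null || filter.apply(...)), while getTableEntries applies the filter unconditionally, so the listing methods expect an explicit pass-through. A small sketch of both, with placeholder arguments:

    // Sketch only: a null filter is accepted by getPendingDeletionKeys...
    PendingKeysDeletion all = keyManager.getPendingDeletionKeys(null, 1000);
    // ...whereas the listing helpers need an explicit pass-through filter.
    List<Table.KeyValue<String, String>> renames =
        keyManager.getRenamesKeyEntries(null, null, null, kv -> true, 1000);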