Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
37 commits
Select commit Hold shift + click to select a range
7327b47
HDDS-12559. Implement Bulk Ozone Locks for taking locks on multiple s…
swamirishi Mar 11, 2025
bd5b0c6
HDDS-12560. Reclaimable Filter for Snapshot Garbage Collections
swamirishi Mar 11, 2025
2018370
HDDS-12562. Reclaimable Directory entry filter for reclaiming deleted…
swamirishi Mar 12, 2025
f1c85fd
HDDS-12560. Mock SnapshotDiffManager construction
swamirishi Mar 12, 2025
4c74de1
Merge remote-tracking branch 'origin/HDDS-12560' into HEAD
swamirishi Mar 12, 2025
43ab7b7
HDDS-12559. Revert unintended change in method signature
swamirishi Mar 12, 2025
51c88f1
Merge remote-tracking branch 'origin/HDDS-12559' into HEAD
swamirishi Mar 12, 2025
adc4bae
Merge remote-tracking branch 'origin/HDDS-12560' into HEAD
swamirishi Mar 12, 2025
b901166
HDDS-12559. Address review comments
swamirishi Apr 1, 2025
690eae9
Merge remote-tracking branch 'apache/master' into HEAD
swamirishi Apr 1, 2025
865f3a5
HDDS-12559. Add javadoc
swamirishi Apr 1, 2025
5743edb
HDDS-12560. Address review comments
swamirishi Apr 1, 2025
abcfaff
HDDS-12559. Move acquireLock to another private function
swamirishi Apr 1, 2025
a2127a4
HDDS-12560. Address review comments
swamirishi Apr 1, 2025
9f6d2a0
HDDS-12559. Address review comments
swamirishi Apr 2, 2025
9d6bae3
HDDS-12560. Address review comments
swamirishi Apr 3, 2025
7d785ab
Merge remote-tracking branch 'origin/HDDS-12559' into HEAD
swamirishi Apr 3, 2025
e1d2317
Merge remote-tracking branch 'apache/master' into HEAD
swamirishi Apr 3, 2025
215b8cc
Merge remote-tracking branch 'origin/HDDS-12559' into HEAD
swamirishi Apr 3, 2025
93ba939
HDDS-12560. Address review comments
swamirishi Apr 17, 2025
3935bf8
Update hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozo…
swamirishi Apr 17, 2025
6d638a0
HDDS-12560. Fix method folding
swamirishi Apr 17, 2025
681bb3d
Merge remote-tracking branch 'apache/master' into HEAD
swamirishi Apr 17, 2025
8fd746c
HDDS-12560. Remove Checked Function
swamirishi Apr 17, 2025
b69182e
Merge remote-tracking branch 'origin/HDDS-12560' into HEAD
swamirishi Apr 17, 2025
d32bcf0
Update hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozo…
swamirishi Apr 17, 2025
f8a2b6e
HDDS-12560. Fix compilation issue
swamirishi Apr 17, 2025
f323e3e
Merge remote-tracking branch 'origin/HDDS-12560' into HEAD
swamirishi Apr 17, 2025
29a26d2
HDDS-12560. Address review comments
swamirishi Apr 30, 2025
1ebba73
HDDS-12560. Fix test case
swamirishi Apr 30, 2025
164303e
Merge remote-tracking branch 'origin/HDDS-12560' into HEAD
swamirishi May 1, 2025
2c1d267
Merge remote-tracking branch 'apache/master' into HEAD
swamirishi May 1, 2025
4febc9c
Merge remote-tracking branch 'apache/master' into HEAD
swamirishi May 5, 2025
3e06491
HDDS-12562. Fix javadoc and comments
swamirishi May 5, 2025
45cb588
HDDS-12562. Fix javadoc and comments
swamirishi May 5, 2025
27b3604
HDDS-12562. Fix tests
swamirishi May 6, 2025
b47f986
HDDS-12562. Add assert
swamirishi May 6, 2025
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,7 @@

import java.io.File;
import java.io.IOException;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
Expand All @@ -28,7 +29,16 @@
* InMemory Table implementation for tests.
*/
public final class InMemoryTestTable<KEY, VALUE> implements Table<KEY, VALUE> {
private final Map<KEY, VALUE> map = new ConcurrentHashMap<>();
private final Map<KEY, VALUE> map;

/** Creates an empty in-memory table. */
public InMemoryTestTable() {
this.map = new ConcurrentHashMap<>();
}

/**
 * Creates a table pre-populated with the given entries.
 *
 * @param map initial entries; they are copied, so later changes to {@code map}
 *            are not reflected in this table
 */
public InMemoryTestTable(Map<KEY, VALUE> map) {
this.map = new ConcurrentHashMap<>(map);
}

@Override
public void close() {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -53,6 +53,7 @@
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;

import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Sets;
import jakarta.annotation.Nonnull;
import java.io.File;
Expand All @@ -69,6 +70,7 @@
import java.util.Set;
import java.util.TreeSet;
import java.util.UUID;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import org.apache.commons.lang3.RandomStringUtils;
import org.apache.commons.lang3.tuple.Pair;
Expand Down Expand Up @@ -104,6 +106,8 @@
import org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocol;
import org.apache.hadoop.hdds.scm.server.SCMConfigurator;
import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
import org.apache.hadoop.hdds.utils.db.InMemoryTestTable;
import org.apache.hadoop.hdds.utils.db.Table;
import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
import org.apache.hadoop.ozone.OzoneAcl;
Expand All @@ -112,6 +116,7 @@
import org.apache.hadoop.ozone.om.exceptions.OMException;
import org.apache.hadoop.ozone.om.helpers.BucketLayout;
import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
Expand Down Expand Up @@ -1585,6 +1590,91 @@ void testGetNotExistedPart() throws IOException {
assertEquals(0, locationList.size());
}

/**
 * Builds a Mockito mock of {@link OmKeyInfo} whose stubbed accessors match the
 * bucket's layout: FSO buckets use fileName + parentObjectID, legacy/OBS buckets
 * use the full keyName.
 */
private OmKeyInfo getMockedOmKeyInfo(OmBucketInfo bucketInfo, long parentId, String key, long objectId) {
OmKeyInfo mockedKeyInfo = mock(OmKeyInfo.class);
when(mockedKeyInfo.getObjectID()).thenReturn(objectId);
boolean fsoLayout = bucketInfo.getBucketLayout().isFileSystemOptimized();
if (fsoLayout) {
when(mockedKeyInfo.getFileName()).thenReturn(key);
when(mockedKeyInfo.getParentObjectID()).thenReturn(parentId);
} else {
when(mockedKeyInfo.getKeyName()).thenReturn(key);
}
return mockedKeyInfo;
}

/**
 * Builds a Mockito mock of {@link OmDirectoryInfo} with the given name,
 * parent object id and object id stubbed.
 */
private OmDirectoryInfo getMockedOmDirInfo(long parentId, String key, long objectId) {
OmDirectoryInfo mockedDirInfo = mock(OmDirectoryInfo.class);
when(mockedDirInfo.getObjectID()).thenReturn(objectId);
when(mockedDirInfo.getParentObjectID()).thenReturn(parentId);
when(mockedDirInfo.getName()).thenReturn(key);
return mockedDirInfo;
}

/**
 * Builds the directory-table key for the given key info, mirroring the layout-aware
 * path format: FSO uses "volumeId/bucketObjectId/parentId/fileName", other layouts
 * use "volume/bucket/keyName".
 */
private String getDirectoryKey(long volumeId, OmBucketInfo bucketInfo, OmKeyInfo omKeyInfo) {
if (!bucketInfo.getBucketLayout().isFileSystemOptimized()) {
return bucketInfo.getVolumeName() + "/" + bucketInfo.getBucketName() + "/" + omKeyInfo.getKeyName();
}
return String.join("/", String.valueOf(volumeId), String.valueOf(bucketInfo.getObjectID()),
String.valueOf(omKeyInfo.getParentObjectID()), omKeyInfo.getFileName());
}

/** Builds the FSO directory-table key "volumeId/bucketObjectId/parentId/name" for a directory. */
private String getDirectoryKey(long volumeId, OmBucketInfo bucketInfo, OmDirectoryInfo omDirInfo) {
return String.join("/", String.valueOf(volumeId), String.valueOf(bucketInfo.getObjectID()),
String.valueOf(omDirInfo.getParentObjectID()), omDirInfo.getName());
}

/** Builds the snapshotRenamedTable key "volume/bucket/objectId". */
private String getRenameKey(String volume, String bucket, long objectId) {
return String.join("/", volume, bucket, String.valueOf(objectId));
}

@Test
public void testPreviousSnapshotOzoneDirInfo() throws IOException {
// Active OM metadata manager: path keys are simply the arguments joined with '/',
// and rename keys delegate to the local getRenameKey helper ("volume/bucket/objectId").
OMMetadataManager omMetadataManager = mock(OMMetadataManager.class);
when(omMetadataManager.getOzonePathKey(anyLong(), anyLong(), anyLong(), anyString()))
.thenAnswer(i -> Arrays.stream(i.getArguments()).map(Object::toString)
.collect(Collectors.joining("/")));
when(omMetadataManager.getRenameKey(anyString(), anyString(), anyLong())).thenAnswer(
i -> getRenameKey(i.getArgument(0), i.getArgument(1), i.getArgument(2)));

// Two KeyManagers: 'km' over the active metadata, 'prevKM' over the previous snapshot's metadata.
OMMetadataManager previousMetadataManager = mock(OMMetadataManager.class);
OzoneConfiguration configuration = new OzoneConfiguration();
KeyManagerImpl km = new KeyManagerImpl(null, null, omMetadataManager, configuration, null, null, null);
KeyManagerImpl prevKM = new KeyManagerImpl(null, null, previousMetadataManager, configuration, null, null, null);
long volumeId = 1L;
OmBucketInfo bucketInfo = OmBucketInfo.newBuilder().setBucketName(BUCKET_NAME).setVolumeName(VOLUME_NAME)
.setObjectID(2L).setBucketLayout(BucketLayout.FILE_SYSTEM_OPTIMIZED).build();
// Directories present in the previous snapshot's directory table.
OmDirectoryInfo prevKey = getMockedOmDirInfo(5, "key", 1);
OmDirectoryInfo prevKey2 = getMockedOmDirInfo(7, "key2", 2);
// Current entries, covering four scenarios (objectId in parentheses):
//  (1) renamed since the previous snapshot - resolved via the rename table;
//  (2) same path in both snapshots - resolved via the direct path key;
//  (3) rename-table entry points at a path absent from the previous dir table - expect null;
//  (4) no rename entry and path absent from the previous dir table - expect null.
OmKeyInfo currentKey = getMockedOmKeyInfo(bucketInfo, 6, "renamedKey", 1);
OmDirectoryInfo currentKeyDir = getMockedOmDirInfo(6, "renamedKey", 1);
OmKeyInfo currentKey2 = getMockedOmKeyInfo(bucketInfo, 7, "key2", 2);
OmDirectoryInfo currentKeyDir2 = getMockedOmDirInfo(7, "key2", 2);
OmKeyInfo currentKey3 = getMockedOmKeyInfo(bucketInfo, 8, "key3", 3);
OmDirectoryInfo currentKeyDir3 = getMockedOmDirInfo(8, "key3", 3);
OmKeyInfo currentKey4 = getMockedOmKeyInfo(bucketInfo, 8, "key4", 4);
OmDirectoryInfo currentKeyDir4 = getMockedOmDirInfo(8, "key4", 4);
// Previous snapshot's directory table contains only prevKey and prevKey2.
Table<String, OmDirectoryInfo> prevDirTable = new InMemoryTestTable<>(
ImmutableMap.of(getDirectoryKey(volumeId, bucketInfo, prevKey), prevKey,
getDirectoryKey(volumeId, bucketInfo, prevKey2), prevKey2));
// Rename table: objectId 1 maps to prevKey's path; objectId 3 maps to a path that
// does not exist in prevDirTable (scenario 3 above).
Table<String, String> renameTable = new InMemoryTestTable<>(
ImmutableMap.of(getRenameKey(VOLUME_NAME, BUCKET_NAME, 1),
getDirectoryKey(volumeId, bucketInfo, prevKey),
getRenameKey(VOLUME_NAME, BUCKET_NAME, 3), getDirectoryKey(volumeId, bucketInfo,
getMockedOmKeyInfo(bucketInfo, 6, "unknownKey", 9))));
when(previousMetadataManager.getDirectoryTable()).thenReturn(prevDirTable);
when(omMetadataManager.getSnapshotRenamedTable()).thenReturn(renameTable);
// OmKeyInfo overload: renamed (1) and unchanged (2) resolve; (3) and (4) do not.
assertEquals(prevKey, km.getPreviousSnapshotOzoneDirInfo(volumeId, bucketInfo, currentKey).apply(prevKM));
assertEquals(prevKey2, km.getPreviousSnapshotOzoneDirInfo(volumeId, bucketInfo, currentKey2).apply(prevKM));
assertNull(km.getPreviousSnapshotOzoneDirInfo(volumeId, bucketInfo, currentKey3).apply(prevKM));
assertNull(km.getPreviousSnapshotOzoneDirInfo(volumeId, bucketInfo, currentKey4).apply(prevKM));

// OmDirectoryInfo overload: same four scenarios, same expectations.
assertEquals(prevKey, km.getPreviousSnapshotOzoneDirInfo(volumeId, bucketInfo, currentKeyDir).apply(prevKM));
assertEquals(prevKey2, km.getPreviousSnapshotOzoneDirInfo(volumeId, bucketInfo, currentKeyDir2).apply(prevKM));
assertNull(km.getPreviousSnapshotOzoneDirInfo(volumeId, bucketInfo, currentKeyDir3).apply(prevKM));
assertNull(km.getPreviousSnapshotOzoneDirInfo(volumeId, bucketInfo, currentKeyDir4).apply(prevKM));
}

private void initKeyTableForMultipartTest(String keyName, String volume) throws IOException {
List<OmKeyLocationInfoGroup> locationInfoGroups = new ArrayList<>();
List<OmKeyLocationInfo> locationInfoList = new ArrayList<>();
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -31,6 +31,8 @@
import org.apache.hadoop.ozone.om.fs.OzoneManagerFS;
import org.apache.hadoop.ozone.om.helpers.BucketLayout;
import org.apache.hadoop.ozone.om.helpers.ListKeysResult;
import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadList;
Expand All @@ -41,6 +43,7 @@
import org.apache.hadoop.ozone.om.service.SnapshotDeletingService;
import org.apache.hadoop.ozone.om.service.SnapshotDirectoryCleaningService;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ExpiredMultipartUploadsBucket;
import org.apache.ratis.util.function.CheckedFunction;

/**
* Handles key level commands.
Expand Down Expand Up @@ -84,7 +87,6 @@ OmKeyInfo lookupKey(OmKeyArgs args, ResolvedBucket bucketLayout,
OmKeyInfo getKeyInfo(OmKeyArgs args, ResolvedBucket buctket,
String clientAddress) throws IOException;


/**
* Returns a list of keys represented by {@link OmKeyInfo}
* in the given bucket.
Expand Down Expand Up @@ -135,6 +137,18 @@ List<Table.KeyValue<String, String>> getRenamesKeyEntries(
String volume, String bucket, String startKey, int size) throws IOException;


/**
 * Returns a function that, applied to the previous snapshot's {@link KeyManager}, resolves the
 * {@link OmDirectoryInfo} in that snapshot corresponding to the given directory
 * (looking the path up under its pre-rename name when a rename entry exists).
 */
CheckedFunction<KeyManager, OmDirectoryInfo, IOException> getPreviousSnapshotOzoneDirInfo(
long volumeId, OmBucketInfo bucketInfo, OmDirectoryInfo directoryInfo) throws IOException;

/**
 * Returns a function that, applied to the previous snapshot's {@link KeyManager}, resolves the
 * {@link OmDirectoryInfo} in that snapshot corresponding to the given deleted directory, which is
 * represented here as an {@link OmKeyInfo} (the form stored in the deleted-directory table).
 */
CheckedFunction<KeyManager, OmDirectoryInfo, IOException> getPreviousSnapshotOzoneDirInfo(
long volumeId, OmBucketInfo bucketInfo, OmKeyInfo directoryInfo) throws IOException;
Comment on lines +149 to +150
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Is it supposed to be a getPreviousSnapshotOzoneDirInfo or getPreviousSnapshotOzoneKeyInfo? Same for directoryInfo.


/**
* Returns a list deleted entries from the deletedTable.
*
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -174,6 +174,7 @@
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.hadoop.util.Time;
import org.apache.ratis.util.function.CheckedFunction;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

Expand Down Expand Up @@ -774,6 +775,33 @@ public List<Table.KeyValue<String, String>> getRenamesKeyEntries(
}
}

@Override
public CheckedFunction<KeyManager, OmDirectoryInfo, IOException> getPreviousSnapshotOzoneDirInfo(
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Where and how will this be used?

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This would be used in the DirectoryDeletingService where the deletedDeletedDirectory table would have OmKeyInfo whereas the directory table would have OmDirectoryInfo

long volumeId, OmBucketInfo bucketInfo, OmDirectoryInfo keyInfo) throws IOException {
String currentKeyPath = metadataManager.getOzonePathKey(volumeId, bucketInfo.getObjectID(),
keyInfo.getParentObjectID(), keyInfo.getName());
return getPreviousSnapshotOzonePathInfo(bucketInfo, keyInfo.getObjectID(), currentKeyPath,
(km) -> km.getMetadataManager().getDirectoryTable());
}

@Override
public CheckedFunction<KeyManager, OmDirectoryInfo, IOException> getPreviousSnapshotOzoneDirInfo(
long volumeId, OmBucketInfo bucketInfo, OmKeyInfo keyInfo) throws IOException {
// The deleted directory arrives as an OmKeyInfo; build its FSO path key from parent id + file name.
String ozonePathKey = metadataManager.getOzonePathKey(volumeId, bucketInfo.getObjectID(),
keyInfo.getParentObjectID(), keyInfo.getFileName());
// Resolve against the previous snapshot's directory table, honoring any recorded rename.
return getPreviousSnapshotOzonePathInfo(bucketInfo, keyInfo.getObjectID(), ozonePathKey,
km -> km.getMetadataManager().getDirectoryTable());
}

/**
 * Builds a function resolving the previous-snapshot entry for an object: if the
 * snapshotRenamedTable records a rename for {@code objectId}, look up the recorded
 * (pre-rename) key in the previous snapshot; otherwise look up {@code currentKeyPath}.
 */
private <T> CheckedFunction<KeyManager, T, IOException> getPreviousSnapshotOzonePathInfo(
OmBucketInfo bucketInfo, long objectId, String currentKeyPath,
Function<KeyManager, Table<String, T>> table) throws IOException {
String renameKey = metadataManager.getRenameKey(bucketInfo.getVolumeName(), bucketInfo.getBucketName(), objectId);
String renamedKey = metadataManager.getSnapshotRenamedTable().getIfExist(renameKey);
// Decide the lookup key once, outside the returned lambda.
String lookupKey = renamedKey == null ? currentKeyPath : renamedKey;
return previousSnapshotKM -> table.apply(previousSnapshotKM).get(lookupKey);
}

@Override
public List<Table.KeyValue<String, List<OmKeyInfo>>> getDeletedKeyEntries(
String volume, String bucket, String startKey, int size) throws IOException {
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,73 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package org.apache.hadoop.ozone.om.snapshot.filter;

import java.io.IOException;
import org.apache.hadoop.hdds.utils.db.Table;
import org.apache.hadoop.ozone.om.KeyManager;
import org.apache.hadoop.ozone.om.OmSnapshot;
import org.apache.hadoop.ozone.om.OmSnapshotManager;
import org.apache.hadoop.ozone.om.OzoneManager;
import org.apache.hadoop.ozone.om.SnapshotChainManager;
import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
import org.apache.hadoop.ozone.om.helpers.SnapshotInfo;
import org.apache.hadoop.ozone.om.lock.IOzoneManagerLock;
import org.apache.hadoop.ozone.om.snapshot.ReferenceCounted;

/**
* Class to filter out deleted directories which are reclaimable based on their presence in previous snapshot in
* the snapshot chain.
*/
public class ReclaimableDirFilter extends ReclaimableFilter<OmKeyInfo> {

public ReclaimableDirFilter(OzoneManager ozoneManager,
OmSnapshotManager omSnapshotManager, SnapshotChainManager snapshotChainManager,
SnapshotInfo currentSnapshotInfo, KeyManager keyManager,
IOzoneManagerLock lock) {
super(ozoneManager, omSnapshotManager, snapshotChainManager, currentSnapshotInfo, keyManager, lock, 1);
}

@Override
protected String getVolumeName(Table.KeyValue<String, OmKeyInfo> keyValue) throws IOException {
return keyValue.getValue().getVolumeName();
}

@Override
protected String getBucketName(Table.KeyValue<String, OmKeyInfo> keyValue) throws IOException {
return keyValue.getValue().getBucketName();
}

@Override
protected Boolean isReclaimable(Table.KeyValue<String, OmKeyInfo> deletedDirInfo) throws IOException {
ReferenceCounted<OmSnapshot> previousSnapshot = getPreviousOmSnapshot(0);
KeyManager prevKeyManager = previousSnapshot == null ? null : previousSnapshot.get().getKeyManager();
return isDirReclaimable(getVolumeId(), getBucketInfo(), deletedDirInfo.getValue(), getKeyManager(), prevKeyManager);
}

private boolean isDirReclaimable(long volumeId, OmBucketInfo bucketInfo, OmKeyInfo dirInfo,
KeyManager keyManager, KeyManager previousKeyManager) throws IOException {
if (previousKeyManager == null) {
return true;
}
OmDirectoryInfo prevDirectoryInfo =
keyManager.getPreviousSnapshotOzoneDirInfo(volumeId, bucketInfo, dirInfo).apply(previousKeyManager);
return prevDirectoryInfo == null || prevDirectoryInfo.getObjectID() != dirInfo.getObjectID();
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

if the directory object is not in the previous snapshot, or the object id is not the same (the directory was deleted and then a new one was created), then it is reclaimable.

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

yeah

}
}
Original file line number Diff line number Diff line change
Expand Up @@ -32,7 +32,7 @@
import org.apache.hadoop.ozone.om.snapshot.ReferenceCounted;

/**
* Filter to return rename table entries which are reclaimable based on the key presence in previous snapshot's
* Class to filter out rename table entries which are reclaimable based on the key presence in previous snapshot's
* keyTable/DirectoryTable in the snapshot chain.
*/
public class ReclaimableRenameEntryFilter extends ReclaimableFilter<String> {
Expand Down
Loading