From 8c626c65dbcccb45da7146adde7b5d0b2818fa20 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Sun, 11 May 2025 20:12:28 -0400 Subject: [PATCH 01/20] HDDS-13021. AbstractKeyDeletingService should have unique call id for each and every ratis request Change-Id: I5c29c572df9d2240b1d58fbc88826eb5ed8ad881 --- .../om/service/AbstractKeyDeletingService.java | 14 ++++++++++---- .../ozone/om/service/SnapshotDeletingService.java | 4 ++-- 2 files changed, 12 insertions(+), 6 deletions(-) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/AbstractKeyDeletingService.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/AbstractKeyDeletingService.java index 0d36da711703..7e3df6337e59 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/AbstractKeyDeletingService.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/AbstractKeyDeletingService.java @@ -81,6 +81,7 @@ public abstract class AbstractKeyDeletingService extends BackgroundService private final AtomicLong movedDirsCount; private final AtomicLong movedFilesCount; private final AtomicLong runCount; + private final AtomicLong callId; private final BootstrapStateHandler.Lock lock = new BootstrapStateHandler.Lock(); @@ -97,6 +98,7 @@ public AbstractKeyDeletingService(String serviceName, long interval, this.runCount = new AtomicLong(0); this.metrics = ozoneManager.getDeletionMetrics(); this.perfMetrics = ozoneManager.getPerfMetrics(); + this.callId = new AtomicLong(0); } protected int processKeyDeletes(List keyBlocksList, @@ -220,7 +222,7 @@ private int submitPurgeKeysRequest(List results, // Submit PurgeKeys request to OM try { - OzoneManagerRatisUtils.submitRequest(ozoneManager, omRequest, clientId, runCount.get()); + OzoneManagerRatisUtils.submitRequest(ozoneManager, omRequest, clientId, callId.incrementAndGet()); } catch (ServiceException e) { LOG.error("PurgeKey request failed. Will retry at next run.", e); return 0; @@ -250,7 +252,7 @@ private void addToMap(Map, List> map, String object protected void submitPurgePaths(List requests, String snapTableKey, - UUID expectedPreviousSnapshotId, long rnCnt) { + UUID expectedPreviousSnapshotId) { OzoneManagerProtocolProtos.PurgeDirectoriesRequest.Builder purgeDirRequest = OzoneManagerProtocolProtos.PurgeDirectoriesRequest.newBuilder(); @@ -275,7 +277,7 @@ protected void submitPurgePaths(List requests, // Submit Purge paths request to OM try { - OzoneManagerRatisUtils.submitRequest(ozoneManager, omRequest, clientId, rnCnt); + OzoneManagerRatisUtils.submitRequest(ozoneManager, omRequest, clientId, callId.incrementAndGet()); } catch (ServiceException e) { LOG.error("PurgePaths request failed. Will retry at next run.", e); } @@ -403,7 +405,7 @@ public void optimizeDirDeletesAndSubmitRequest( } if (!purgePathRequestList.isEmpty()) { - submitPurgePaths(purgePathRequestList, snapTableKey, expectedPreviousSnapshotId, rnCnt); + submitPurgePaths(purgePathRequestList, snapTableKey, expectedPreviousSnapshotId); } if (dirNum != 0 || subDirNum != 0 || subFileNum != 0) { @@ -613,6 +615,10 @@ public AtomicLong getRunCount() { return runCount; } + public AtomicLong getCallId() { + return callId; + } + /** * Returns the number of dirs deleted by the background service. 
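The switch from the shared run counter to a dedicated call-id counter matters because Ratis deduplicates requests on the (clientId, callId) pair: two different purge requests submitted under one call id can collide in the retry cache, with the second answered from the first one's cached reply instead of being applied. A minimal sketch of the pattern, assuming only java.util.concurrent.atomic (class and method names here are illustrative, not the patch's API):

    import java.util.concurrent.atomic.AtomicLong;

    // Sketch: why a call-id counter must be separate from a run counter.
    public class RatisCallIds {
      // One fresh id per submitted Ratis request; never reused by this client.
      private final AtomicLong callId = new AtomicLong(0);
      // Counts task runs only; one run may submit several requests, so it
      // cannot double as a call id without colliding in the retry cache.
      private final AtomicLong runCount = new AtomicLong(0);

      public long nextCallId() {
        return callId.incrementAndGet();
      }

      public long startRun() {
        return runCount.incrementAndGet();
      }
    }

With this split, a single background run that submits both a PurgeKeys and a PurgePaths request sends them under distinct call ids, which is what the changes to submitPurgeKeysRequest and submitPurgePaths above achieve.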
* diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/SnapshotDeletingService.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/SnapshotDeletingService.java index 5c2b16a604b3..44bb7ab088a7 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/SnapshotDeletingService.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/SnapshotDeletingService.java @@ -304,8 +304,8 @@ private void submitSnapshotMoveDeletedKeys(SnapshotInfo snapInfo, private void submitRequest(OMRequest omRequest) { try { - Status status = - OzoneManagerRatisUtils.submitRequest(ozoneManager, omRequest, clientId, getRunCount().get()).getStatus(); + Status status = OzoneManagerRatisUtils.submitRequest(ozoneManager, omRequest, clientId, + getCallId().incrementAndGet()).getStatus(); if (!Objects.equals(status, Status.OK)) { LOG.error("Request: {} failed with an status: {}. Will retry in the next run.", omRequest, status); } From 50b1a2309370b7f22a7335b5061631052dced432 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Sun, 11 May 2025 21:23:25 -0400 Subject: [PATCH 02/20] HDDS-13022. Segragate Exclusive Dir Size and ExclusiveKeySize in SnapshotInfo Change-Id: I16d2881af973799773335343debd856cb763f72b --- .../hadoop/ozone/client/OzoneSnapshot.java | 4 +- .../hadoop/ozone/om/helpers/SnapshotInfo.java | 48 ++++++++ .../TestSnapshotDirectoryCleaningService.java | 9 +- .../src/main/proto/OmClientProtocol.proto | 5 + .../OMSnapshotSetPropertyRequest.java | 116 ++++++++++++------ .../OMSnapshotSetPropertyResponse.java | 15 ++- .../SnapshotDirectoryCleaningService.java | 4 +- 7 files changed, 147 insertions(+), 54 deletions(-) diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneSnapshot.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneSnapshot.java index 8297fdcc853b..8f284457b04f 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneSnapshot.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneSnapshot.java @@ -194,8 +194,8 @@ public static OzoneSnapshot fromSnapshotInfo(SnapshotInfo snapshotInfo) { snapshotInfo.getCheckpointDir(), snapshotInfo.getReferencedSize(), snapshotInfo.getReferencedReplicatedSize(), - snapshotInfo.getExclusiveSize(), - snapshotInfo.getExclusiveReplicatedSize() + snapshotInfo.getExclusiveSize() + snapshotInfo.getExclusiveDirSize(), + snapshotInfo.getExclusiveReplicatedSize() + snapshotInfo.getExclusiveDirReplicatedSize() ); } } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/SnapshotInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/SnapshotInfo.java index ab542a141be9..3a43ab2878f8 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/SnapshotInfo.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/SnapshotInfo.java @@ -82,6 +82,8 @@ public final class SnapshotInfo implements Auditable, CopyObject { private long referencedReplicatedSize; private long exclusiveSize; private long exclusiveReplicatedSize; + private long exclusiveDirSize; + private long exclusiveDirReplicatedSize; private boolean deepCleanedDeletedDir; private ByteString lastTransactionInfo; @@ -104,6 +106,8 @@ private SnapshotInfo(Builder b) { this.referencedReplicatedSize = b.referencedReplicatedSize; this.exclusiveSize = b.exclusiveSize; 
this.exclusiveReplicatedSize = b.exclusiveReplicatedSize; + this.exclusiveDirSize = b.exclusiveDirSize; + this.exclusiveDirReplicatedSize = b.exclusiveDirReplicatedSize; this.deepCleanedDeletedDir = b.deepCleanedDeletedDir; this.lastTransactionInfo = b.lastTransactionInfo; } @@ -233,6 +237,8 @@ public SnapshotInfo.Builder toBuilder() { .setReferencedReplicatedSize(referencedReplicatedSize) .setExclusiveSize(exclusiveSize) .setExclusiveReplicatedSize(exclusiveReplicatedSize) + .setExclusiveDirSize(exclusiveDirSize) + .setExclusiveDirReplicatedSize(exclusiveDirReplicatedSize) .setDeepCleanedDeletedDir(deepCleanedDeletedDir) .setLastTransactionInfo(lastTransactionInfo); } @@ -259,6 +265,8 @@ public static class Builder { private long referencedReplicatedSize; private long exclusiveSize; private long exclusiveReplicatedSize; + private long exclusiveDirSize; + private long exclusiveDirReplicatedSize; private boolean deepCleanedDeletedDir; private ByteString lastTransactionInfo; @@ -374,6 +382,18 @@ public Builder setExclusiveReplicatedSize(long exclusiveReplicatedSize) { return this; } + /** @param exclusiveDirSize - Snapshot exclusive size. */ + public Builder setExclusiveDirSize(long exclusiveDirSize) { + this.exclusiveDirSize = exclusiveDirSize; + return this; + } + + /** @param exclusiveDirReplicatedSize - Snapshot exclusive size w/ replication. */ + public Builder setExclusiveDirReplicatedSize(long exclusiveDirReplicatedSize) { + this.exclusiveDirReplicatedSize = exclusiveDirReplicatedSize; + return this; + } + public Builder setDeepCleanedDeletedDir(boolean deepCleanedDeletedDir) { this.deepCleanedDeletedDir = deepCleanedDeletedDir; return this; @@ -408,6 +428,8 @@ public OzoneManagerProtocolProtos.SnapshotInfo getProtobuf() { .setReferencedReplicatedSize(referencedReplicatedSize) .setExclusiveSize(exclusiveSize) .setExclusiveReplicatedSize(exclusiveReplicatedSize) + .setExclusiveDirSize(exclusiveDirSize) + .setExclusiveDirReplicatedSize(exclusiveDirReplicatedSize) .setDeepCleanedDeletedDir(deepCleanedDeletedDir); if (pathPreviousSnapshotId != null) { @@ -485,6 +507,14 @@ public static SnapshotInfo getFromProtobuf( snapshotInfoProto.getExclusiveReplicatedSize()); } + if (snapshotInfoProto.hasExclusiveDirSize()) { + osib.setExclusiveDirSize(snapshotInfoProto.getExclusiveDirSize()); + } + + if (snapshotInfoProto.hasExclusiveDirReplicatedSize()) { + osib.setExclusiveDirReplicatedSize(snapshotInfoProto.getExclusiveDirReplicatedSize()); + } + if (snapshotInfoProto.hasDeepCleanedDeletedDir()) { osib.setDeepCleanedDeletedDir( snapshotInfoProto.getDeepCleanedDeletedDir()); @@ -571,10 +601,26 @@ public long getExclusiveSize() { return exclusiveSize; } + public void setExclusiveDirSize(long exclusiveDirSize) { + this.exclusiveDirSize = exclusiveDirSize; + } + + public long getExclusiveDirSize() { + return exclusiveDirSize; + } + public void setExclusiveReplicatedSize(long exclusiveReplicatedSize) { this.exclusiveReplicatedSize = exclusiveReplicatedSize; } + public void setExclusiveDirReplicatedSize(long exclusiveDirReplicatedSize) { + this.exclusiveDirReplicatedSize = exclusiveDirReplicatedSize; + } + + public long getExclusiveDirReplicatedSize() { + return exclusiveDirReplicatedSize; + } + public long getExclusiveReplicatedSize() { return exclusiveReplicatedSize; } @@ -707,6 +753,8 @@ public String toString() { ", referencedReplicatedSize: '" + referencedReplicatedSize + '\'' + ", exclusiveSize: '" + exclusiveSize + '\'' + ", exclusiveReplicatedSize: '" + exclusiveReplicatedSize + '\'' + + ", 
exclusiveDirSize: '" + exclusiveDirSize + '\'' + + ", exclusiveDirReplicatedSize: '" + exclusiveDirReplicatedSize + '\'' + ", deepCleanedDeletedDir: '" + deepCleanedDeletedDir + '\'' + ", lastTransactionInfo: '" + lastTransactionInfo + '\'' + '}'; diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDirectoryCleaningService.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDirectoryCleaningService.java index 3f6e007ef4ad..2b4a3bab812a 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDirectoryCleaningService.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDirectoryCleaningService.java @@ -234,17 +234,18 @@ public void testExclusiveSizeWithDirectoryDeepClean() throws Exception { put("snap2", 5L); put("snap3", 0L); }}; - Thread.sleep(500); try (TableIterator> iterator = snapshotInfoTable.iterator()) { while (iterator.hasNext()) { Table.KeyValue snapshotEntry = iterator.next(); String snapshotName = snapshotEntry.getValue().getName(); - assertEquals(expectedSize.get(snapshotName), snapshotEntry.getValue(). - getExclusiveSize()); + SnapshotInfo snapshotInfo = snapshotInfoTable.get(snapshotEntry.getKey()); + System.out.println(snapshotInfo.getName() + " " + snapshotInfo.getDeepCleanedDeletedDir()); + assertEquals(expectedSize.get(snapshotName), + snapshotInfo.getExclusiveSize() + snapshotInfo.getExclusiveDirSize()); // Since for the test we are using RATIS/THREE assertEquals(expectedSize.get(snapshotName) * 3, - snapshotEntry.getValue().getExclusiveReplicatedSize()); + snapshotInfo.getExclusiveReplicatedSize() + snapshotInfo.getExclusiveDirReplicatedSize()); } } diff --git a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto index c7ff385016f7..60f609808cae 100644 --- a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto +++ b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto @@ -303,6 +303,7 @@ message OMRequest { optional GetObjectTaggingRequest getObjectTaggingRequest = 140; optional PutObjectTaggingRequest putObjectTaggingRequest = 141; optional DeleteObjectTaggingRequest deleteObjectTaggingRequest = 142; + repeated SetSnapshotPropertyRequest SetSnapshotPropertyRequests = 143; } message OMResponse { @@ -892,6 +893,9 @@ message SnapshotInfo { // note: shared sizes can be calculated from: referenced - exclusive optional bool deepCleanedDeletedDir = 19; optional bytes lastTransactionInfo = 20; + optional uint64 exclusiveDirSize = 21; + // snapshot exclusive size after replication + optional uint64 exclusiveDirReplicatedSize = 22; } message SnapshotDiffJobProto { @@ -2029,6 +2033,7 @@ message SetSnapshotPropertyRequest { optional SnapshotSize snapshotSize = 3; optional bool deepCleanedDeletedDir = 4; optional bool deepCleanedDeletedKey = 5; + optional SnapshotSize snapshotDirSize = 6; } // SnapshotProperty in entirely deprecated, Keeping it here for proto.lock compatibility diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotSetPropertyRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotSetPropertyRequest.java index 1fc847bf6748..d9594bfe618e 100644 --- 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotSetPropertyRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotSetPropertyRequest.java @@ -18,8 +18,16 @@ package org.apache.hadoop.ozone.om.request.snapshot; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.FILE_NOT_FOUND; +import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.INVALID_REQUEST; +import com.google.common.collect.Lists; import java.io.IOException; +import java.io.UncheckedIOException; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; import org.apache.hadoop.hdds.utils.db.cache.CacheKey; import org.apache.hadoop.hdds.utils.db.cache.CacheValue; import org.apache.hadoop.ozone.om.OMMetadataManager; @@ -49,6 +57,32 @@ public OMSnapshotSetPropertyRequest(OMRequest omRequest) { super(omRequest); } + private void updateSnapshotProperty( + SnapshotInfo snapInfo, OzoneManagerProtocolProtos.SetSnapshotPropertyRequest setSnapshotPropertyRequest) { + if (setSnapshotPropertyRequest.hasDeepCleanedDeletedDir()) { + snapInfo.setDeepCleanedDeletedDir(setSnapshotPropertyRequest + .getDeepCleanedDeletedDir()); + } + + if (setSnapshotPropertyRequest.hasDeepCleanedDeletedKey()) { + snapInfo.setDeepClean(setSnapshotPropertyRequest + .getDeepCleanedDeletedKey()); + } + + if (setSnapshotPropertyRequest.hasSnapshotSize()) { + SnapshotSize snapshotSize = setSnapshotPropertyRequest.getSnapshotSize(); + // Set Exclusive size. + snapInfo.setExclusiveSize(snapshotSize.getExclusiveSize()); + snapInfo.setExclusiveReplicatedSize(snapshotSize.getExclusiveReplicatedSize()); + } + if (setSnapshotPropertyRequest.hasSnapshotDirSize()) { + SnapshotSize snapshotSize = setSnapshotPropertyRequest.getSnapshotDirSize(); + // Set Exclusive size. 
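The producer of this new snapshotDirSize field is the directory deep-cleaning service; later in this patch, updateExclusiveSize is switched to populate snapshotDirSize instead of snapshotSize, so the key deep-cleaning and dir deep-cleaning results no longer overwrite each other. A sketch of the request it builds, as a fragment assuming the protobuf-generated classes from OzoneManagerProtocolProtos are imported (the snapshot key and sizes are made up for illustration):

    SnapshotSize dirSize = SnapshotSize.newBuilder()
        .setExclusiveSize(2000L)            // exclusive bytes found by the dir deep clean
        .setExclusiveReplicatedSize(6000L)  // the same bytes after RATIS/THREE replication
        .build();

    SetSnapshotPropertyRequest request = SetSnapshotPropertyRequest.newBuilder()
        .setSnapshotKey("/vol1/bucket1/snap1")  // illustrative snapshot key
        .setSnapshotDirSize(dirSize)            // new field 6 in the .proto
        .build();

On the OM side, the branch below stores the two numbers verbatim via setExclusiveDirSize and setExclusiveDirReplicatedSize rather than accumulating them, which keeps repeated runs of the service from double-counting.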
+ snapInfo.setExclusiveDirSize(snapshotSize.getExclusiveSize()); + snapInfo.setExclusiveDirReplicatedSize(snapshotSize.getExclusiveReplicatedSize()); + } + } + @Override public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, ExecutionContext context) { OMMetrics omMetrics = ozoneManager.getMetrics(); @@ -58,55 +92,57 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, Execut OzoneManagerProtocolProtos.OMResponse.Builder omResponse = OmResponseUtil.getOMResponseBuilder(getOmRequest()); - OzoneManagerProtocolProtos.SetSnapshotPropertyRequest setSnapshotPropertyRequest = getOmRequest() .getSetSnapshotPropertyRequest(); - - String snapshotKey = setSnapshotPropertyRequest.getSnapshotKey(); - + List setSnapshotPropertyRequests = Lists.newArrayList(); + if (getOmRequest().hasSetSnapshotPropertyRequest()) { + setSnapshotPropertyRequests.add(getOmRequest().getSetSnapshotPropertyRequest()); + } + setSnapshotPropertyRequests.addAll(getOmRequest().getSetSnapshotPropertyRequestsList()); + Set snapshotKeys = new HashSet<>(); + Map snapshotInfoMap = new HashMap<>(); try { - SnapshotInfo updatedSnapInfo = metadataManager.getSnapshotInfoTable().get(snapshotKey); - if (updatedSnapInfo == null) { - LOG.error("Snapshot: '{}' doesn't not exist in snapshot table.", snapshotKey); - throw new OMException("Snapshot: '{" + snapshotKey + "}' doesn't not exist in snapshot table.", FILE_NOT_FOUND); + for (OzoneManagerProtocolProtos.SetSnapshotPropertyRequest setSnapshotPropertyRequest : + setSnapshotPropertyRequests) { + String snapshotKey = setSnapshotPropertyRequest.getSnapshotKey(); + if (snapshotKeys.contains(snapshotKey)) { + throw new OMException("Snapshot with snapshot key: " + snapshotKey + " added multiple times in the request. " + + "Request: " + setSnapshotPropertyRequests, INVALID_REQUEST); + } + snapshotKeys.add(snapshotKey); + SnapshotInfo updatedSnapInfo = snapshotInfoMap.computeIfAbsent(snapshotKey, + (k) -> { + try { + return metadataManager.getSnapshotInfoTable().get(k); + } catch (IOException e) { + throw new UncheckedIOException("Exception while getting key " + k, e); + } + }); + if (updatedSnapInfo == null) { + LOG.error("Snapshot: '{}' doesn't exist in snapshot table.", snapshotKey); + throw new OMException("Snapshot: '" + snapshotKey + "' doesn't exist in snapshot table. " + + "Request: " + setSnapshotPropertyRequests, FILE_NOT_FOUND); + } + updateSnapshotProperty(updatedSnapInfo, setSnapshotPropertyRequest); } - - if (setSnapshotPropertyRequest.hasDeepCleanedDeletedDir()) { - updatedSnapInfo.setDeepCleanedDeletedDir(setSnapshotPropertyRequest - .getDeepCleanedDeletedDir()); + if (snapshotInfoMap.isEmpty()) { + throw new OMException("Snapshots: " + snapshotKeys + " don't exist in snapshot table.", + FILE_NOT_FOUND); } - - if (setSnapshotPropertyRequest.hasDeepCleanedDeletedKey()) { - updatedSnapInfo.setDeepClean(setSnapshotPropertyRequest - .getDeepCleanedDeletedKey()); + // Update Table Cache + for (Map.Entry snapshot : snapshotInfoMap.entrySet()) { + metadataManager.getSnapshotInfoTable().addCacheEntry( + new CacheKey<>(snapshot.getKey()), + CacheValue.get(context.getIndex(), snapshot.getValue())); + omMetrics.incNumSnapshotSetProperties(); } - if (setSnapshotPropertyRequest.hasSnapshotSize()) { - SnapshotSize snapshotSize = setSnapshotPropertyRequest - .getSnapshotSize(); - long exclusiveSize = updatedSnapInfo.getExclusiveSize() + - snapshotSize.getExclusiveSize(); - long exclusiveReplicatedSize = updatedSnapInfo - .getExclusiveReplicatedSize() + snapshotSize - .getExclusiveReplicatedSize(); - // Set Exclusive size. - updatedSnapInfo.setExclusiveSize(exclusiveSize); - updatedSnapInfo.setExclusiveReplicatedSize(exclusiveReplicatedSize); - } - // Update Table Cache - metadataManager.getSnapshotInfoTable().addCacheEntry( - new CacheKey<>(snapshotKey), - CacheValue.get(context.getIndex(), updatedSnapInfo)); - omClientResponse = new OMSnapshotSetPropertyResponse( - omResponse.build(), updatedSnapInfo); - omMetrics.incNumSnapshotSetProperties(); - LOG.info("Successfully executed snapshotSetPropertyRequest: {{}}.", setSnapshotPropertyRequest); - } catch (IOException ex) { + omClientResponse = new OMSnapshotSetPropertyResponse(omResponse.build(), snapshotInfoMap.values()); + LOG.info("Successfully executed snapshotSetPropertyRequest: {{}}.", setSnapshotPropertyRequests); + } catch (UncheckedIOException | IOException ex) { omClientResponse = new OMSnapshotSetPropertyResponse( createErrorOMResponse(omResponse, ex)); omMetrics.incNumSnapshotSetPropertyFails(); - LOG.error("Failed to execute snapshotSetPropertyRequest: {{}}.", setSnapshotPropertyRequest, ex); + LOG.error("Failed to execute snapshotSetPropertyRequest: {{}}.", setSnapshotPropertyRequests, ex); } return omClientResponse; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotSetPropertyResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotSetPropertyResponse.java index 6f2bcddd11df..5d0f8d642b49 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotSetPropertyResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotSetPropertyResponse.java @@ -21,6 +21,7 @@ import jakarta.annotation.Nonnull; import java.io.IOException; +import java.util.Collection; import org.apache.hadoop.hdds.utils.db.BatchOperation; import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; @@ -33,26 +34,28 @@ */ @CleanupTableInfo(cleanupTables = {SNAPSHOT_INFO_TABLE}) public class OMSnapshotSetPropertyResponse extends OMClientResponse { - private final SnapshotInfo updatedSnapInfo; + private final Collection updatedSnapInfos; public 
OMSnapshotSetPropertyResponse( @Nonnull OMResponse omResponse, - @Nonnull SnapshotInfo updatedSnapInfo) { + @Nonnull Collection updatedSnapInfos) { super(omResponse); - this.updatedSnapInfo = updatedSnapInfo; + this.updatedSnapInfos = updatedSnapInfos; } public OMSnapshotSetPropertyResponse(@Nonnull OMResponse omResponse) { super(omResponse); checkStatusNotOK(); - this.updatedSnapInfo = null; + this.updatedSnapInfos = null; } @Override protected void addToDBBatch(OMMetadataManager omMetadataManager, BatchOperation batchOperation) throws IOException { - omMetadataManager.getSnapshotInfoTable().putWithBatch(batchOperation, - updatedSnapInfo.getTableKey(), updatedSnapInfo); + for (SnapshotInfo updatedSnapInfo : updatedSnapInfos) { + omMetadataManager.getSnapshotInfoTable().putWithBatch(batchOperation, + updatedSnapInfo.getTableKey(), updatedSnapInfo); + } } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/SnapshotDirectoryCleaningService.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/SnapshotDirectoryCleaningService.java index ed79c13ff971..3a307804555e 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/SnapshotDirectoryCleaningService.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/SnapshotDirectoryCleaningService.java @@ -330,7 +330,7 @@ private void iterateDirectoryTree( } } - private void updateExclusiveSize(String prevSnapshotKeyTable) { + private void updateExclusiveSize(String prevSnapshotKeyTable) throws IOException { ClientId clientId = ClientId.randomId(); SnapshotSize snapshotSize = SnapshotSize.newBuilder() .setExclusiveSize( @@ -345,7 +345,7 @@ private void updateExclusiveSize(String prevSnapshotKeyTable) { setSnapshotPropertyRequest = SetSnapshotPropertyRequest.newBuilder() .setSnapshotKey(prevSnapshotKeyTable) - .setSnapshotSize(snapshotSize) + .setSnapshotDirSize(snapshotSize) .build(); OMRequest omRequest = OMRequest.newBuilder() From 685aac2915e5c427a979c82ee79290851d3d3362 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Mon, 12 May 2025 05:27:39 -0400 Subject: [PATCH 03/20] HDDS-13022. 
Add Test OzoneSnapshot client code Change-Id: I67d685aff12daaf98cf6a8eb5f6a5750c4cf6394 --- .../hadoop/ozone/client/OzoneSnapshot.java | 18 ++++++ .../ozone/client/TestOzoneSnapshot.java | 61 +++++++++++++++++++ .../ozone/om/helpers/TestOmSnapshotInfo.java | 8 ++- 3 files changed, 86 insertions(+), 1 deletion(-) create mode 100644 hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestOzoneSnapshot.java diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneSnapshot.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneSnapshot.java index 8f284457b04f..17b44fc367b4 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneSnapshot.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneSnapshot.java @@ -17,6 +17,7 @@ package org.apache.hadoop.ozone.client; +import java.util.Objects; import java.util.UUID; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo.SnapshotStatus; @@ -198,4 +199,21 @@ public static OzoneSnapshot fromSnapshotInfo(SnapshotInfo snapshotInfo) { snapshotInfo.getExclusiveReplicatedSize() + snapshotInfo.getExclusiveDirReplicatedSize() ); } + + @Override + public final boolean equals(Object o) { + if (!(o instanceof OzoneSnapshot)) { + return false; + } + + OzoneSnapshot that = (OzoneSnapshot) o; + return creationTime == that.creationTime && referencedSize == that.referencedSize && + referencedReplicatedSize == that.referencedReplicatedSize && exclusiveSize == that.exclusiveSize && + exclusiveReplicatedSize == that.exclusiveReplicatedSize && + Objects.equals(volumeName, that.volumeName) && Objects.equals(bucketName, that.bucketName) && + Objects.equals(name, that.name) && snapshotStatus == that.snapshotStatus && + Objects.equals(snapshotId, that.snapshotId) && + Objects.equals(snapshotPath, that.snapshotPath) && + Objects.equals(checkpointDir, that.checkpointDir); + } } diff --git a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestOzoneSnapshot.java b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestOzoneSnapshot.java new file mode 100644 index 000000000000..d171a23fc28f --- /dev/null +++ b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestOzoneSnapshot.java @@ -0,0 +1,61 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.ozone.client; + +import static org.apache.hadoop.ozone.om.helpers.SnapshotInfo.SnapshotStatus.SNAPSHOT_ACTIVE; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.mockito.Mockito.when; + +import java.util.UUID; +import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Test; +import org.mockito.Mockito; + +public class TestOzoneSnapshot { + + private SnapshotInfo getMockedSnapshotInfo(UUID snapshotId) { + SnapshotInfo snapshotInfo = Mockito.mock(SnapshotInfo.class); + when(snapshotInfo.getVolumeName()).thenReturn("volume"); + when(snapshotInfo.getBucketName()).thenReturn("bucket"); + when(snapshotInfo.getName()).thenReturn("snap"); + when(snapshotInfo.getCreationTime()).thenReturn(1000L); + when(snapshotInfo.getSnapshotStatus()).thenReturn(SNAPSHOT_ACTIVE); + when(snapshotInfo.getSnapshotId()).thenReturn(snapshotId); + when(snapshotInfo.getSnapshotPath()).thenReturn("volume/bucket"); + when(snapshotInfo.getCheckpointDir()).thenReturn("checkpointDir"); + when(snapshotInfo.getReferencedSize()).thenReturn(1000L); + when(snapshotInfo.getReferencedReplicatedSize()).thenReturn(3000L); + when(snapshotInfo.getExclusiveSize()).thenReturn(4000L); + when(snapshotInfo.getExclusiveReplicatedSize()).thenReturn(12000L); + when(snapshotInfo.getExclusiveDirSize()).thenReturn(2000L); + when(snapshotInfo.getExclusiveDirReplicatedSize()).thenReturn(6000L); + return snapshotInfo; + } + + @Test + public void testOzoneSnapshotFromSnapshotInfo() { + UUID snapshotId = UUID.randomUUID(); + SnapshotInfo snapshotInfo = getMockedSnapshotInfo(snapshotId); + OzoneSnapshot ozoneSnapshot = OzoneSnapshot.fromSnapshotInfo(snapshotInfo); + OzoneSnapshot expectedOzoneSnapshot = new OzoneSnapshot( + "volume", "bucket", "snap", 1000L, SNAPSHOT_ACTIVE, snapshotId, + "volume/bucket", "checkpointDir", 1000L, 3000L, 6000L, 18000L); + assertEquals(expectedOzoneSnapshot, ozoneSnapshot); + } +} diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmSnapshotInfo.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmSnapshotInfo.java index 677458074a9e..e10965f0a66e 100644 --- a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmSnapshotInfo.java +++ b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmSnapshotInfo.java @@ -67,7 +67,9 @@ private SnapshotInfo createSnapshotInfo() { .setReferencedSize(2000L) .setReferencedReplicatedSize(6000L) .setExclusiveSize(1000L) + .setExclusiveDirSize(2000L) .setExclusiveReplicatedSize(3000L) + .setExclusiveDirReplicatedSize(6000L) .setDeepCleanedDeletedDir(false) .build(); } @@ -92,6 +94,8 @@ private OzoneManagerProtocolProtos.SnapshotInfo createSnapshotInfoProto() { .setReferencedReplicatedSize(6000L) .setExclusiveSize(1000L) .setExclusiveReplicatedSize(3000L) + .setExclusiveDirSize(2000L) + .setExclusiveDirReplicatedSize(6000L) .setDeepCleanedDeletedDir(false) .build(); } @@ -179,7 +183,9 @@ public void testSnapshotInfoProtoToSnapshotInfo() { snapshotInfoActual.getExclusiveReplicatedSize()); assertEquals(snapshotInfoExpected.getDeepCleanedDeletedDir(), snapshotInfoActual.getDeepCleanedDeletedDir()); - + assertEquals(snapshotInfoExpected.getExclusiveDirSize(), snapshotInfoActual.getExclusiveDirSize()); + assertEquals(snapshotInfoExpected.getExclusiveDirReplicatedSize(), + snapshotInfoActual.getExclusiveDirReplicatedSize()); assertEquals(snapshotInfoExpected, 
snapshotInfoActual); } From c29d3d8be185b2e640cd18ffbe2dc6f921f9ec34 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Mon, 12 May 2025 06:29:54 -0400 Subject: [PATCH 04/20] HDDS-13022. Add hashcode for OzoneSnapshots Change-Id: I4215cacc0f0486a7aa46b60971149483028bed38 --- .../java/org/apache/hadoop/ozone/client/OzoneSnapshot.java | 6 ++++++ .../org/apache/hadoop/ozone/client/TestOzoneSnapshot.java | 1 - 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneSnapshot.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneSnapshot.java index 17b44fc367b4..9213737b96c8 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneSnapshot.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneSnapshot.java @@ -216,4 +216,10 @@ public final boolean equals(Object o) { Objects.equals(snapshotPath, that.snapshotPath) && Objects.equals(checkpointDir, that.checkpointDir); } + + @Override + public int hashCode() { + return Objects.hash(volumeName, bucketName, name, creationTime, snapshotStatus, snapshotId, snapshotPath, + checkpointDir, referencedSize, referencedReplicatedSize, exclusiveSize, exclusiveReplicatedSize); + } } diff --git a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestOzoneSnapshot.java b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestOzoneSnapshot.java index d171a23fc28f..eafd9a2e846f 100644 --- a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestOzoneSnapshot.java +++ b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestOzoneSnapshot.java @@ -23,7 +23,6 @@ import java.util.UUID; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; -import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.Test; import org.mockito.Mockito; From 49184f69c1bd54b0e6bca40daedc9f7c361f5c2e Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Mon, 12 May 2025 06:31:12 -0400 Subject: [PATCH 05/20] HDDS-13022. Add hashcode for OzoneSnapshots Change-Id: I7a05f30d0a58538708e5acdbce36d836b6a5542e --- .../java/org/apache/hadoop/ozone/client/TestOzoneSnapshot.java | 3 +++ 1 file changed, 3 insertions(+) diff --git a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestOzoneSnapshot.java b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestOzoneSnapshot.java index eafd9a2e846f..1e975e3ca1a5 100644 --- a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestOzoneSnapshot.java +++ b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestOzoneSnapshot.java @@ -26,6 +26,9 @@ import org.junit.jupiter.api.Test; import org.mockito.Mockito; +/** + * Test class for OzoneSnapshot class. + */ public class TestOzoneSnapshot { private SnapshotInfo getMockedSnapshotInfo(UUID snapshotId) { From ea94417f5c5fd25a0f385f881fb8f017fc2b6ab9 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Tue, 13 May 2025 18:53:06 -0400 Subject: [PATCH 06/20] HDDS-13022. 
Address review comments Change-Id: I86c0adae571debf1e35e86660beb8c6a62d8b6fc --- .../hadoop/ozone/client/OzoneSnapshot.java | 4 +- .../ozone/client/TestOzoneSnapshot.java | 4 +- .../hadoop/ozone/om/helpers/SnapshotInfo.java | 64 ++++++++++--------- .../ozone/om/helpers/TestOmSnapshotInfo.java | 15 +++-- .../TestSnapshotDirectoryCleaningService.java | 4 +- .../src/main/proto/OmClientProtocol.proto | 4 +- .../OMSnapshotSetPropertyRequest.java | 4 +- 7 files changed, 51 insertions(+), 48 deletions(-) diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneSnapshot.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneSnapshot.java index 9213737b96c8..b7bf7051caeb 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneSnapshot.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneSnapshot.java @@ -195,8 +195,8 @@ public static OzoneSnapshot fromSnapshotInfo(SnapshotInfo snapshotInfo) { snapshotInfo.getCheckpointDir(), snapshotInfo.getReferencedSize(), snapshotInfo.getReferencedReplicatedSize(), - snapshotInfo.getExclusiveSize() + snapshotInfo.getExclusiveDirSize(), - snapshotInfo.getExclusiveReplicatedSize() + snapshotInfo.getExclusiveDirReplicatedSize() + snapshotInfo.getExclusiveSize() + snapshotInfo.getExclusiveSizeDeltaFromDirDeepCleaning(), + snapshotInfo.getExclusiveReplicatedSize() + snapshotInfo.getExclusiveReplicatedSizeDeltaFromDirDeepCleaning() ); } diff --git a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestOzoneSnapshot.java b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestOzoneSnapshot.java index 1e975e3ca1a5..8980e28b59b4 100644 --- a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestOzoneSnapshot.java +++ b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestOzoneSnapshot.java @@ -45,8 +45,8 @@ private SnapshotInfo getMockedSnapshotInfo(UUID snapshotId) { when(snapshotInfo.getReferencedReplicatedSize()).thenReturn(3000L); when(snapshotInfo.getExclusiveSize()).thenReturn(4000L); when(snapshotInfo.getExclusiveReplicatedSize()).thenReturn(12000L); - when(snapshotInfo.getExclusiveDirSize()).thenReturn(2000L); - when(snapshotInfo.getExclusiveDirReplicatedSize()).thenReturn(6000L); + when(snapshotInfo.getExclusiveSizeDeltaFromDirDeepCleaning()).thenReturn(2000L); + when(snapshotInfo.getExclusiveReplicatedSizeDeltaFromDirDeepCleaning()).thenReturn(6000L); return snapshotInfo; } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/SnapshotInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/SnapshotInfo.java index 3a43ab2878f8..4f4c9038f216 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/SnapshotInfo.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/SnapshotInfo.java @@ -82,8 +82,8 @@ public final class SnapshotInfo implements Auditable, CopyObject { private long referencedReplicatedSize; private long exclusiveSize; private long exclusiveReplicatedSize; - private long exclusiveDirSize; - private long exclusiveDirReplicatedSize; + private long exclusiveSizeDeltaFromDirDeepCleaning; + private long exclusiveReplicatedSizeDeltaFromDirDeepCleaning; private boolean deepCleanedDeletedDir; private ByteString lastTransactionInfo; @@ -106,8 +106,8 @@ private SnapshotInfo(Builder b) { this.referencedReplicatedSize = b.referencedReplicatedSize; this.exclusiveSize = b.exclusiveSize; 
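The rename from exclusiveDirSize to exclusiveSizeDeltaFromDirDeepCleaning makes the semantics explicit: each field is owned by one deep-cleaning service, and a snapshot's total exclusive size is the sum of the two. A hypothetical reader-side helper showing the aggregation that OzoneSnapshot.fromSnapshotInfo performs (the helper itself is not part of the patch, and assumes SnapshotInfo is imported):

    // Total exclusive size: the key deep-clean figure plus the delta
    // contributed by directory deep-cleaning.
    static long totalExclusiveSize(SnapshotInfo info) {
      return info.getExclusiveSize()
          + info.getExclusiveSizeDeltaFromDirDeepCleaning();
    }

    static long totalExclusiveReplicatedSize(SnapshotInfo info) {
      return info.getExclusiveReplicatedSize()
          + info.getExclusiveReplicatedSizeDeltaFromDirDeepCleaning();
    }

With the mocked values in TestOzoneSnapshot (4000 + 2000 and 12000 + 6000), this aggregation yields the 6000 and 18000 the test expects.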
this.exclusiveReplicatedSize = b.exclusiveReplicatedSize; - this.exclusiveDirSize = b.exclusiveDirSize; - this.exclusiveDirReplicatedSize = b.exclusiveDirReplicatedSize; + this.exclusiveSizeDeltaFromDirDeepCleaning = b.exclusiveSizeDeltaFromDirDeepCleaning; + this.exclusiveReplicatedSizeDeltaFromDirDeepCleaning = b.exclusiveReplicatedSizeDeltaFromDirDeepCleaning; this.deepCleanedDeletedDir = b.deepCleanedDeletedDir; this.lastTransactionInfo = b.lastTransactionInfo; } @@ -237,8 +237,8 @@ public SnapshotInfo.Builder toBuilder() { .setReferencedReplicatedSize(referencedReplicatedSize) .setExclusiveSize(exclusiveSize) .setExclusiveReplicatedSize(exclusiveReplicatedSize) - .setExclusiveDirSize(exclusiveDirSize) - .setExclusiveDirReplicatedSize(exclusiveDirReplicatedSize) + .setExclusiveSizeDeltaFromDirDeepCleaning(exclusiveSizeDeltaFromDirDeepCleaning) + .setExclusiveReplicatedSizeDeltaFromDirDeepCleaning(exclusiveReplicatedSizeDeltaFromDirDeepCleaning) .setDeepCleanedDeletedDir(deepCleanedDeletedDir) .setLastTransactionInfo(lastTransactionInfo); } @@ -265,8 +265,8 @@ public static class Builder { private long referencedReplicatedSize; private long exclusiveSize; private long exclusiveReplicatedSize; - private long exclusiveDirSize; - private long exclusiveDirReplicatedSize; + private long exclusiveSizeDeltaFromDirDeepCleaning; + private long exclusiveReplicatedSizeDeltaFromDirDeepCleaning; private boolean deepCleanedDeletedDir; private ByteString lastTransactionInfo; @@ -382,15 +382,16 @@ public Builder setExclusiveReplicatedSize(long exclusiveReplicatedSize) { return this; } - /** @param exclusiveDirSize - Snapshot exclusive size. */ - public Builder setExclusiveDirSize(long exclusiveDirSize) { - this.exclusiveDirSize = exclusiveDirSize; + /** @param exclusiveSizeDeltaFromDirDeepCleaning - Snapshot exclusive size. */ + public Builder setExclusiveSizeDeltaFromDirDeepCleaning(long exclusiveSizeDeltaFromDirDeepCleaning) { + this.exclusiveSizeDeltaFromDirDeepCleaning = exclusiveSizeDeltaFromDirDeepCleaning; return this; } - /** @param exclusiveDirReplicatedSize - Snapshot exclusive size w/ replication. */ - public Builder setExclusiveDirReplicatedSize(long exclusiveDirReplicatedSize) { - this.exclusiveDirReplicatedSize = exclusiveDirReplicatedSize; + /** @param exclusiveReplicatedSizeDeltaFromDirDeepCleaning - Snapshot exclusive size w/ replication. 
*/ + public Builder setExclusiveReplicatedSizeDeltaFromDirDeepCleaning( + long exclusiveReplicatedSizeDeltaFromDirDeepCleaning) { + this.exclusiveReplicatedSizeDeltaFromDirDeepCleaning = exclusiveReplicatedSizeDeltaFromDirDeepCleaning; return this; } @@ -428,8 +429,8 @@ public OzoneManagerProtocolProtos.SnapshotInfo getProtobuf() { .setReferencedReplicatedSize(referencedReplicatedSize) .setExclusiveSize(exclusiveSize) .setExclusiveReplicatedSize(exclusiveReplicatedSize) - .setExclusiveDirSize(exclusiveDirSize) - .setExclusiveDirReplicatedSize(exclusiveDirReplicatedSize) + .setExclusiveSizeDeltaFromDirDeepCleaning(exclusiveSizeDeltaFromDirDeepCleaning) + .setExclusiveReplicatedSizeDeltaFromDirDeepCleaning(exclusiveReplicatedSizeDeltaFromDirDeepCleaning) .setDeepCleanedDeletedDir(deepCleanedDeletedDir); if (pathPreviousSnapshotId != null) { @@ -507,12 +508,13 @@ public static SnapshotInfo getFromProtobuf( snapshotInfoProto.getExclusiveReplicatedSize()); } - if (snapshotInfoProto.hasExclusiveDirSize()) { - osib.setExclusiveDirSize(snapshotInfoProto.getExclusiveDirSize()); + if (snapshotInfoProto.hasExclusiveSizeDeltaFromDirDeepCleaning()) { + osib.setExclusiveSizeDeltaFromDirDeepCleaning(snapshotInfoProto.getExclusiveSizeDeltaFromDirDeepCleaning()); } - if (snapshotInfoProto.hasExclusiveDirReplicatedSize()) { - osib.setExclusiveDirReplicatedSize(snapshotInfoProto.getExclusiveDirReplicatedSize()); + if (snapshotInfoProto.hasExclusiveReplicatedSizeDeltaFromDirDeepCleaning()) { + osib.setExclusiveReplicatedSizeDeltaFromDirDeepCleaning( + snapshotInfoProto.getExclusiveReplicatedSizeDeltaFromDirDeepCleaning()); } if (snapshotInfoProto.hasDeepCleanedDeletedDir()) { @@ -601,24 +603,24 @@ public long getExclusiveSize() { return exclusiveSize; } - public void setExclusiveDirSize(long exclusiveDirSize) { - this.exclusiveDirSize = exclusiveDirSize; + public void setExclusiveSizeDeltaFromDirDeepCleaning(long exclusiveSizeDeltaFromDirDeepCleaning) { + this.exclusiveSizeDeltaFromDirDeepCleaning = exclusiveSizeDeltaFromDirDeepCleaning; } - public long getExclusiveDirSize() { - return exclusiveDirSize; + public long getExclusiveSizeDeltaFromDirDeepCleaning() { + return exclusiveSizeDeltaFromDirDeepCleaning; } public void setExclusiveReplicatedSize(long exclusiveReplicatedSize) { this.exclusiveReplicatedSize = exclusiveReplicatedSize; } - public void setExclusiveDirReplicatedSize(long exclusiveDirReplicatedSize) { - this.exclusiveDirReplicatedSize = exclusiveDirReplicatedSize; + public void setExclusiveReplicatedSizeDeltaFromDirDeepCleaning(long exclusiveReplicatedSizeDeltaFromDirDeepCleaning) { + this.exclusiveReplicatedSizeDeltaFromDirDeepCleaning = exclusiveReplicatedSizeDeltaFromDirDeepCleaning; } - public long getExclusiveDirReplicatedSize() { - return exclusiveDirReplicatedSize; + public long getExclusiveReplicatedSizeDeltaFromDirDeepCleaning() { + return exclusiveReplicatedSizeDeltaFromDirDeepCleaning; } public long getExclusiveReplicatedSize() { @@ -753,9 +755,9 @@ public String toString() { ", referencedReplicatedSize: '" + referencedReplicatedSize + '\'' + ", exclusiveSize: '" + exclusiveSize + '\'' + ", exclusiveReplicatedSize: '" + exclusiveReplicatedSize + '\'' + - ", exclusiveDirSize: '" + exclusiveDirSize + '\'' + - ", exclusiveDirReplicatedSize: '" + exclusiveDirReplicatedSize + '\'' + - ", deepCleanedDeletedDir: '" + deepCleanedDeletedDir + '\'' + + ", exclusiveSizeDeltaFromDirDeepCleaning: '" + exclusiveSizeDeltaFromDirDeepCleaning + '\'' + + ", 
exclusiveReplicatedSizeDeltaFromDirDeepCleaning: '" + exclusiveReplicatedSizeDeltaFromDirDeepCleaning + + "', deepCleanedDeletedDir: '" + deepCleanedDeletedDir + '\'' + ", lastTransactionInfo: '" + lastTransactionInfo + '\'' + '}'; } diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmSnapshotInfo.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmSnapshotInfo.java index e10965f0a66e..eec64b90d45b 100644 --- a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmSnapshotInfo.java +++ b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmSnapshotInfo.java @@ -67,9 +67,9 @@ private SnapshotInfo createSnapshotInfo() { .setReferencedSize(2000L) .setReferencedReplicatedSize(6000L) .setExclusiveSize(1000L) - .setExclusiveDirSize(2000L) + .setExclusiveSizeDeltaFromDirDeepCleaning(2000L) .setExclusiveReplicatedSize(3000L) - .setExclusiveDirReplicatedSize(6000L) + .setExclusiveReplicatedSizeDeltaFromDirDeepCleaning(6000L) .setDeepCleanedDeletedDir(false) .build(); } @@ -94,8 +94,8 @@ private OzoneManagerProtocolProtos.SnapshotInfo createSnapshotInfoProto() { .setReferencedReplicatedSize(6000L) .setExclusiveSize(1000L) .setExclusiveReplicatedSize(3000L) - .setExclusiveDirSize(2000L) - .setExclusiveDirReplicatedSize(6000L) + .setExclusiveSizeDeltaFromDirDeepCleaning(2000L) + .setExclusiveReplicatedSizeDeltaFromDirDeepCleaning(6000L) .setDeepCleanedDeletedDir(false) .build(); } @@ -183,9 +183,10 @@ public void testSnapshotInfoProtoToSnapshotInfo() { snapshotInfoActual.getExclusiveReplicatedSize()); assertEquals(snapshotInfoExpected.getDeepCleanedDeletedDir(), snapshotInfoActual.getDeepCleanedDeletedDir()); - assertEquals(snapshotInfoExpected.getExclusiveDirSize(), snapshotInfoActual.getExclusiveDirSize()); - assertEquals(snapshotInfoExpected.getExclusiveDirReplicatedSize(), - snapshotInfoActual.getExclusiveDirReplicatedSize()); + assertEquals(snapshotInfoExpected.getExclusiveSizeDeltaFromDirDeepCleaning(), + snapshotInfoActual.getExclusiveSizeDeltaFromDirDeepCleaning()); + assertEquals(snapshotInfoExpected.getExclusiveReplicatedSizeDeltaFromDirDeepCleaning(), + snapshotInfoActual.getExclusiveReplicatedSizeDeltaFromDirDeepCleaning()); assertEquals(snapshotInfoExpected, snapshotInfoActual); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDirectoryCleaningService.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDirectoryCleaningService.java index 2b4a3bab812a..84307c55490a 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDirectoryCleaningService.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDirectoryCleaningService.java @@ -242,10 +242,10 @@ public void testExclusiveSizeWithDirectoryDeepClean() throws Exception { SnapshotInfo snapshotInfo = snapshotInfoTable.get(snapshotEntry.getKey()); System.out.println(snapshotInfo.getName() + " " + snapshotInfo.getDeepCleanedDeletedDir()); assertEquals(expectedSize.get(snapshotName), - snapshotInfo.getExclusiveSize() + snapshotInfo.getExclusiveDirSize()); + snapshotInfo.getExclusiveSize() + snapshotInfo.getExclusiveSizeDeltaFromDirDeepCleaning()); // Since for the test we are using RATIS/THREE assertEquals(expectedSize.get(snapshotName) * 3, - snapshotInfo.getExclusiveReplicatedSize() + snapshotInfo.getExclusiveDirReplicatedSize()); + 
snapshotInfo.getExclusiveReplicatedSize() + snapshotInfo.getExclusiveReplicatedSizeDeltaFromDirDeepCleaning()); } } diff --git a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto index 60f609808cae..295f00a53ed3 100644 --- a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto +++ b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto @@ -893,9 +893,9 @@ message SnapshotInfo { // note: shared sizes can be calculated from: referenced - exclusive optional bool deepCleanedDeletedDir = 19; optional bytes lastTransactionInfo = 20; - optional uint64 exclusiveDirSize = 21; + optional uint64 exclusiveSizeDeltaFromDirDeepCleaning = 21; // snapshot exclusive size after replication - optional uint64 exclusiveDirReplicatedSize = 22; + optional uint64 exclusiveReplicatedSizeDeltaFromDirDeepCleaning = 22; } message SnapshotDiffJobProto { diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotSetPropertyRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotSetPropertyRequest.java index d9594bfe618e..e26f82891015 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotSetPropertyRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotSetPropertyRequest.java @@ -78,8 +78,8 @@ private void updateSnapshotProperty( if (setSnapshotPropertyRequest.hasSnapshotDirSize()) { SnapshotSize snapshotSize = setSnapshotPropertyRequest.getSnapshotDirSize(); // Set Exclusive size. - snapInfo.setExclusiveDirSize(snapshotSize.getExclusiveSize()); - snapInfo.setExclusiveDirReplicatedSize(snapshotSize.getExclusiveReplicatedSize()); + snapInfo.setExclusiveSizeDeltaFromDirDeepCleaning(snapshotSize.getExclusiveSize()); + snapInfo.setExclusiveReplicatedSizeDeltaFromDirDeepCleaning(snapshotSize.getExclusiveReplicatedSize()); } } From 7bcbda6d2b77f04072d82fb91392fcfcf2f45d91 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Tue, 13 May 2025 19:00:50 -0400 Subject: [PATCH 07/20] HDDS-13022. 
Address review comments Change-Id: I403f238e8fd1178540da893f73a42aa2223572ad --- .../interface-client/src/main/proto/OmClientProtocol.proto | 2 +- .../om/request/snapshot/OMSnapshotSetPropertyRequest.java | 4 ++-- .../ozone/om/service/SnapshotDirectoryCleaningService.java | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto index 295f00a53ed3..b56912ff6d2c 100644 --- a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto +++ b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto @@ -2033,7 +2033,7 @@ message SetSnapshotPropertyRequest { optional SnapshotSize snapshotSize = 3; optional bool deepCleanedDeletedDir = 4; optional bool deepCleanedDeletedKey = 5; - optional SnapshotSize snapshotDirSize = 6; + optional SnapshotSize snapshotSizeDeltaFromDirDeepCleaning = 6; } // SnapshotProperty in entirely deprecated, Keeping it here for proto.lock compatibility diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotSetPropertyRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotSetPropertyRequest.java index e26f82891015..6491bd639169 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotSetPropertyRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotSetPropertyRequest.java @@ -75,8 +75,8 @@ private void updateSnapshotProperty( snapInfo.setExclusiveSize(snapshotSize.getExclusiveSize()); snapInfo.setExclusiveReplicatedSize(snapshotSize.getExclusiveReplicatedSize()); } - if (setSnapshotPropertyRequest.hasSnapshotDirSize()) { - SnapshotSize snapshotSize = setSnapshotPropertyRequest.getSnapshotDirSize(); + if (setSnapshotPropertyRequest.hasSnapshotSizeDeltaFromDirDeepCleaning()) { + SnapshotSize snapshotSize = setSnapshotPropertyRequest.getSnapshotSizeDeltaFromDirDeepCleaning(); // Set Exclusive size. snapInfo.setExclusiveSizeDeltaFromDirDeepCleaning(snapshotSize.getExclusiveSize()); snapInfo.setExclusiveReplicatedSizeDeltaFromDirDeepCleaning(snapshotSize.getExclusiveReplicatedSize()); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/SnapshotDirectoryCleaningService.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/SnapshotDirectoryCleaningService.java index 3a307804555e..bb985da55979 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/SnapshotDirectoryCleaningService.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/SnapshotDirectoryCleaningService.java @@ -345,7 +345,7 @@ private void updateExclusiveSize(String prevSnapshotKeyTable) throws IOException setSnapshotPropertyRequest = SetSnapshotPropertyRequest.newBuilder() .setSnapshotKey(prevSnapshotKeyTable) - .setSnapshotDirSize(snapshotSize) + .setSnapshotSizeDeltaFromDirDeepCleaning(snapshotSize) .build(); OMRequest omRequest = OMRequest.newBuilder() From b86ec7e08c7457b5076a8a040ce9da275117e8fd Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Tue, 13 May 2025 19:26:51 -0400 Subject: [PATCH 08/20] HDDS-13031. 
Remove Lock set bits for SNAPSHOT_GC_LOCK Change-Id: Id3202dbe7a5a71c43526e80af34b18dd7b2ed66e --- .../org/apache/hadoop/ozone/om/lock/OzoneManagerLock.java | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/OzoneManagerLock.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/OzoneManagerLock.java index 07b5d7938ea9..2b2de20698ee 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/OzoneManagerLock.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/OzoneManagerLock.java @@ -587,7 +587,7 @@ public enum Resource { KEY_PATH_LOCK((byte) 5, "KEY_PATH_LOCK"), //63 PREFIX_LOCK((byte) 6, "PREFIX_LOCK"), //127 SNAPSHOT_LOCK((byte) 7, "SNAPSHOT_LOCK"), // = 255 - SNAPSHOT_GC_LOCK((byte) 8, "SNAPSHOT_GC_LOCK"); + SNAPSHOT_GC_LOCK("SNAPSHOT_GC_LOCK"); // level of the resource private byte lockLevel; @@ -667,6 +667,12 @@ long getStartWriteHeldTimeNanos() { this.name = name; } + Resource(String name) { + this.name = name; + this.mask = 0; + this.setMask = 0; + } + boolean canLock(short lockSetVal) { // For USER_LOCK, S3_SECRET_LOCK and PREFIX_LOCK we shall not allow From 617cf00c7e8b8d074b78e5c39729a5075a9b0bba Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Tue, 13 May 2025 20:41:36 -0400 Subject: [PATCH 09/20] HDDS-13026. KeyDeleting service should also delete RenameEntries Change-Id: I17c1771c7630292fded0eebfbe6ee14f97798506 --- .../hadoop/ozone/om/TestKeyPurging.java | 2 +- ...napshotDeletingServiceIntegrationTest.java | 15 +- .../TestSnapshotDirectoryCleaningService.java | 1 - .../src/main/proto/OmClientProtocol.proto | 1 + .../apache/hadoop/ozone/om/KeyManager.java | 35 +- .../hadoop/ozone/om/KeyManagerImpl.java | 84 ++- .../ozone/om/OmMetadataManagerImpl.java | 156 ------ .../hadoop/ozone/om/PendingKeysDeletion.java | 8 +- .../snapshot/OMSnapshotPurgeRequest.java | 1 + .../service/AbstractKeyDeletingService.java | 71 ++- .../ozone/om/service/KeyDeletingService.java | 499 ++++++------------ .../om/service/SnapshotDeletingService.java | 4 +- .../hadoop/ozone/om/TestKeyManagerImpl.java | 32 +- .../om/service/TestKeyDeletingService.java | 49 +- 14 files changed, 403 insertions(+), 555 deletions(-) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyPurging.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyPurging.java index 17d4d40a0967..fa59754b67f2 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyPurging.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyPurging.java @@ -126,7 +126,7 @@ public void testKeysPurgingByKeyDeletingService() throws Exception { GenericTestUtils.waitFor( () -> { try { - return keyManager.getPendingDeletionKeys(Integer.MAX_VALUE) + return keyManager.getPendingDeletionKeys((kv) -> true, Integer.MAX_VALUE) .getKeyBlocksList().isEmpty(); } catch (IOException e) { return false; diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDeletingServiceIntegrationTest.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDeletingServiceIntegrationTest.java index dddfd9717749..cd7d81e75b47 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDeletingServiceIntegrationTest.java +++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDeletingServiceIntegrationTest.java @@ -25,6 +25,7 @@ import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_SNAPSHOT_DEEP_CLEANING_ENABLED; import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.anyBoolean; import static org.mockito.ArgumentMatchers.anyInt; import static org.mockito.ArgumentMatchers.anyString; @@ -497,7 +498,7 @@ private KeyDeletingService getMockedKeyDeletingService(AtomicBoolean keyDeletion keyDeletingService.shutdown(); GenericTestUtils.waitFor(() -> keyDeletingService.getThreadCount() == 0, 1000, 100000); - when(keyManager.getPendingDeletionKeys(anyInt())).thenAnswer(i -> { + when(keyManager.getPendingDeletionKeys(any(), anyInt())).thenAnswer(i -> { // wait for SDS to reach the KDS wait block before processing any key. GenericTestUtils.waitFor(keyDeletionWaitStarted::get, 1000, 100000); keyDeletionStarted.set(true); @@ -616,9 +617,9 @@ public void testParallelExcecutionOfKeyDeletionAndSnapshotDeletion() throws Exce try (ReferenceCounted snapshot = om.getOmSnapshotManager().getSnapshot(testBucket.getVolumeName(), testBucket.getName(), testBucket.getName() + "snap2")) { renamesKeyEntries = snapshot.get().getKeyManager().getRenamesKeyEntries(testBucket.getVolumeName(), - testBucket.getName(), "", 1000); + testBucket.getName(), "", (kv) -> true, 1000); deletedKeyEntries = snapshot.get().getKeyManager().getDeletedKeyEntries(testBucket.getVolumeName(), - testBucket.getName(), "", 1000); + testBucket.getName(), "", (kv) -> true, 1000); deletedDirEntries = snapshot.get().getKeyManager().getDeletedDirEntries(testBucket.getVolumeName(), testBucket.getName(), 1000); } @@ -653,20 +654,20 @@ public void testParallelExcecutionOfKeyDeletionAndSnapshotDeletion() throws Exce testBucket.getName(), testBucket.getName() + "snap2")) { Assertions.assertEquals(Collections.emptyList(), snapshot.get().getKeyManager().getRenamesKeyEntries(testBucket.getVolumeName(), - testBucket.getName(), "", 1000)); + testBucket.getName(), "", (kv) -> true, 1000)); Assertions.assertEquals(Collections.emptyList(), snapshot.get().getKeyManager().getDeletedKeyEntries(testBucket.getVolumeName(), - testBucket.getName(), "", 1000)); + testBucket.getName(), "", (kv) -> true, 1000)); Assertions.assertEquals(Collections.emptyList(), snapshot.get().getKeyManager().getDeletedDirEntries(testBucket.getVolumeName(), testBucket.getName(), 1000)); } List> aosRenamesKeyEntries = om.getKeyManager().getRenamesKeyEntries(testBucket.getVolumeName(), - testBucket.getName(), "", 1000); + testBucket.getName(), "", (kv) -> true, 1000); List>> aosDeletedKeyEntries = om.getKeyManager().getDeletedKeyEntries(testBucket.getVolumeName(), - testBucket.getName(), "", 1000); + testBucket.getName(), "", (kv) -> true, 1000); List> aosDeletedDirEntries = om.getKeyManager().getDeletedDirEntries(testBucket.getVolumeName(), testBucket.getName(), 1000); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDirectoryCleaningService.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDirectoryCleaningService.java index 84307c55490a..8591c6d1e88b 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDirectoryCleaningService.java +++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDirectoryCleaningService.java
@@ -240,7 +240,6 @@ public void testExclusiveSizeWithDirectoryDeepClean() throws Exception {
       Table.KeyValue<String, SnapshotInfo> snapshotEntry = iterator.next();
       String snapshotName = snapshotEntry.getValue().getName();
       SnapshotInfo snapshotInfo = snapshotInfoTable.get(snapshotEntry.getKey());
-      System.out.println(snapshotInfo.getName() + " " + snapshotInfo.getDeepCleanedDeletedDir());
       assertEquals(expectedSize.get(snapshotName),
           snapshotInfo.getExclusiveSize() + snapshotInfo.getExclusiveSizeDeltaFromDirDeepCleaning());
       // Since for the test we are using RATIS/THREE
diff --git a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto
index b56912ff6d2c..4c3a6db8bae2 100644
--- a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto
+++ b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto
@@ -1392,6 +1392,7 @@ message PurgeKeysRequest {
     repeated SnapshotMoveKeyInfos keysToUpdate = 3;
     // previous snapshotID can also be null & this field would be absent in older requests.
     optional NullableUUID expectedPreviousSnapshotID = 4;
+    repeated string renamedKeys = 5;
 }

 message PurgeKeysResponse {
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManager.java
index fa3e622313df..a067551be1c6 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManager.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManager.java
@@ -37,6 +37,7 @@ import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadList;
 import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadListParts;
+import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo;
 import org.apache.hadoop.ozone.om.service.CompactionService;
 import org.apache.hadoop.ozone.om.service.DirectoryDeletingService;
 import org.apache.hadoop.ozone.om.service.KeyDeletingService;
@@ -123,18 +124,39 @@ ListKeysResult listKeys(String volumeName, String bucketName, String startKey,
    * and a hashmap for key-value pair to be updated in the deletedTable.
    * @throws IOException
    */
-  PendingKeysDeletion getPendingDeletionKeys(int count) throws IOException;
+  PendingKeysDeletion getPendingDeletionKeys(
+      CheckedFunction<Table.KeyValue<String, OmKeyInfo>, Boolean, IOException> filter, int count)
+      throws IOException;
+
+  /**
+   * Returns a PendingKeysDeletion with a list of pending-deletion key info,
+   * up to the given count. Each entry is a {@link BlockGroup}, which
+   * contains the info about the key name and all its associated block IDs.
+   * It also carries a mapping of key-value pairs to be updated in the
+   * deletedTable.
+   *
+   * @param volume volume to scan; a null or empty volume scans all volumes.
+   * @param bucket bucket to scan; a null or empty bucket scans all buckets.
+   * @param startKey deletedTable key to resume the scan from, may be null.
+   * @param filter predicate deciding whether an entry is reclaimable.
+   * @param count max number of keys to return.
+   * @return a PendingKeysDeletion holding the list of {@link BlockGroup}s
+   * representing keys and blocks, and a map of key-value pairs to be updated
+   * in the deletedTable.
+   * @throws IOException
+   */
+  PendingKeysDeletion getPendingDeletionKeys(
+      String volume, String bucket, String startKey,
+      CheckedFunction<Table.KeyValue<String, OmKeyInfo>, Boolean, IOException> filter, int count)
+      throws IOException;

   /**
    * Returns a list of rename entries from the snapshotRenamedTable.
    *
-   * @param size max number of keys to return.
+   * @param count max number of keys to return.
+   * @param filter filter to apply on the entries.
   * @return a list of {@link org.apache.hadoop.hdds.utils.db.Table.KeyValue} entries representing the keys in the
   * underlying metadataManager.
   * @throws IOException
   */
   List<Table.KeyValue<String, String>> getRenamesKeyEntries(
-      String volume, String bucket, String startKey, int size) throws IOException;
+      String volume, String bucket, String startKey,
+      CheckedFunction<Table.KeyValue<String, String>, Boolean, IOException> filter, int count)
+      throws IOException;

   /**
@@ -158,13 +180,16 @@ CheckedFunction getPreviousSnapshotOzoneKeyI
   /**
    * Returns a list of deleted entries from the deletedTable.
    *
-   * @param size max number of keys to return.
+   * @param count max number of keys to return.
+   * @param filter filter to apply on the entries.
    * @return a list of {@link org.apache.hadoop.hdds.utils.db.Table.KeyValue} entries representing the keys in the
    * underlying metadataManager.
    * @throws IOException
    */
   List<Table.KeyValue<String, List<OmKeyInfo>>> getDeletedKeyEntries(
-      String volume, String bucket, String startKey, int size) throws IOException;
+      String volume, String bucket, String startKey,
+      CheckedFunction<Table.KeyValue<String, RepeatedOmKeyInfo>, Boolean, IOException> filter,
+      int count) throws IOException;

   /**
    * Returns the names of up to {@code count} open keys whose age is
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
index 96673113771b..e42aad0dcfb4 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
@@ -113,6 +113,7 @@ import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension.EncryptedKeyVersion;
 import org.apache.hadoop.fs.FileEncryptionInfo;
 import org.apache.hadoop.hdds.HddsConfigKeys;
+import org.apache.hadoop.hdds.client.BlockID;
 import org.apache.hadoop.hdds.client.ReplicationConfig;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
@@ -136,6 +137,7 @@ import org.apache.hadoop.net.TableMapping;
 import org.apache.hadoop.ozone.OmUtils;
 import org.apache.hadoop.ozone.OzoneAcl;
+import org.apache.hadoop.ozone.common.BlockGroup;
 import org.apache.hadoop.ozone.om.exceptions.OMException;
 import org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes;
 import org.apache.hadoop.ozone.om.helpers.BucketEncryptionKeyInfo;
@@ -172,6 +174,7 @@ import org.apache.hadoop.ozone.security.acl.OzoneObj;
 import org.apache.hadoop.ozone.security.acl.RequestContext;
 import org.apache.hadoop.security.SecurityUtil;
+import org.apache.hadoop.util.Lists;
 import org.apache.hadoop.util.ReflectionUtils;
 import org.apache.hadoop.util.Time;
 import org.apache.ratis.util.function.CheckedFunction;
@@ -722,17 +725,73 @@ public ListKeysResult listKeys(String volumeName, String bucketName,
   }

   @Override
-  public PendingKeysDeletion getPendingDeletionKeys(final int count)
+  public PendingKeysDeletion getPendingDeletionKeys(
+      final CheckedFunction<Table.KeyValue<String, OmKeyInfo>, Boolean, IOException> filter, final int count)
       throws IOException {
-    OmMetadataManagerImpl omMetadataManager =
-        (OmMetadataManagerImpl) metadataManager;
-    return omMetadataManager
-        .getPendingDeletionKeys(count, ozoneManager.getOmSnapshotManager());
+    return getPendingDeletionKeys(null, null, null, filter, count);
+  }
+
+  @Override
+  public PendingKeysDeletion getPendingDeletionKeys(
+      String volume, String bucket, String startKey,
+      CheckedFunction<Table.KeyValue<String, OmKeyInfo>, Boolean, IOException> filter,
+      int count) throws IOException {
+    List<BlockGroup> keyBlocksList = Lists.newArrayList();
+    Map<String, RepeatedOmKeyInfo> keysToModify = new HashMap<>();
+    // Bucket prefix would be empty if volume is empty i.e. either null or "".
+    Optional<String> bucketPrefix = getBucketPrefix(volume, bucket, false);
+    try (TableIterator<String, ? extends Table.KeyValue<String, RepeatedOmKeyInfo>>
+             delKeyIter = metadataManager.getDeletedTable().iterator(bucketPrefix.orElse(""))) {
+
+      /* Seek to the start key if it is not null. The next key picked up is guaranteed to start with the bucket
+         prefix, {@link org.apache.hadoop.hdds.utils.db.Table#iterator(bucketPrefix)} would ensure this.
+       */
+      if (startKey != null) {
+        delKeyIter.seek(startKey);
+      }
+      int currentCount = 0;
+      while (delKeyIter.hasNext() && currentCount < count) {
+        RepeatedOmKeyInfo notReclaimableKeyInfo = new RepeatedOmKeyInfo();
+        Table.KeyValue<String, RepeatedOmKeyInfo> kv = delKeyIter.next();
+        if (kv != null) {
+          List<BlockGroup> blockGroupList = Lists.newArrayList();
+          // Multiple keys with the same path can be queued in one DB entry
+          RepeatedOmKeyInfo infoList = kv.getValue();
+          for (OmKeyInfo info : infoList.cloneOmKeyInfoList()) {
+
+            // Skip the key if the filter doesn't allow the file to be deleted.
+            if (filter == null || filter.apply(Table.newKeyValue(kv.getKey(), info))) {
+              List<BlockID> blockIDS = info.getKeyLocationVersions().stream()
+                  .flatMap(versionLocations -> versionLocations.getLocationList().stream()
+                      .map(b -> new BlockID(b.getContainerID(), b.getLocalID()))).collect(Collectors.toList());
+              BlockGroup keyBlocks = BlockGroup.newBuilder().setKeyName(kv.getKey())
+                  .addAllBlockIDs(blockIDS).build();
+              blockGroupList.add(keyBlocks);
+              currentCount++;
+            } else {
+              notReclaimableKeyInfo.addOmKeyInfo(info);
+            }
+          }
+
+          List<OmKeyInfo> notReclaimableKeyInfoList = notReclaimableKeyInfo.getOmKeyInfoList();
+
+          // If only some of the versions are reclaimable, rewrite the DB entry so that the
+          // non-reclaimable versions are retained while the reclaimable ones are purged.
+          if (notReclaimableKeyInfoList.size() > 0 &&
+              notReclaimableKeyInfoList.size() != infoList.getOmKeyInfoList().size()) {
+            keysToModify.put(kv.getKey(), notReclaimableKeyInfo);
+          }
+          keyBlocksList.addAll(blockGroupList);
+        }
+      }
+    }
+    return new PendingKeysDeletion(keyBlocksList, keysToModify);
+  }

   private <T, V> List<Table.KeyValue<String, V>> getTableEntries(String startKey,
       TableIterator<String, ? extends Table.KeyValue<String, T>> tableIterator,
-      Function<T, V> valueFunction, int size) throws IOException {
+      Function<T, V> valueFunction,
+      CheckedFunction<Table.KeyValue<String, T>, Boolean, IOException> filter,
+      int size) throws IOException {
     List<Table.KeyValue<String, V>> entries = new ArrayList<>();
     /* Seek to the start key if it's not null. The next key in queue is ensured to start with the bucket
        prefix, {@link org.apache.hadoop.hdds.utils.db.Table#iterator(bucketPrefix)} would ensure this.
@@ -745,7 +804,7 @@ private List> getTableEntries(String startKey, int currentCount = 0; while (tableIterator.hasNext() && currentCount < size) { Table.KeyValue kv = tableIterator.next(); - if (kv != null) { + if (kv != null && filter.apply(kv)) { entries.add(Table.newKeyValue(kv.getKey(), valueFunction.apply(kv.getValue()))); currentCount++; } @@ -767,11 +826,12 @@ private Optional getBucketPrefix(String volumeName, String bucketName, b @Override public List> getRenamesKeyEntries( - String volume, String bucket, String startKey, int size) throws IOException { + String volume, String bucket, String startKey, + CheckedFunction, Boolean, IOException> filter, int size) throws IOException { Optional bucketPrefix = getBucketPrefix(volume, bucket, false); try (TableIterator> renamedKeyIter = metadataManager.getSnapshotRenamedTable().iterator(bucketPrefix.orElse(""))) { - return getTableEntries(startKey, renamedKeyIter, Function.identity(), size); + return getTableEntries(startKey, renamedKeyIter, Function.identity(), filter, size); } } @@ -815,11 +875,13 @@ private CheckedFunction getPreviousSnapshotOzone @Override public List>> getDeletedKeyEntries( - String volume, String bucket, String startKey, int size) throws IOException { + String volume, String bucket, String startKey, + CheckedFunction, Boolean, IOException> filter, + int size) throws IOException { Optional bucketPrefix = getBucketPrefix(volume, bucket, false); try (TableIterator> delKeyIter = metadataManager.getDeletedTable().iterator(bucketPrefix.orElse(""))) { - return getTableEntries(startKey, delKeyIter, RepeatedOmKeyInfo::cloneOmKeyInfoList, size); + return getTableEntries(startKey, delKeyIter, RepeatedOmKeyInfo::cloneOmKeyInfoList, filter, size); } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java index 7a4b6ecdb2e3..e1f50b1922f1 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java @@ -34,7 +34,6 @@ import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.VOLUME_NOT_FOUND; import static org.apache.hadoop.ozone.om.snapshot.SnapshotUtils.checkSnapshotDirExist; -import static org.apache.hadoop.ozone.om.snapshot.SnapshotUtils.isBlockLocationInfoSame; import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Strings; @@ -55,7 +54,6 @@ import java.util.Map; import java.util.NoSuchElementException; import java.util.Objects; -import java.util.Optional; import java.util.Set; import java.util.SortedMap; import java.util.TreeMap; @@ -113,7 +111,6 @@ import org.apache.hadoop.ozone.om.protocolPB.OzoneManagerProtocolClientSideTranslatorPB; import org.apache.hadoop.ozone.om.request.file.OMFileRequest; import org.apache.hadoop.ozone.om.request.util.OMMultipartUploadUtils; -import org.apache.hadoop.ozone.om.snapshot.ReferenceCounted; import org.apache.hadoop.ozone.om.snapshot.SnapshotUtils; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ExpiredMultipartUploadInfo; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ExpiredMultipartUploadsBucket; @@ -1311,159 +1308,6 @@ private PersistedUserVolumeInfo getVolumesByUser(String userNameKey) } } 
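
For orientation, the filter-based accessors introduced above can be driven as in the following sketch. This is illustrative only, not part of the patch: the class and method names (PendingDeletionSketch, drain, km) are placeholders, and the pass-through filters stand in for the ReclaimableKeyFilter and ReclaimableRenameEntryFilter that KeyDeletingService wires in later in this patch. The old OmMetadataManagerImpl implementation removed just below used to embed this filtering itself; the sketch targets the new KeyManager entry points.

import java.io.IOException;
import java.util.List;
import org.apache.hadoop.hdds.utils.db.Table;
import org.apache.hadoop.ozone.common.BlockGroup;
import org.apache.hadoop.ozone.om.KeyManager;
import org.apache.hadoop.ozone.om.PendingKeysDeletion;
import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
import org.apache.ratis.util.function.CheckedFunction;

public final class PendingDeletionSketch {
  private PendingDeletionSketch() {
  }

  // Drains up to 'limit' reclaimable entries for one bucket. The pass-through
  // filters accept everything; real callers plug in reclaimability checks
  // against the previous snapshot in the chain.
  static void drain(KeyManager km, String volume, String bucket, int limit) throws IOException {
    CheckedFunction<Table.KeyValue<String, OmKeyInfo>, Boolean, IOException> keyFilter = kv -> true;
    PendingKeysDeletion pending = km.getPendingDeletionKeys(volume, bucket, null, keyFilter, limit);
    for (BlockGroup group : pending.getKeyBlocksList()) {
      // Each BlockGroup pairs a deletedTable key with all block IDs to purge.
      System.out.println("reclaimable key: " + group.getGroupID());
    }
    // Rename entries share the same filter contract but are returned as raw table rows.
    List<Table.KeyValue<String, String>> renames =
        km.getRenamesKeyEntries(volume, bucket, null, kv -> true, limit);
    for (Table.KeyValue<String, String> kv : renames) {
      System.out.println("reclaimable rename entry: " + kv.getKey());
    }
  }
}

Passing null for both volume and bucket scans every bucket, which is how the AOS path uses these methods; entries rejected by a filter simply remain in their tables for a later run.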
- /** - * Returns a list of pending deletion key info up to the limit. - * Each entry is a {@link BlockGroup}, which contains the info about the key - * name and all its associated block IDs. - * - * @param keyCount max number of keys to return. - * @param omSnapshotManager SnapshotManager - * @return a list of {@link BlockGroup} represent keys and blocks. - * @throws IOException - */ - public PendingKeysDeletion getPendingDeletionKeys(final int keyCount, - OmSnapshotManager omSnapshotManager) - throws IOException { - List keyBlocksList = Lists.newArrayList(); - HashMap keysToModify = new HashMap<>(); - try (TableIterator> - keyIter = getDeletedTable().iterator()) { - int currentCount = 0; - while (keyIter.hasNext() && currentCount < keyCount) { - RepeatedOmKeyInfo notReclaimableKeyInfo = new RepeatedOmKeyInfo(); - KeyValue kv = keyIter.next(); - if (kv != null) { - List blockGroupList = Lists.newArrayList(); - // Get volume name and bucket name - String[] keySplit = kv.getKey().split(OM_KEY_PREFIX); - String bucketKey = getBucketKey(keySplit[1], keySplit[2]); - OmBucketInfo bucketInfo = getBucketTable().get(bucketKey); - // If Bucket deleted bucketInfo would be null, thus making previous snapshot also null. - SnapshotInfo previousSnapshotInfo = bucketInfo == null ? null : - SnapshotUtils.getLatestSnapshotInfo(bucketInfo.getVolumeName(), - bucketInfo.getBucketName(), ozoneManager, snapshotChainManager); - // previous snapshot is not active or it has not been flushed to disk then don't process the key in this - // iteration. - if (previousSnapshotInfo != null && - (previousSnapshotInfo.getSnapshotStatus() != SnapshotInfo.SnapshotStatus.SNAPSHOT_ACTIVE || - !OmSnapshotManager.areSnapshotChangesFlushedToDB(ozoneManager.getMetadataManager(), - previousSnapshotInfo))) { - continue; - } - // Get the latest snapshot in snapshot path. - try (ReferenceCounted rcLatestSnapshot = previousSnapshotInfo == null ? null : - omSnapshotManager.getSnapshot(previousSnapshotInfo.getVolumeName(), - previousSnapshotInfo.getBucketName(), previousSnapshotInfo.getName())) { - - // Multiple keys with the same path can be queued in one DB entry - RepeatedOmKeyInfo infoList = kv.getValue(); - for (OmKeyInfo info : infoList.cloneOmKeyInfoList()) { - // Skip the key if it exists in the previous snapshot (of the same - // scope) as in this case its blocks should not be reclaimed - - // If the last snapshot is deleted and the keys renamed in between - // the snapshots will be cleaned up by KDS. So we need to check - // in the renamedTable as well. 
- String dbRenameKey = getRenameKey(info.getVolumeName(), - info.getBucketName(), info.getObjectID()); - - if (rcLatestSnapshot != null) { - Table prevKeyTable = - rcLatestSnapshot.get() - .getMetadataManager() - .getKeyTable(bucketInfo.getBucketLayout()); - - Table prevDeletedTable = - rcLatestSnapshot.get().getMetadataManager().getDeletedTable(); - String prevKeyTableDBKey = getSnapshotRenamedTable() - .get(dbRenameKey); - String prevDelTableDBKey = getOzoneKey(info.getVolumeName(), - info.getBucketName(), info.getKeyName()); - // format: /volName/bucketName/keyName/objId - prevDelTableDBKey = getOzoneDeletePathKey(info.getObjectID(), - prevDelTableDBKey); - - if (prevKeyTableDBKey == null && - bucketInfo.getBucketLayout().isFileSystemOptimized()) { - long volumeId = getVolumeId(info.getVolumeName()); - prevKeyTableDBKey = getOzonePathKey(volumeId, - bucketInfo.getObjectID(), - info.getParentObjectID(), - info.getFileName()); - } else if (prevKeyTableDBKey == null) { - prevKeyTableDBKey = getOzoneKey(info.getVolumeName(), - info.getBucketName(), - info.getKeyName()); - } - - OmKeyInfo omKeyInfo = prevKeyTable.get(prevKeyTableDBKey); - // When key is deleted it is no longer in keyTable, we also - // have to check deletedTable of previous snapshot - RepeatedOmKeyInfo delOmKeyInfo = - prevDeletedTable.get(prevDelTableDBKey); - if (versionExistsInPreviousSnapshot(omKeyInfo, - info, delOmKeyInfo)) { - // If the infoList size is 1, there is nothing to split. - // We either delete it or skip it. - if (!(infoList.getOmKeyInfoList().size() == 1)) { - notReclaimableKeyInfo.addOmKeyInfo(info); - } - continue; - } - } - - // Add all blocks from all versions of the key to the deletion - // list - for (OmKeyLocationInfoGroup keyLocations : - info.getKeyLocationVersions()) { - List item = keyLocations.getLocationList().stream() - .map(b -> new BlockID(b.getContainerID(), b.getLocalID())) - .collect(Collectors.toList()); - BlockGroup keyBlocks = BlockGroup.newBuilder() - .setKeyName(kv.getKey()) - .addAllBlockIDs(item) - .build(); - blockGroupList.add(keyBlocks); - } - currentCount++; - } - - List notReclaimableKeyInfoList = - notReclaimableKeyInfo.getOmKeyInfoList(); - // If Bucket deleted bucketInfo would be null, thus making previous snapshot also null. - SnapshotInfo newPreviousSnapshotInfo = bucketInfo == null ? null : - SnapshotUtils.getLatestSnapshotInfo(bucketInfo.getVolumeName(), - bucketInfo.getBucketName(), ozoneManager, snapshotChainManager); - // Check if the previous snapshot in the chain hasn't changed. - if (Objects.equals(Optional.ofNullable(newPreviousSnapshotInfo).map(SnapshotInfo::getSnapshotId), - Optional.ofNullable(previousSnapshotInfo).map(SnapshotInfo::getSnapshotId))) { - // If all the versions are not reclaimable, then do nothing. - if (!notReclaimableKeyInfoList.isEmpty() && - notReclaimableKeyInfoList.size() != - infoList.getOmKeyInfoList().size()) { - keysToModify.put(kv.getKey(), notReclaimableKeyInfo); - } - - if (notReclaimableKeyInfoList.size() != - infoList.getOmKeyInfoList().size()) { - keyBlocksList.addAll(blockGroupList); - } - } - } - } - } - } - return new PendingKeysDeletion(keyBlocksList, keysToModify); - } - - private boolean versionExistsInPreviousSnapshot(OmKeyInfo omKeyInfo, - OmKeyInfo info, RepeatedOmKeyInfo delOmKeyInfo) { - return (omKeyInfo != null && - info.getObjectID() == omKeyInfo.getObjectID() && - isBlockLocationInfoSame(omKeyInfo, info)) || - delOmKeyInfo != null; - } - /** * Decide whether the open key is a multipart upload related key. 
* @param openKeyInfo open key related to multipart upload diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/PendingKeysDeletion.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/PendingKeysDeletion.java index 7af213f8f1c6..f8f47c9f33ad 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/PendingKeysDeletion.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/PendingKeysDeletion.java @@ -17,8 +17,8 @@ package org.apache.hadoop.ozone.om; -import java.util.HashMap; import java.util.List; +import java.util.Map; import org.apache.hadoop.ozone.common.BlockGroup; import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; @@ -27,16 +27,16 @@ */ public class PendingKeysDeletion { - private HashMap keysToModify; + private Map keysToModify; private List keyBlocksList; public PendingKeysDeletion(List keyBlocksList, - HashMap keysToModify) { + Map keysToModify) { this.keysToModify = keysToModify; this.keyBlocksList = keyBlocksList; } - public HashMap getKeysToModify() { + public Map getKeysToModify() { return keysToModify; } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotPurgeRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotPurgeRequest.java index dcbf3ad65bd5..f0f828ac617d 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotPurgeRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotPurgeRequest.java @@ -140,6 +140,7 @@ private void updateSnapshotInfoAndCache(SnapshotInfo snapInfo, OmMetadataManager // current snapshot is deleted. We can potentially // reclaim more keys in the next snapshot. 
snapInfo.setDeepClean(false); + snapInfo.setDeepCleanedDeletedDir(false); // Update table cache first omMetadataManager.getSnapshotInfoTable().addCacheEntry(new CacheKey<>(snapInfo.getTableKey()), diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/AbstractKeyDeletingService.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/AbstractKeyDeletingService.java index 7dcb696e06b3..0df1f40b769c 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/AbstractKeyDeletingService.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/AbstractKeyDeletingService.java @@ -101,13 +101,14 @@ public AbstractKeyDeletingService(String serviceName, long interval, this.callId = new AtomicLong(0); } - protected int processKeyDeletes(List keyBlocksList, + protected Pair processKeyDeletes(List keyBlocksList, KeyManager manager, - HashMap keysToModify, + Map keysToModify, + List renameEntries, String snapTableKey, UUID expectedPreviousSnapshotId) throws IOException { long startTime = Time.monotonicNow(); - int delCount = 0; + Pair purgeResult = Pair.of(0, false); if (LOG.isDebugEnabled()) { LOG.debug("Send {} key(s) to SCM: {}", keyBlocksList.size(), keyBlocksList); @@ -125,15 +126,15 @@ protected int processKeyDeletes(List keyBlocksList, keyBlocksList.size(), Time.monotonicNow() - startTime); if (blockDeletionResults != null) { long purgeStartTime = Time.monotonicNow(); - delCount = submitPurgeKeysRequest(blockDeletionResults, - keysToModify, snapTableKey, expectedPreviousSnapshotId); + purgeResult = submitPurgeKeysRequest(blockDeletionResults, + keysToModify, renameEntries, snapTableKey, expectedPreviousSnapshotId); int limit = ozoneManager.getConfiguration().getInt(OMConfigKeys.OZONE_KEY_DELETING_LIMIT_PER_TASK, OMConfigKeys.OZONE_KEY_DELETING_LIMIT_PER_TASK_DEFAULT); LOG.info("Blocks for {} (out of {}) keys are deleted from DB in {} ms. Limit per task is {}.", - delCount, blockDeletionResults.size(), Time.monotonicNow() - purgeStartTime, limit); + purgeResult, blockDeletionResults.size(), Time.monotonicNow() - purgeStartTime, limit); } perfMetrics.setKeyDeletingServiceLatencyMs(Time.monotonicNow() - startTime); - return delCount; + return purgeResult; } /** @@ -142,13 +143,14 @@ protected int processKeyDeletes(List keyBlocksList, * @param results DeleteBlockGroups returned by SCM. * @param keysToModify Updated list of RepeatedOmKeyInfo */ - private int submitPurgeKeysRequest(List results, - HashMap keysToModify, String snapTableKey, UUID expectedPreviousSnapshotId) { - Map, List> purgeKeysMapPerBucket = - new HashMap<>(); + private Pair submitPurgeKeysRequest(List results, + Map keysToModify, List renameEntriesToBeDeleted, + String snapTableKey, UUID expectedPreviousSnapshotId) { + Map, List> purgeKeysMapPerBucket = new HashMap<>(); // Put all keys to be purged in a list int deletedCount = 0; + boolean purgeSuccess = true; for (DeleteBlockGroupResult result : results) { if (result.isSuccess()) { // Add key to PurgeKeys list. @@ -167,6 +169,8 @@ private int submitPurgeKeysRequest(List results, } } deletedCount++; + } else { + purgeSuccess = false; } } @@ -192,6 +196,11 @@ private int submitPurgeKeysRequest(List results, .build(); purgeKeysRequest.addDeletedKeys(deletedKeysInBucket); } + // Adding rename entries to be purged. 
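+      // (renameEntriesToBeDeleted holds snapshotRenamedTable keys that the caller's
+      // rename-entry filter has already judged safe to drop; carrying them in the
+      // purge request lets the OM remove them together with the purged keys.)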
+      if (renameEntriesToBeDeleted != null) {
+        purgeKeysRequest.addAllRenamedKeys(renameEntriesToBeDeleted);
+      }
+
     List<SnapshotMoveKeyInfos> keysToUpdateList = new ArrayList<>();

     if (keysToModify != null) {
@@ -222,13 +231,16 @@ private int submitPurgeKeysRequest(List<DeleteBlockGroupResult> results,

     // Submit PurgeKeys request to OM
     try {
-      submitRequest(omRequest);
+      OzoneManagerProtocolProtos.OMResponse omResponse = submitRequest(omRequest);
+      if (omResponse != null) {
+        purgeSuccess = purgeSuccess && omResponse.getSuccess();
+      }
     } catch (ServiceException e) {
       LOG.error("PurgeKey request failed. Will retry at next run.", e);
-      return 0;
+      return Pair.of(0, false);
     }

-    return deletedCount;
+    return Pair.of(deletedCount, purgeSuccess);
   }

   protected OzoneManagerProtocolProtos.OMResponse submitRequest(OMRequest omRequest) throws ServiceException {
@@ -658,4 +670,35 @@ public long getMovedFilesCount() {
   public BootstrapStateHandler.Lock getBootstrapStateLock() {
     return lock;
   }
+
+  /**
+   * Submits SetSnapshotPropertyRequest to OM.
+   * @param setSnapshotPropertyRequests requests to be sent to OM
+   */
+  protected void submitSetSnapshotRequest(
+      List<SetSnapshotPropertyRequest> setSnapshotPropertyRequests) {
+    if (setSnapshotPropertyRequests.isEmpty()) {
+      return;
+    }
+    OzoneManagerProtocolProtos.OMRequest omRequest = OzoneManagerProtocolProtos.OMRequest.newBuilder()
+        .setCmdType(OzoneManagerProtocolProtos.Type.SetSnapshotProperty)
+        .addAllSetSnapshotPropertyRequests(setSnapshotPropertyRequests)
+        .setClientId(clientId.toString())
+        .build();
+    Map<String, SnapshotInfo> val = new HashMap<>();
+    setSnapshotPropertyRequests
+        .forEach(i -> {
+          try {
+            val.put(i.getSnapshotKey(),
+                ozoneManager.getMetadataManager().getSnapshotInfoTable().get(i.getSnapshotKey()));
+          } catch (IOException e) {
+            throw new RuntimeException(e);
+          }
+        });
+    try {
+      submitRequest(omRequest);
+    } catch (ServiceException e) {
+      LOG.error("Failed to submit set snapshot property request", e);
+    }
+  }
 }
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/KeyDeletingService.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/KeyDeletingService.java
index 98d5a2f93c3c..8b927f4fef33 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/KeyDeletingService.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/KeyDeletingService.java
@@ -19,34 +19,29 @@ import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_KEY_DELETING_LIMIT_PER_TASK;
 import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_KEY_DELETING_LIMIT_PER_TASK_DEFAULT;
-import static org.apache.hadoop.ozone.om.helpers.SnapshotInfo.SnapshotStatus.SNAPSHOT_ACTIVE;

 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
-import com.google.protobuf.ServiceException;
 import java.io.IOException;
+import java.io.UncheckedIOException;
 import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.HashSet;
+import java.util.Collection;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
-import java.util.Optional;
-import java.util.Set;
 import java.util.UUID;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicLong;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
+import org.apache.commons.lang3.tuple.Pair;
 import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol;
 import 
org.apache.hadoop.hdds.utils.BackgroundTask; import org.apache.hadoop.hdds.utils.BackgroundTaskQueue; import org.apache.hadoop.hdds.utils.BackgroundTaskResult; import org.apache.hadoop.hdds.utils.BackgroundTaskResult.EmptyTaskResult; -import org.apache.hadoop.hdds.utils.IOUtils; -import org.apache.hadoop.hdds.utils.db.Table; -import org.apache.hadoop.hdds.utils.db.TableIterator; -import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.common.BlockGroup; import org.apache.hadoop.ozone.om.DeletingServiceMetrics; import org.apache.hadoop.ozone.om.KeyManager; @@ -56,18 +51,14 @@ import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.PendingKeysDeletion; import org.apache.hadoop.ozone.om.SnapshotChainManager; -import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; -import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; -import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils; +import org.apache.hadoop.ozone.om.lock.IOzoneManagerLock; import org.apache.hadoop.ozone.om.snapshot.ReferenceCounted; import org.apache.hadoop.ozone.om.snapshot.SnapshotUtils; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; +import org.apache.hadoop.ozone.om.snapshot.filter.ReclaimableKeyFilter; +import org.apache.hadoop.ozone.om.snapshot.filter.ReclaimableRenameEntryFilter; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SetSnapshotPropertyRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SnapshotSize; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type; -import org.apache.ratis.protocol.ClientId; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -90,10 +81,6 @@ public class KeyDeletingService extends AbstractKeyDeletingService { private int keyLimitPerTask; private final AtomicLong deletedKeyCount; private final AtomicBoolean suspended; - private final Map exclusiveSizeMap; - private final Map exclusiveReplicatedSizeMap; - private final Set completedExclusiveSizeSet; - private final Map snapshotSeekMap; private AtomicBoolean isRunningOnAOS; private final boolean deepCleanSnapshots; private final SnapshotChainManager snapshotChainManager; @@ -114,10 +101,6 @@ public KeyDeletingService(OzoneManager ozoneManager, OZONE_KEY_DELETING_LIMIT_PER_TASK + " cannot be negative."); this.deletedKeyCount = new AtomicLong(0); this.suspended = new AtomicBoolean(false); - this.exclusiveSizeMap = new HashMap<>(); - this.exclusiveReplicatedSizeMap = new HashMap<>(); - this.completedExclusiveSizeSet = new HashSet<>(); - this.snapshotSeekMap = new HashMap<>(); this.isRunningOnAOS = new AtomicBoolean(false); this.deepCleanSnapshots = deepCleanSnapshots; this.snapshotChainManager = ((OmMetadataManagerImpl)manager.getMetadataManager()).getSnapshotChainManager(); @@ -191,6 +174,122 @@ private KeyDeletingTask(KeyDeletingService service) { this.deletingService = service; } + private OzoneManagerProtocolProtos.SetSnapshotPropertyRequest getSetSnapshotRequestUpdatingExclusiveSize( + Map exclusiveSizeMap, Map exclusiveReplicatedSizeMap, UUID snapshotID) { + OzoneManagerProtocolProtos.SnapshotSize snapshotSize = OzoneManagerProtocolProtos.SnapshotSize.newBuilder() + .setExclusiveSize( + exclusiveSizeMap.getOrDefault(snapshotID, 0L)) + .setExclusiveReplicatedSize( + 
exclusiveReplicatedSizeMap.getOrDefault(
+            snapshotID, 0L))
+        .build();
+      exclusiveSizeMap.remove(snapshotID);
+      exclusiveReplicatedSizeMap.remove(snapshotID);
+
+      return OzoneManagerProtocolProtos.SetSnapshotPropertyRequest.newBuilder()
+          .setSnapshotKey(snapshotChainManager.getTableKey(snapshotID))
+          .setSnapshotSize(snapshotSize)
+          .build();
+    }
+
+    /**
+     * Processes reclaimable deleted keys for one store: either the active
+     * object store (AOS) or a single snapshot.
+     *
+     * @param currentSnapshotInfo if null, deleted keys in the AOS are processed;
+     * otherwise the given snapshot's deletedTable is processed.
+     * @param keyManager KeyManager of the underlying store.
+     * @param remainNum remaining number of entries this run may still process.
+     * @return the remaining quota after processing this store.
+     */
+    private int processDeletedKeysForStore(SnapshotInfo currentSnapshotInfo, KeyManager keyManager,
+        int remainNum) throws IOException {
+      String volume = currentSnapshotInfo == null ? null : currentSnapshotInfo.getVolumeName();
+      String bucket = currentSnapshotInfo == null ? null : currentSnapshotInfo.getBucketName();
+      String snapshotTableKey = currentSnapshotInfo == null ? null : currentSnapshotInfo.getTableKey();
+
+      String startKey = null;
+      boolean successStatus = true;
+      try {
+        // TODO: [SNAPSHOT] HDDS-7968. Reclaim eligible key blocks in
+        // snapshot's deletedTable when active DB's deletedTable
+        // doesn't have enough entries left.
+        // OM would have to keep track of which snapshot the key is coming
+        // from if the above would be done inside getPendingDeletionKeys().
+        OmSnapshotManager omSnapshotManager = getOzoneManager().getOmSnapshotManager();
+        // This avoids a race condition between the purge request and a snapshot chain update. For the AOS we
+        // take the latest global snapshotId, since the AOS may process multiple buckets in one iteration,
+        // while for a snapshot we take its path-previous snapshotId, since it processes only one bucket.
+        UUID expectedPreviousSnapshotId = currentSnapshotInfo == null ?
+            snapshotChainManager.getLatestGlobalSnapshotId() :
+            SnapshotUtils.getPreviousSnapshotId(currentSnapshotInfo, snapshotChainManager);
+
+        IOzoneManagerLock lock = getOzoneManager().getMetadataManager().getLock();
+
+        // Purge deleted keys in the deletedTable and rename entries in the snapshotRenamedTable that no longer
+        // have a reference in the previous snapshot.
+        try (ReclaimableKeyFilter reclaimableKeyFilter = new ReclaimableKeyFilter(getOzoneManager(),
+            omSnapshotManager, snapshotChainManager, currentSnapshotInfo, keyManager, lock);
+            ReclaimableRenameEntryFilter renameEntryFilter = new ReclaimableRenameEntryFilter(
+                getOzoneManager(), omSnapshotManager, snapshotChainManager, currentSnapshotInfo,
+                keyManager, lock)) {
+          List<String> renamedTableEntries =
+              keyManager.getRenamesKeyEntries(volume, bucket, startKey, renameEntryFilter, remainNum).stream()
+                  .map(entry -> {
+                    try {
+                      return entry.getKey();
+                    } catch (IOException e) {
+                      throw new UncheckedIOException(e);
+                    }
+                  }).collect(Collectors.toList());
+          remainNum -= renamedTableEntries.size();
+
+          // Get pending keys that can be deleted
+          PendingKeysDeletion pendingKeysDeletion = keyManager.getPendingDeletionKeys(volume, bucket, startKey,
+              reclaimableKeyFilter, remainNum);
+          List<BlockGroup> keyBlocksList = pendingKeysDeletion.getKeyBlocksList();
+          // Submit purge requests if there are rename entries or keys to be purged.
+          if (!renamedTableEntries.isEmpty() || keyBlocksList != null && !keyBlocksList.isEmpty()) {
+            // Validating if the previous snapshot is still the same before purging the blocks.
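+            // If a snapshot was created or deleted concurrently, the previous snapshot in
+            // the chain may have changed; validatePreviousSnapshotId is expected to fail in
+            // that case so that reclaimability is re-evaluated on the next run.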
+            SnapshotUtils.validatePreviousSnapshotId(currentSnapshotInfo, snapshotChainManager,
+                expectedPreviousSnapshotId);
+            Pair<Integer, Boolean> purgeResult = processKeyDeletes(keyBlocksList, keyManager,
+                pendingKeysDeletion.getKeysToModify(), renamedTableEntries, snapshotTableKey,
+                expectedPreviousSnapshotId);
+            remainNum -= purgeResult.getKey();
+            successStatus = purgeResult.getValue();
+            if (successStatus) {
+              deletedKeyCount.addAndGet(purgeResult.getKey());
+            }
+          }
+
+          // Update snapshot metadata only when this run still has quota left and the purge
+          // succeeded, i.e. every reclaimable key seen for this store was processed.
+          if (remainNum > 0 && successStatus) {
+            List<SetSnapshotPropertyRequest> setSnapshotPropertyRequests = new ArrayList<>();
+            Map<UUID, Long> exclusiveReplicatedSizeMap = reclaimableKeyFilter.getExclusiveReplicatedSizeMap();
+            Map<UUID, Long> exclusiveSizeMap = reclaimableKeyFilter.getExclusiveSizeMap();
+            List<UUID> previousPathSnapshotsInChain =
+                Stream.of(exclusiveSizeMap.keySet(), exclusiveReplicatedSizeMap.keySet())
+                    .flatMap(Collection::stream).distinct().collect(Collectors.toList());
+            for (UUID snapshot : previousPathSnapshotsInChain) {
+              setSnapshotPropertyRequests.add(getSetSnapshotRequestUpdatingExclusiveSize(exclusiveSizeMap,
+                  exclusiveReplicatedSizeMap, snapshot));
+            }
+
+            // Mark the snapshot's deleted-key deep clean as done.
+            if (currentSnapshotInfo != null) {
+              setSnapshotPropertyRequests.add(OzoneManagerProtocolProtos.SetSnapshotPropertyRequest.newBuilder()
+                  .setSnapshotKey(snapshotTableKey)
+                  .setDeepCleanedDeletedKey(true)
+                  .build());
+            }
+            submitSetSnapshotRequest(setSnapshotPropertyRequests);
+          }
+        }
+      } catch (IOException e) {
+        throw e;
+      } catch (UncheckedIOException e) {
+        throw e.getCause();
+      }
+      return remainNum;
+    }
+
     @Override
     public int getPriority() {
       return 0;
@@ -204,325 +303,65 @@ public BackgroundTaskResult call() {
       final long run = getRunCount().incrementAndGet();
       LOG.debug("Running KeyDeletingService {}", run);
       isRunningOnAOS.set(true);
-      int delCount = 0;
+      int remainNum = keyLimitPerTask;
       try {
-        // TODO: [SNAPSHOT] HDDS-7968. Reclaim eligible key blocks in
-        // snapshot's deletedTable when active DB's deletedTable
-        // doesn't have enough entries left.
-        // OM would have to keep track of which snapshot the key is coming
-        // from if the above would be done inside getPendingDeletionKeys().
-        // This is to avoid race condition b/w purge request and snapshot chain update. For AOS taking the global
-        // snapshotId since AOS could process multiple buckets in one iteration.
-        UUID expectedPreviousSnapshotId = snapshotChainManager.getLatestGlobalSnapshotId();
-        PendingKeysDeletion pendingKeysDeletion = manager
-            .getPendingDeletionKeys(getKeyLimitPerTask());
-        List<BlockGroup> keyBlocksList = pendingKeysDeletion
-            .getKeyBlocksList();
-        if (keyBlocksList != null && !keyBlocksList.isEmpty()) {
-          delCount = processKeyDeletes(keyBlocksList,
-              getOzoneManager().getKeyManager(),
-              pendingKeysDeletion.getKeysToModify(), null, expectedPreviousSnapshotId);
-          deletedKeyCount.addAndGet(delCount);
-          metrics.incrNumKeysProcessed(keyBlocksList.size());
-          metrics.incrNumKeysSentForPurge(delCount);
-        }
+        remainNum = processDeletedKeysForStore(null, getOzoneManager().getKeyManager(),
+            remainNum);
       } catch (IOException e) {
-        LOG.error("Error while running delete keys background task. Will " +
-            "retry at next run.", e);
+        LOG.error("Error while running the key deleting background task. Will retry at next run " +
+            "
on active object store", e); + } finally { + isRunningOnAOS.set(false); } - try { - if (deepCleanSnapshots && delCount < keyLimitPerTask) { - processSnapshotDeepClean(delCount); - } - } catch (Exception e) { - LOG.error("Error while running deep clean on snapshots. Will " + - "retry at next run.", e); - } + if (deepCleanSnapshots && remainNum > 0) { + OmSnapshotManager omSnapshotManager = getOzoneManager().getOmSnapshotManager(); + Iterator iterator = null; + try { + iterator = snapshotChainManager.iterator(true); - } - isRunningOnAOS.set(false); - synchronized (deletingService) { - this.deletingService.notify(); - } - - // By design, no one cares about the results of this call back. - return EmptyTaskResult.newResult(); - } - - @SuppressWarnings("checkstyle:MethodLength") - private void processSnapshotDeepClean(int delCount) - throws IOException { - OmSnapshotManager omSnapshotManager = - getOzoneManager().getOmSnapshotManager(); - OmMetadataManagerImpl metadataManager = (OmMetadataManagerImpl) - getOzoneManager().getMetadataManager(); - SnapshotChainManager snapChainManager = metadataManager - .getSnapshotChainManager(); - Table snapshotInfoTable = - getOzoneManager().getMetadataManager().getSnapshotInfoTable(); - List deepCleanedSnapshots = new ArrayList<>(); - try (TableIterator> iterator = snapshotInfoTable.iterator()) { - - while (delCount < keyLimitPerTask && iterator.hasNext()) { - List keysToPurge = new ArrayList<>(); - HashMap keysToModify = new HashMap<>(); - SnapshotInfo currSnapInfo = snapshotInfoTable.get(iterator.next().getKey()); - // Deep clean only on active snapshot. Deleted Snapshots will be - // cleaned up by SnapshotDeletingService. - if (currSnapInfo == null || currSnapInfo.getSnapshotStatus() != SNAPSHOT_ACTIVE || - currSnapInfo.getDeepClean()) { - continue; + } catch (IOException e) { + LOG.error("Error while initializing snapshot chain iterator."); + return BackgroundTaskResult.EmptyTaskResult.newResult(); } - SnapshotInfo prevSnapInfo = SnapshotUtils.getPreviousSnapshot(getOzoneManager(), snapChainManager, - currSnapInfo); - if (prevSnapInfo != null && - (prevSnapInfo.getSnapshotStatus() != SnapshotInfo.SnapshotStatus.SNAPSHOT_ACTIVE || - !OmSnapshotManager.areSnapshotChangesFlushedToDB(getOzoneManager().getMetadataManager(), - prevSnapInfo))) { - continue; - } - - try (ReferenceCounted - rcCurrOmSnapshot = omSnapshotManager.getSnapshot( - currSnapInfo.getVolumeName(), - currSnapInfo.getBucketName(), - currSnapInfo.getName())) { - OmSnapshot currOmSnapshot = rcCurrOmSnapshot.get(); - - Table snapDeletedTable = - currOmSnapshot.getMetadataManager().getDeletedTable(); - Table snapRenamedTable = - currOmSnapshot.getMetadataManager().getSnapshotRenamedTable(); - - long volumeId = metadataManager.getVolumeId( - currSnapInfo.getVolumeName()); - // Get bucketInfo for the snapshot bucket to get bucket layout. - String dbBucketKey = metadataManager.getBucketKey( - currSnapInfo.getVolumeName(), currSnapInfo.getBucketName()); - OmBucketInfo bucketInfo = metadataManager.getBucketTable() - .get(dbBucketKey); - - if (bucketInfo == null) { - throw new IllegalStateException("Bucket " + "/" + currSnapInfo - .getVolumeName() + "/" + currSnapInfo.getBucketName() + - " is not found. BucketInfo should not be null for" + - " snapshotted bucket. 
The OM is in unexpected state."); - } - - String snapshotBucketKey = dbBucketKey + OzoneConsts.OM_KEY_PREFIX; - SnapshotInfo previousSnapshot = SnapshotUtils.getPreviousSnapshot(getOzoneManager(), snapChainManager, - currSnapInfo); - SnapshotInfo previousToPrevSnapshot = null; - - if (previousSnapshot != null) { - previousToPrevSnapshot = SnapshotUtils.getPreviousSnapshot(getOzoneManager(), snapChainManager, - previousSnapshot); - } - - Table previousKeyTable = null; - Table prevRenamedTable = null; - ReferenceCounted rcPrevOmSnapshot = null; - - // Split RepeatedOmKeyInfo and update current snapshot - // deletedKeyTable and next snapshot deletedKeyTable. - if (previousSnapshot != null) { - rcPrevOmSnapshot = omSnapshotManager.getSnapshot( - previousSnapshot.getVolumeName(), - previousSnapshot.getBucketName(), - previousSnapshot.getName()); - OmSnapshot omPreviousSnapshot = rcPrevOmSnapshot.get(); - - previousKeyTable = omPreviousSnapshot.getMetadataManager() - .getKeyTable(bucketInfo.getBucketLayout()); - prevRenamedTable = omPreviousSnapshot - .getMetadataManager().getSnapshotRenamedTable(); - } - - Table previousToPrevKeyTable = null; - ReferenceCounted rcPrevToPrevOmSnapshot = null; - if (previousToPrevSnapshot != null) { - rcPrevToPrevOmSnapshot = omSnapshotManager.getSnapshot( - previousToPrevSnapshot.getVolumeName(), - previousToPrevSnapshot.getBucketName(), - previousToPrevSnapshot.getName()); - OmSnapshot omPreviousToPrevSnapshot = rcPrevToPrevOmSnapshot.get(); - - previousToPrevKeyTable = omPreviousToPrevSnapshot - .getMetadataManager() - .getKeyTable(bucketInfo.getBucketLayout()); - } - - try (TableIterator> deletedIterator = snapDeletedTable - .iterator()) { - - String lastKeyInCurrentRun = null; - String deletedTableSeek = snapshotSeekMap.getOrDefault( - currSnapInfo.getTableKey(), snapshotBucketKey); - deletedIterator.seek(deletedTableSeek); - // To avoid processing the last key from the previous - // run again. - if (!deletedTableSeek.equals(snapshotBucketKey) && - deletedIterator.hasNext()) { - deletedIterator.next(); + while (iterator.hasNext() && remainNum > 0) { + UUID snapshotId = iterator.next(); + try { + SnapshotInfo snapInfo = SnapshotUtils.getSnapshotInfo(getOzoneManager(), snapshotChainManager, + snapshotId); + // Wait for snapshot changes to be flushed to disk. + if (!OmSnapshotManager.areSnapshotChangesFlushedToDB(getOzoneManager().getMetadataManager(), snapInfo)) { + LOG.info("Skipping snapshot processing since changes to snapshot {} have not been flushed to disk", + snapInfo); + continue; } - - while (deletedIterator.hasNext() && delCount < keyLimitPerTask) { - Table.KeyValue - deletedKeyValue = deletedIterator.next(); - String deletedKey = deletedKeyValue.getKey(); - lastKeyInCurrentRun = deletedKey; - - // Exit if it is out of the bucket scope. - if (!deletedKey.startsWith(snapshotBucketKey)) { - break; - } - - RepeatedOmKeyInfo repeatedOmKeyInfo = - deletedKeyValue.getValue(); - - List blockGroupList = new ArrayList<>(); - RepeatedOmKeyInfo newRepeatedOmKeyInfo = - new RepeatedOmKeyInfo(); - for (OmKeyInfo keyInfo : repeatedOmKeyInfo.getOmKeyInfoList()) { - if (previousSnapshot != null) { - // Calculates the exclusive size for the previous - // snapshot. See Java Doc for more info. 
- calculateExclusiveSize(previousSnapshot, - previousToPrevSnapshot, keyInfo, bucketInfo, volumeId, - snapRenamedTable, previousKeyTable, prevRenamedTable, - previousToPrevKeyTable, exclusiveSizeMap, - exclusiveReplicatedSizeMap); - } - - if (isKeyReclaimable(previousKeyTable, snapRenamedTable, - keyInfo, bucketInfo, volumeId, null)) { - List blocksForKeyDelete = currOmSnapshot - .getMetadataManager() - .getBlocksForKeyDelete(deletedKey); - if (blocksForKeyDelete != null) { - blockGroupList.addAll(blocksForKeyDelete); - } - delCount++; - } else { - newRepeatedOmKeyInfo.addOmKeyInfo(keyInfo); - } - } - - if (!newRepeatedOmKeyInfo.getOmKeyInfoList().isEmpty() && - newRepeatedOmKeyInfo.getOmKeyInfoList().size() != - repeatedOmKeyInfo.getOmKeyInfoList().size()) { - keysToModify.put(deletedKey, newRepeatedOmKeyInfo); - } - - if (newRepeatedOmKeyInfo.getOmKeyInfoList().size() != - repeatedOmKeyInfo.getOmKeyInfoList().size()) { - keysToPurge.addAll(blockGroupList); - } + // Check if snapshot has been directory deep cleaned. Return if directory deep cleaning is not + // done. + if (!snapInfo.getDeepCleanedDeletedDir()) { + LOG.debug("Snapshot {} hasn't done deleted directory deep cleaning yet. Skipping the snapshot in this" + + " iteration.", snapInfo); + continue; } - - if (delCount < keyLimitPerTask) { - // Deep clean is completed, we can update the SnapInfo. - deepCleanedSnapshots.add(currSnapInfo.getTableKey()); - // exclusiveSizeList contains check is used to prevent - // case where there is no entry in deletedTable, this - // will throw NPE when we submit request. - if (previousSnapshot != null && exclusiveSizeMap - .containsKey(previousSnapshot.getTableKey())) { - completedExclusiveSizeSet.add( - previousSnapshot.getTableKey()); - } - - snapshotSeekMap.remove(currSnapInfo.getTableKey()); - } else { - // There are keys that still needs processing - // we can continue from it in the next iteration - if (lastKeyInCurrentRun != null) { - snapshotSeekMap.put(currSnapInfo.getTableKey(), - lastKeyInCurrentRun); - } + // Checking if snapshot has been key deep cleaned already. + if (snapInfo.getDeepClean()) { + LOG.debug("Snapshot {} has already done deleted key deep cleaning.", snapInfo); + continue; } - - if (!keysToPurge.isEmpty()) { - processKeyDeletes(keysToPurge, currOmSnapshot.getKeyManager(), - keysToModify, currSnapInfo.getTableKey(), - Optional.ofNullable(previousSnapshot).map(SnapshotInfo::getSnapshotId).orElse(null)); + try (ReferenceCounted omSnapshot = omSnapshotManager.getSnapshot(snapInfo.getVolumeName(), + snapInfo.getBucketName(), snapInfo.getName())) { + remainNum = processDeletedKeysForStore(snapInfo, omSnapshot.get().getKeyManager(), remainNum); } - } finally { - IOUtils.closeQuietly(rcPrevOmSnapshot, rcPrevToPrevOmSnapshot); + + } catch (IOException e) { + LOG.error("Error while running delete directories and files " + + "background task for snapshot: {}. Will retry at next run. 
on active object store", snapshotId, e); } } - } } - - updateDeepCleanedSnapshots(deepCleanedSnapshots); - updateSnapshotExclusiveSize(); - } - - private void updateSnapshotExclusiveSize() { - - if (completedExclusiveSizeSet.isEmpty()) { - return; - } - - Iterator completedSnapshotIterator = - completedExclusiveSizeSet.iterator(); - while (completedSnapshotIterator.hasNext()) { - ClientId clientId = ClientId.randomId(); - String dbKey = completedSnapshotIterator.next(); - SnapshotSize snapshotSize = SnapshotSize.newBuilder() - .setExclusiveSize(exclusiveSizeMap.getOrDefault(dbKey, 0L)) - .setExclusiveReplicatedSize( - exclusiveReplicatedSizeMap.getOrDefault(dbKey, 0L)) - .build(); - SetSnapshotPropertyRequest setSnapshotPropertyRequest = - SetSnapshotPropertyRequest.newBuilder() - .setSnapshotKey(dbKey) - .setSnapshotSize(snapshotSize) - .build(); - - OMRequest omRequest = OMRequest.newBuilder() - .setCmdType(Type.SetSnapshotProperty) - .setSetSnapshotPropertyRequest(setSnapshotPropertyRequest) - .setClientId(clientId.toString()) - .build(); - submitRequest(omRequest, clientId); - exclusiveSizeMap.remove(dbKey); - exclusiveReplicatedSizeMap.remove(dbKey); - completedSnapshotIterator.remove(); - } - } - - private void updateDeepCleanedSnapshots(List deepCleanedSnapshots) { - for (String deepCleanedSnapshot: deepCleanedSnapshots) { - ClientId clientId = ClientId.randomId(); - SetSnapshotPropertyRequest setSnapshotPropertyRequest = - SetSnapshotPropertyRequest.newBuilder() - .setSnapshotKey(deepCleanedSnapshot) - .setDeepCleanedDeletedKey(true) - .build(); - - OMRequest omRequest = OMRequest.newBuilder() - .setCmdType(Type.SetSnapshotProperty) - .setSetSnapshotPropertyRequest(setSnapshotPropertyRequest) - .setClientId(clientId.toString()) - .build(); - - submitRequest(omRequest, clientId); - } - } - - public void submitRequest(OMRequest omRequest, ClientId clientId) { - try { - OzoneManagerRatisUtils.submitRequest(getOzoneManager(), omRequest, clientId, getRunCount().get()); - } catch (ServiceException e) { - LOG.error("Snapshot deep cleaning request failed. " + - "Will retry at next run.", e); - } + // By design, no one cares about the results of this call back. + return EmptyTaskResult.newResult(); } } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/SnapshotDeletingService.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/SnapshotDeletingService.java index a570a452c224..2e565760179f 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/SnapshotDeletingService.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/SnapshotDeletingService.java @@ -193,7 +193,7 @@ public BackgroundTaskResult call() throws InterruptedException { // Get all entries from deletedKeyTable. List>> deletedKeyEntries = snapshotKeyManager.getDeletedKeyEntries(snapInfo.getVolumeName(), snapInfo.getBucketName(), - null, remaining); + null, (kv) -> true, remaining); moveCount += deletedKeyEntries.size(); // Get all entries from deletedDirTable. List> deletedDirEntries = snapshotKeyManager.getDeletedDirEntries( @@ -201,7 +201,7 @@ public BackgroundTaskResult call() throws InterruptedException { moveCount += deletedDirEntries.size(); // Get all entries from snapshotRenamedTable. 
List> renameEntries = snapshotKeyManager.getRenamesKeyEntries( - snapInfo.getVolumeName(), snapInfo.getBucketName(), null, remaining - moveCount); + snapInfo.getVolumeName(), snapInfo.getBucketName(), null, (kv) -> true, remaining - moveCount); moveCount += renameEntries.size(); if (moveCount > 0) { List deletedKeys = new ArrayList<>(deletedKeyEntries.size()); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java index 22740426e29c..3035f9717302 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java @@ -34,6 +34,7 @@ import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; +import org.apache.ratis.util.function.CheckedFunction; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.Arguments; import org.junit.jupiter.params.provider.MethodSource; @@ -74,7 +75,8 @@ private List> mockTableIterator( Class valueClass, Table table, int numberOfVolumes, int numberOfBucketsPerVolume, int numberOfKeysPerBucket, String volumeNamePrefix, String bucketNamePrefix, String keyPrefix, Integer volumeNumberFilter, Integer bucketNumberFilter, Integer startVolumeNumber, Integer startBucketNumber, - Integer startKeyNumber, int numberOfEntries) throws IOException { + Integer startKeyNumber, CheckedFunction, Boolean, IOException> filter, + int numberOfEntries) throws IOException { TreeMap values = new TreeMap<>(); List> keyValues = new ArrayList<>(); String startKey = startVolumeNumber == null || startBucketNumber == null || startKeyNumber == null ? 
null @@ -98,7 +100,13 @@ private List> mockTableIterator( } when(table.iterator(anyString())).thenAnswer(i -> new MapBackedTableIterator<>(values, i.getArgument(0))); - return keyValues.subList(0, Math.min(numberOfEntries, keyValues.size())); + return keyValues.stream().filter(kv -> { + try { + return filter.apply(kv); + } catch (IOException e) { + throw new RuntimeException(e); + } + }).limit(numberOfEntries).collect(Collectors.toList()); } @ParameterizedTest @@ -119,10 +127,12 @@ public void testGetDeletedKeyEntries(int numberOfVolumes, int numberOfBucketsPer KeyManagerImpl km = new KeyManagerImpl(null, null, metadataManager, configuration, null, null, null); Table mockedDeletedTable = Mockito.mock(Table.class); when(metadataManager.getDeletedTable()).thenReturn(mockedDeletedTable); + CheckedFunction, Boolean, IOException> filter = + (kv) -> Long.parseLong(kv.getKey().split(keyPrefix)[1]) % 2 == 0; List>> expectedEntries = mockTableIterator( RepeatedOmKeyInfo.class, mockedDeletedTable, numberOfVolumes, numberOfBucketsPerVolume, numberOfKeysPerBucket, volumeNamePrefix, bucketNamePrefix, keyPrefix, volumeNumber, bucketNumber, startVolumeNumber, startBucketNumber, - startKeyNumber, numberOfEntries).stream() + startKeyNumber, filter, numberOfEntries).stream() .map(kv -> { try { String key = kv.getKey(); @@ -140,9 +150,10 @@ public void testGetDeletedKeyEntries(int numberOfVolumes, int numberOfBucketsPer : (String.format("/%s%010d/%s%010d/%s%010d", volumeNamePrefix, startVolumeNumber, bucketNamePrefix, startBucketNumber, keyPrefix, startKeyNumber)); if (expectedException != null) { - assertThrows(expectedException, () -> km.getDeletedKeyEntries(volumeName, bucketName, startKey, numberOfEntries)); + assertThrows(expectedException, () -> km.getDeletedKeyEntries(volumeName, bucketName, startKey, filter, + numberOfEntries)); } else { - assertEquals(expectedEntries, km.getDeletedKeyEntries(volumeName, bucketName, startKey, numberOfEntries)); + assertEquals(expectedEntries, km.getDeletedKeyEntries(volumeName, bucketName, startKey, filter, numberOfEntries)); } } @@ -164,19 +175,22 @@ public void testGetRenameKeyEntries(int numberOfVolumes, int numberOfBucketsPerV KeyManagerImpl km = new KeyManagerImpl(null, null, metadataManager, configuration, null, null, null); Table mockedRenameTable = Mockito.mock(Table.class); when(metadataManager.getSnapshotRenamedTable()).thenReturn(mockedRenameTable); + CheckedFunction, Boolean, IOException> filter = + (kv) -> Long.parseLong(kv.getKey().split(keyPrefix)[1]) % 2 == 0; List> expectedEntries = mockTableIterator( String.class, mockedRenameTable, numberOfVolumes, numberOfBucketsPerVolume, numberOfKeysPerBucket, volumeNamePrefix, bucketNamePrefix, keyPrefix, volumeNumber, bucketNumber, startVolumeNumber, startBucketNumber, - startKeyNumber, numberOfEntries); + startKeyNumber, filter, numberOfEntries); String volumeName = volumeNumber == null ? null : (String.format("%s%010d", volumeNamePrefix, volumeNumber)); String bucketName = bucketNumber == null ? null : (String.format("%s%010d", bucketNamePrefix, bucketNumber)); String startKey = startVolumeNumber == null || startBucketNumber == null || startKeyNumber == null ? 
null : (String.format("/%s%010d/%s%010d/%s%010d", volumeNamePrefix, startVolumeNumber, bucketNamePrefix, startBucketNumber, keyPrefix, startKeyNumber)); if (expectedException != null) { - assertThrows(expectedException, () -> km.getRenamesKeyEntries(volumeName, bucketName, startKey, numberOfEntries)); + assertThrows(expectedException, () -> km.getRenamesKeyEntries(volumeName, bucketName, startKey, + filter, numberOfEntries)); } else { - assertEquals(expectedEntries, km.getRenamesKeyEntries(volumeName, bucketName, startKey, numberOfEntries)); + assertEquals(expectedEntries, km.getRenamesKeyEntries(volumeName, bucketName, startKey, filter, numberOfEntries)); } } @@ -202,7 +216,7 @@ public void testGetDeletedDirEntries(int numberOfVolumes, int numberOfBucketsPer List> expectedEntries = mockTableIterator( OmKeyInfo.class, mockedDeletedDirTable, numberOfVolumes, numberOfBucketsPerVolume, numberOfKeysPerBucket, volumeNamePrefix, bucketNamePrefix, keyPrefix, volumeNumber, bucketNumber, startVolumeNumber, startBucketNumber, - startKeyNumber, numberOfEntries); + startKeyNumber, (kv) -> true, numberOfEntries); String volumeName = volumeNumber == null ? null : (String.format("%s%010d", volumeNamePrefix, volumeNumber)); String bucketName = bucketNumber == null ? null : (String.format("%s%010d", bucketNamePrefix, bucketNumber)); if (expectedException != null) { diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestKeyDeletingService.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestKeyDeletingService.java index 20a91080c50c..b22b2b7e3acf 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestKeyDeletingService.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestKeyDeletingService.java @@ -22,12 +22,14 @@ import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SNAPSHOT_DELETING_SERVICE_INTERVAL; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_SNAPSHOT_DEEP_CLEANING_ENABLED; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_SNAPSHOT_DIRECTORY_SERVICE_INTERVAL; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_SNAPSHOT_SST_FILTERING_SERVICE_INTERVAL; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.anyInt; import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.when; @@ -60,6 +62,7 @@ import org.apache.hadoop.ozone.om.KeyManager; import org.apache.hadoop.ozone.om.KeyManagerImpl; import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; import org.apache.hadoop.ozone.om.OmSnapshot; import org.apache.hadoop.ozone.om.OmSnapshotManager; import org.apache.hadoop.ozone.om.OmTestManagers; @@ -80,6 +83,7 @@ import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol; import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.om.snapshot.ReferenceCounted; +import org.apache.hadoop.ozone.om.snapshot.filter.ReclaimableKeyFilter; import org.apache.ozone.test.GenericTestUtils; import 
org.apache.ozone.test.OzoneTestBase; import org.apache.ratis.util.ExitUtils; @@ -117,6 +121,7 @@ class TestKeyDeletingService extends OzoneTestBase { private KeyManager keyManager; private OMMetadataManager metadataManager; private KeyDeletingService keyDeletingService; + private SnapshotDirectoryCleaningService snapshotDirectoryCleaningService; private ScmBlockLocationTestingClient scmBlockTestingClient; @BeforeAll @@ -132,6 +137,8 @@ private void createConfig(File testDir) { 100, TimeUnit.MILLISECONDS); conf.setTimeDuration(OZONE_SNAPSHOT_DELETING_SERVICE_INTERVAL, 100, TimeUnit.MILLISECONDS); + conf.setTimeDuration(OZONE_SNAPSHOT_DIRECTORY_SERVICE_INTERVAL, + 100, TimeUnit.MILLISECONDS); conf.setTimeDuration(OZONE_SNAPSHOT_SST_FILTERING_SERVICE_INTERVAL, 1, TimeUnit.SECONDS); conf.setTimeDuration(HDDS_CONTAINER_REPORT_INTERVAL, @@ -144,6 +151,7 @@ private void createSubject() throws Exception { OmTestManagers omTestManagers = new OmTestManagers(conf, scmBlockTestingClient, null); keyManager = omTestManagers.getKeyManager(); keyDeletingService = keyManager.getDeletingService(); + snapshotDirectoryCleaningService = keyManager.getSnapshotDirectoryService(); writeClient = omTestManagers.getWriteClient(); om = omTestManagers.getOzoneManager(); metadataManager = omTestManagers.getMetadataManager(); @@ -196,7 +204,9 @@ void checkIfDeleteServiceIsDeletingKeys() () -> getDeletedKeyCount() >= initialDeletedCount + keyCount, 100, 10000); assertThat(getRunCount()).isGreaterThan(initialRunCount); - assertThat(keyManager.getPendingDeletionKeys(Integer.MAX_VALUE).getKeyBlocksList()) + assertThat(keyManager.getPendingDeletionKeys(new ReclaimableKeyFilter(om, om.getOmSnapshotManager(), + ((OmMetadataManagerImpl)om.getMetadataManager()).getSnapshotChainManager(), null, + keyManager, om.getMetadataManager().getLock()), Integer.MAX_VALUE).getKeyBlocksList()) .isEmpty(); } @@ -225,7 +235,7 @@ void checkDeletionForKeysWithMultipleVersions() throws Exception { 1000, 10000); assertThat(getRunCount()) .isGreaterThan(initialRunCount); - assertThat(keyManager.getPendingDeletionKeys(Integer.MAX_VALUE).getKeyBlocksList()) + assertThat(keyManager.getPendingDeletionKeys((kv) -> true, Integer.MAX_VALUE).getKeyBlocksList()) .isEmpty(); // The 1st version of the key has 1 block and the 2nd version has 2 @@ -267,7 +277,10 @@ void checkDeletedTableCleanUpForSnapshot() throws Exception { 1000, 10000); assertThat(getRunCount()) .isGreaterThan(initialRunCount); - assertThat(keyManager.getPendingDeletionKeys(Integer.MAX_VALUE).getKeyBlocksList()) + assertThat(keyManager.getPendingDeletionKeys(new ReclaimableKeyFilter(om, om.getOmSnapshotManager(), + ((OmMetadataManagerImpl)om.getMetadataManager()).getSnapshotChainManager(), null, + keyManager, om.getMetadataManager().getLock()), + Integer.MAX_VALUE).getKeyBlocksList()) .isEmpty(); // deletedTable should have deleted key of the snapshot bucket @@ -365,7 +378,7 @@ public void testAOSKeyDeletingWithSnapshotCreateParallelExecution() } }, 1000, 10000); return i.callRealMethod(); - }).when(omSnapshotManager).getSnapshot(ArgumentMatchers.eq(volumeName), ArgumentMatchers.eq(bucketName), + }).when(omSnapshotManager).getActiveSnapshot(ArgumentMatchers.eq(volumeName), ArgumentMatchers.eq(bucketName), ArgumentMatchers.eq(snap1)); assertTableRowCount(snapshotInfoTable, initialSnapshotCount + 1, metadataManager); doAnswer(i -> { @@ -374,7 +387,7 @@ public void testAOSKeyDeletingWithSnapshotCreateParallelExecution() Assertions.assertNotEquals(deletePathKey[0], group.getGroupID()); } return 
pendingKeysDeletion; - }).when(km).getPendingDeletionKeys(anyInt()); + }).when(km).getPendingDeletionKeys(any(), anyInt()); service.runPeriodicalTaskNow(); service.runPeriodicalTaskNow(); assertTableRowCount(snapshotInfoTable, initialSnapshotCount + 2, metadataManager); @@ -571,9 +584,15 @@ void testSnapshotExclusiveSize() throws Exception { // Create Snapshot4 String snap4 = uniqueObjectName("snap"); writeClient.createSnapshot(testVolumeName, testBucketName, snap4); + assertTableRowCount(snapshotInfoTable, initialSnapshotCount + 4, metadataManager); createAndCommitKey(testVolumeName, testBucketName, uniqueObjectName("key"), 3); long prevKdsRunCount = getRunCount(); + long prevSnapshotDirectoryServiceCnt = snapshotDirectoryCleaningService.getRunCount().get(); + // Let SnapshotDirectoryCleaningService run for some iterations + GenericTestUtils.waitFor( + () -> (snapshotDirectoryCleaningService.getRunCount().get() > prevSnapshotDirectoryServiceCnt + 20), + 100, 100000); keyDeletingService.resume(); Map expectedSize = new ImmutableMap.Builder() @@ -586,22 +605,23 @@ void testSnapshotExclusiveSize() throws Exception { // Let KeyDeletingService run for some iterations GenericTestUtils.waitFor( - () -> (getRunCount() > prevKdsRunCount + 5), - 100, 10000); - + () -> (getRunCount() > prevKdsRunCount + 20), + 100, 100000); // Check if the exclusive size is set. + om.awaitDoubleBufferFlush(); try (TableIterator> iterator = snapshotInfoTable.iterator()) { while (iterator.hasNext()) { Table.KeyValue snapshotEntry = iterator.next(); + SnapshotInfo snapshotInfo = om.getMetadataManager().getSnapshotInfoTable().get(snapshotEntry.getKey()); String snapshotName = snapshotEntry.getValue().getName(); - Long expected = expectedSize.getOrDefault(snapshotName, 0L); + Long expected = expectedSize.getOrDefault(snapshotName, snapshotInfo.getExclusiveSize()); assertNotNull(expected); System.out.println(snapshotName); - assertEquals(expected, snapshotEntry.getValue().getExclusiveSize()); + assertEquals(expected, snapshotInfo.getExclusiveSize()); // Since for the test we are using RATIS/THREE - assertEquals(expected * 3, snapshotEntry.getValue().getExclusiveReplicatedSize()); + assertEquals(expected * 3, snapshotInfo.getExclusiveReplicatedSize()); } } } @@ -738,8 +758,7 @@ private static void checkSnapDeepCleanStatus(Table table, private static void assertTableRowCount(Table table, long count, OMMetadataManager metadataManager) throws TimeoutException, InterruptedException { - GenericTestUtils.waitFor(() -> assertTableRowCount(count, table, - metadataManager), 1000, 120000); // 2 minutes + GenericTestUtils.waitFor(() -> assertTableRowCount(count, table, metadataManager), 1000, 120000); // 2 minutes } private static boolean assertTableRowCount(long expectedCount, @@ -875,7 +894,7 @@ private long getRunCount() { private int countKeysPendingDeletion() { try { - final int count = keyManager.getPendingDeletionKeys(Integer.MAX_VALUE) + final int count = keyManager.getPendingDeletionKeys((kv) -> true, Integer.MAX_VALUE) .getKeyBlocksList().size(); LOG.debug("KeyManager keys pending deletion: {}", count); return count; @@ -886,7 +905,7 @@ private int countKeysPendingDeletion() { private long countBlocksPendingDeletion() { try { - return keyManager.getPendingDeletionKeys(Integer.MAX_VALUE) + return keyManager.getPendingDeletionKeys((kv) -> true, Integer.MAX_VALUE) .getKeyBlocksList() .stream() .map(BlockGroup::getBlockIDList) From 40b6a83370d32cf70eecfab5dafd5d6ba3638b4b Mon Sep 17 00:00:00 2001 From: Swaminathan
Balachandran Date: Wed, 14 May 2025 00:54:37 -0400 Subject: [PATCH 10/20] HDDS-13031. Fix Locking setbit issue Change-Id: I1712f2dd4138a1f679eb4e4676b3d5ac41c80583 --- .../org/apache/hadoop/ozone/om/lock/OzoneManagerLock.java | 4 ++-- .../org/apache/hadoop/ozone/om/lock/TestOzoneManagerLock.java | 1 + 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/OzoneManagerLock.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/OzoneManagerLock.java index 2b2de20698ee..7354b4b1c24a 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/OzoneManagerLock.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/OzoneManagerLock.java @@ -668,6 +668,7 @@ long getStartWriteHeldTimeNanos() { } Resource(String name) { + this.lockLevel = -1; this.name = name; this.mask = 0; this.setMask = 0; @@ -686,7 +687,6 @@ boolean canLock(short lockSetVal) { return false; } - // Our mask is the summation of bits of all previous possible locks. In // other words it is the largest possible value for that bit position. @@ -723,7 +723,7 @@ short clearLock(short lockSetVal) { * @param lockSetVal */ boolean isLevelLocked(short lockSetVal) { - return (lockSetVal & setMask) == setMask; + return setMask != 0 && (lockSetVal & setMask) == setMask; } String getName() { diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/lock/TestOzoneManagerLock.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/lock/TestOzoneManagerLock.java index 3f1e7ca83c60..441f49fe00da 100644 --- a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/lock/TestOzoneManagerLock.java +++ b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/lock/TestOzoneManagerLock.java @@ -30,6 +30,7 @@ import java.util.Stack; import java.util.UUID; import java.util.concurrent.atomic.AtomicBoolean; +import java.util.stream.Collectors; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.metrics2.MetricsRecord; import org.apache.hadoop.metrics2.impl.MetricsCollectorImpl; From c962507c0ab9591140b34de5b1477b8f5e3f178f Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Wed, 14 May 2025 05:21:03 -0400 Subject: [PATCH 11/20] HDDS-13031. Fix checkstyle Change-Id: Id4a53d3eb8f23031ef517b3793dc73b067b11d62 --- .../org/apache/hadoop/ozone/om/lock/TestOzoneManagerLock.java | 1 - 1 file changed, 1 deletion(-) diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/lock/TestOzoneManagerLock.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/lock/TestOzoneManagerLock.java index 441f49fe00da..3f1e7ca83c60 100644 --- a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/lock/TestOzoneManagerLock.java +++ b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/lock/TestOzoneManagerLock.java @@ -30,7 +30,6 @@ import java.util.Stack; import java.util.UUID; import java.util.concurrent.atomic.AtomicBoolean; -import java.util.stream.Collectors; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.metrics2.MetricsRecord; import org.apache.hadoop.metrics2.impl.MetricsCollectorImpl; From 4b843076b2027b71f5f6fa40ca96d1528ad8ce01 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Tue, 20 May 2025 18:58:40 -0400 Subject: [PATCH 12/20] HDDS-13026. 
Fix checkstyle Change-Id: Ibfcfe5d817ac6ac83f76abe3af08b5794bbb4b3e --- .../ozone/om/service/AbstractKeyDeletingService.java | 4 ++-- .../apache/hadoop/ozone/om/service/KeyDeletingService.java | 5 ++--- .../hadoop/ozone/om/service/TestKeyDeletingService.java | 7 ++++--- 3 files changed, 8 insertions(+), 8 deletions(-) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/AbstractKeyDeletingService.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/AbstractKeyDeletingService.java index 73510f5c2c7f..38ea6f1b6fcf 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/AbstractKeyDeletingService.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/AbstractKeyDeletingService.java @@ -143,8 +143,8 @@ protected Pair processKeyDeletes(List keyBlocksLis * @param keysToModify Updated list of RepeatedOmKeyInfo */ private Pair submitPurgeKeysRequest(List results, - Map keysToModify, List renameEntriesToBeDeleted, - String snapTableKey, UUID expectedPreviousSnapshotId) { + Map keysToModify, List renameEntriesToBeDeleted, + String snapTableKey, UUID expectedPreviousSnapshotId) { List purgeKeys = new ArrayList<>(); // Put all keys to be purged in a list diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/KeyDeletingService.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/KeyDeletingService.java index 8b927f4fef33..b479e4141243 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/KeyDeletingService.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/KeyDeletingService.java @@ -248,9 +248,8 @@ private int processDeletedKeysForStore(SnapshotInfo currentSnapshotInfo, KeyMana // Validating if the previous snapshot is still the same before purging the blocks. 
SnapshotUtils.validatePreviousSnapshotId(currentSnapshotInfo, snapshotChainManager, expectedPreviousSnapshotId); - Pair purgeResult = processKeyDeletes(keyBlocksList, keyManager, - pendingKeysDeletion.getKeysToModify(), renamedTableEntries, snapshotTableKey, - expectedPreviousSnapshotId); + Pair purgeResult = processKeyDeletes(keyBlocksList, pendingKeysDeletion.getKeysToModify(), + renamedTableEntries, snapshotTableKey, expectedPreviousSnapshotId); remainNum -= purgeResult.getKey(); successStatus = purgeResult.getValue(); if (successStatus) { diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestKeyDeletingService.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestKeyDeletingService.java index 77159bdf9f5e..3238a160449f 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestKeyDeletingService.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestKeyDeletingService.java @@ -29,7 +29,6 @@ import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertNotNull; -import static org.mockito.ArgumentMatchers.any; import static org.junit.jupiter.api.Assertions.assertTrue; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.anyInt; @@ -91,8 +90,8 @@ import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils; import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.om.snapshot.ReferenceCounted; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.ozone.om.snapshot.filter.ReclaimableKeyFilter; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.ozone.test.GenericTestUtils; import org.apache.ozone.test.OzoneTestBase; import org.apache.ratis.util.ExitUtils; @@ -680,6 +679,7 @@ public void testFailingModifiedKeyPurge() throws IOException { }); List blockGroups = Collections.singletonList(BlockGroup.newBuilder().setKeyName("key1") .addAllBlockIDs(Collections.singletonList(new BlockID(1, 1))).build()); + List renameEntriesToBeDeleted = Collections.singletonList("key2"); OmKeyInfo omKeyInfo = new OmKeyInfo.Builder() .setBucketName("buck") .setVolumeName("vol") @@ -692,8 +692,9 @@ public void testFailingModifiedKeyPurge() throws IOException { .build(); Map keysToModify = Collections.singletonMap("key1", new RepeatedOmKeyInfo(Collections.singletonList(omKeyInfo))); - keyDeletingService.processKeyDeletes(blockGroups, keysToModify, null, null); + keyDeletingService.processKeyDeletes(blockGroups, keysToModify, renameEntriesToBeDeleted, null, null); assertTrue(purgeRequest.get().getPurgeKeysRequest().getKeysToUpdateList().isEmpty()); + assertEquals(renameEntriesToBeDeleted, purgeRequest.get().getPurgeKeysRequest().getRenamedKeysList()); } } From 6116cbac83d3b229e63f2c13387579f7af33b391 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Fri, 23 May 2025 22:54:23 -0400 Subject: [PATCH 13/20] HDDS-13026. 
Merge with master Change-Id: I7af63cb80766fc83ec75ef9f7ad0ca8d299caa7a --- .../apache/hadoop/ozone/om/service/KeyDeletingService.java | 6 +----- .../hadoop/ozone/om/service/TestKeyDeletingService.java | 2 -- 2 files changed, 1 insertion(+), 7 deletions(-) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/KeyDeletingService.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/KeyDeletingService.java index 068df715f62b..ac1d8cde9b5f 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/KeyDeletingService.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/KeyDeletingService.java @@ -53,12 +53,8 @@ import org.apache.hadoop.ozone.om.SnapshotChainManager; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; import org.apache.hadoop.ozone.om.lock.IOzoneManagerLock; -import org.apache.hadoop.ozone.om.lock.IOzoneManagerLock; -import org.apache.hadoop.ozone.om.snapshot.ReferenceCounted; import org.apache.hadoop.ozone.om.snapshot.SnapshotUtils; import org.apache.hadoop.ozone.om.snapshot.filter.ReclaimableKeyFilter; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import org.apache.hadoop.ozone.om.snapshot.filter.ReclaimableKeyFilter; import org.apache.hadoop.ozone.om.snapshot.filter.ReclaimableRenameEntryFilter; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SetSnapshotPropertyRequest; @@ -240,7 +236,7 @@ private void processDeletedKeysForStore(SnapshotInfo currentSnapshotInfo, KeyMan getOzoneManager(), omSnapshotManager, snapshotChainManager, currentSnapshotInfo, keyManager, lock)) { List renamedTableEntries = - keyManager.getRenamesKeyEntries(volume, bucket, startKey, renameEntryFilter, remainNum).stream() + keyManager.getRenamesKeyEntries(volume, bucket, null, renameEntryFilter, remainNum).stream() .map(entry -> { try { return entry.getKey(); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestKeyDeletingService.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestKeyDeletingService.java index 1dc4ff70da9d..a7bdce01cdb9 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestKeyDeletingService.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestKeyDeletingService.java @@ -89,8 +89,6 @@ import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol; import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils; import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; -import org.apache.hadoop.ozone.om.snapshot.ReferenceCounted; -import org.apache.hadoop.ozone.om.snapshot.filter.ReclaimableKeyFilter; import org.apache.hadoop.ozone.om.snapshot.filter.ReclaimableKeyFilter; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.ozone.test.GenericTestUtils; From 3af0ac6b78a9225e8f03db3f25251881246038a6 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Fri, 23 May 2025 23:02:41 -0400 Subject: [PATCH 14/20] HDDS-13026. 
Merge with master Change-Id: Ib4c0199f50e5efb8f40b4e5c897d4b33acdf9d62 --- .../org/apache/hadoop/ozone/om/service/KeyDeletingService.java | 1 + 1 file changed, 1 insertion(+) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/KeyDeletingService.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/KeyDeletingService.java index ac1d8cde9b5f..9dba72eb8d6c 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/KeyDeletingService.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/KeyDeletingService.java @@ -245,6 +245,7 @@ private void processDeletedKeysForStore(SnapshotInfo currentSnapshotInfo, KeyMan } }).collect(Collectors.toList()); remainNum -= renamedTableEntries.size(); + // Get pending keys that can be deleted PendingKeysDeletion pendingKeysDeletion = currentSnapshotInfo == null ? keyManager.getPendingDeletionKeys(reclaimableKeyFilter, remainNum) From 62496988366cc9d68a7e0280969a0a7dde15d237 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Fri, 23 May 2025 23:10:20 -0400 Subject: [PATCH 15/20] HDDS-13026. Add OmKeyPurgeRequest changes Change-Id: I67ca2fea367627a7a0ad97312ae450c74fabe95b --- .../ozone/om/request/key/OMKeyPurgeRequest.java | 5 +++-- .../ozone/om/response/key/OMKeyPurgeResponse.java | 13 +++++++++---- 2 files changed, 12 insertions(+), 6 deletions(-) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyPurgeRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyPurgeRequest.java index 75d519f2b33f..80a63e8a291d 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyPurgeRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyPurgeRequest.java @@ -91,6 +91,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, Execut List keysToBePurgedList = new ArrayList<>(); int numKeysDeleted = 0; + List renamedKeysToBePurged = new ArrayList<>(purgeKeysRequest.getRenamedKeysList()); for (DeletedKeys bucketWithDeleteKeys : bucketDeletedKeysList) { List keysList = bucketWithDeleteKeys.getKeysList(); keysToBePurgedList.addAll(keysList); @@ -99,7 +100,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, Execut DeletingServiceMetrics deletingServiceMetrics = ozoneManager.getDeletionMetrics(); deletingServiceMetrics.incrNumKeysPurged(numKeysDeleted); - if (keysToBePurgedList.isEmpty()) { + if (keysToBePurgedList.isEmpty() && renamedKeysToBePurged.isEmpty()) { return new OMKeyPurgeResponse(createErrorOMResponse(omResponse, new OMException("None of the keys can be purged be purged since a new snapshot was created for all the " + "buckets, making this request invalid", OMException.ResultCodes.KEY_DELETION_ERROR))); @@ -118,7 +119,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, Execut } return new OMKeyPurgeResponse(omResponse.build(), - keysToBePurgedList, fromSnapshotInfo, keysToUpdateList); + keysToBePurgedList, renamedKeysToBePurged, fromSnapshotInfo, keysToUpdateList); } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyPurgeResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyPurgeResponse.java index 7a1aebe6a4f5..8571fa07741c 100644 --- 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyPurgeResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyPurgeResponse.java @@ -45,15 +45,18 @@ @CleanupTableInfo(cleanupTables = {DELETED_TABLE, SNAPSHOT_INFO_TABLE}) public class OMKeyPurgeResponse extends OmKeyResponse { private List purgeKeyList; + private List renamedList; private SnapshotInfo fromSnapshot; private List keysToUpdateList; public OMKeyPurgeResponse(@Nonnull OMResponse omResponse, @Nonnull List keyList, + @Nonnull List renamedList, SnapshotInfo fromSnapshot, List keysToUpdate) { super(omResponse); this.purgeKeyList = keyList; + this.renamedList = renamedList; this.fromSnapshot = fromSnapshot; this.keysToUpdateList = keysToUpdate; } @@ -103,19 +106,21 @@ private void processKeysToUpdate(BatchOperation batchOp, for (SnapshotMoveKeyInfos keyToUpdate : keysToUpdateList) { List keyInfosList = keyToUpdate.getKeyInfosList(); - RepeatedOmKeyInfo repeatedOmKeyInfo = - createRepeatedOmKeyInfo(keyInfosList); + RepeatedOmKeyInfo repeatedOmKeyInfo = createRepeatedOmKeyInfo(keyInfosList); metadataManager.getDeletedTable().putWithBatch(batchOp, keyToUpdate.getKey(), repeatedOmKeyInfo); } } - private void processKeys(BatchOperation batchOp, - OMMetadataManager metadataManager) throws IOException { + private void processKeys(BatchOperation batchOp, OMMetadataManager metadataManager) throws IOException { for (String key : purgeKeyList) { metadataManager.getDeletedTable().deleteWithBatch(batchOp, key); } + // Delete rename entries. + for (String key : renamedList) { + metadataManager.getSnapshotRenamedTable().deleteWithBatch(batchOp, key); + } } } From 84dcb6b5efd16bab238788c4ade8076251cd185f Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Sat, 24 May 2025 00:02:59 -0400 Subject: [PATCH 16/20] HDDS-13026. Add test case Change-Id: Ie9efcc5f15cee9d58cfcea7e373882c792cdd96b --- .../ozone/om/request/OMRequestTestUtils.java | 10 +++ .../key/TestOMKeyPurgeRequestAndResponse.java | 59 +++++++++++---- .../om/service/TestKeyDeletingService.java | 75 ++++++++++++++++++- 3 files changed, 124 insertions(+), 20 deletions(-) diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/OMRequestTestUtils.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/OMRequestTestUtils.java index 7b22c26c2bb4..6d85631fa6b1 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/OMRequestTestUtils.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/OMRequestTestUtils.java @@ -275,6 +275,16 @@ public static void addKeyToTable(boolean openKeyTable, boolean addToCache, omMetadataManager); } + /** + * Add key entry to SnapshotRenamedTable. + */ + public static String addRenamedEntryToTable(long trxnLogIndex, String volumeName, String bucketName, String key, + OMMetadataManager omMetadataManager) throws Exception { + String renameKey = omMetadataManager.getRenameKey(volumeName, bucketName, trxnLogIndex); + omMetadataManager.getSnapshotRenamedTable().put(renameKey, key); + return renameKey; + } + /** * Add key entry to KeyTable. if openKeyTable flag is true, add's entries * to openKeyTable, else add's it to keyTable. 
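Taken together, the OMKeyPurgeResponse hunks above remove purged keys from deletedTable and stale rename markers from snapshotRenamedTable through batched writes that commit as a unit. A minimal usage sketch of that pattern, assuming only the calls already visible in these patches (getStore().initBatchOperation(), deleteWithBatch, commitBatchOperation); the wrapper class and method are illustrative, not part of the patch series:

    import java.io.IOException;
    import java.util.List;
    import org.apache.hadoop.hdds.utils.db.BatchOperation;
    import org.apache.hadoop.ozone.om.OMMetadataManager;

    // Sketch: purge deletedTable keys and snapshotRenamedTable entries in one batch.
    final class PurgeBatchSketch {
      static void purge(OMMetadataManager metadataManager,
          List<String> purgeKeyList, List<String> renamedList) throws IOException {
        try (BatchOperation batchOp = metadataManager.getStore().initBatchOperation()) {
          for (String key : purgeKeyList) {
            metadataManager.getDeletedTable().deleteWithBatch(batchOp, key); // reclaimed keys
          }
          for (String key : renamedList) {
            metadataManager.getSnapshotRenamedTable().deleteWithBatch(batchOp, key); // stale rename markers
          }
          metadataManager.getStore().commitBatchOperation(batchOp); // all-or-nothing
        }
      }
    }

Because both tables are written through the same batch, a crash between the two loops cannot leave rename markers pointing at keys that were already purged.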
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyPurgeRequestAndResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyPurgeRequestAndResponse.java index 39c39953438f..3ca62ca3e340 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyPurgeRequestAndResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyPurgeRequestAndResponse.java @@ -26,6 +26,7 @@ import java.util.ArrayList; import java.util.List; import java.util.UUID; +import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.hdds.utils.TransactionInfo; import org.apache.hadoop.hdds.utils.db.BatchOperation; import org.apache.hadoop.ozone.om.OmSnapshot; @@ -53,7 +54,7 @@ public class TestOMKeyPurgeRequestAndResponse extends TestOMKeyRequest { * Creates volume, bucket and key entries and adds to OM DB and then * deletes these keys to move them to deletedKeys table. */ - private List createAndDeleteKeys(Integer trxnIndex, String bucket) + private Pair, List> createAndDeleteKeysAndRenamedEntry(Integer trxnIndex, String bucket) throws Exception { if (bucket == null) { bucket = bucketName; @@ -63,11 +64,14 @@ private List createAndDeleteKeys(Integer trxnIndex, String bucket) omMetadataManager); List ozoneKeyNames = new ArrayList<>(numKeys); + List renamedEntries = new ArrayList<>(numKeys); for (int i = 1; i <= numKeys; i++) { String key = keyName + "-" + i; OMRequestTestUtils.addKeyToTable(false, false, volumeName, bucket, key, clientID, replicationConfig, trxnIndex++, omMetadataManager); + renamedEntries.add(OMRequestTestUtils.addRenamedEntryToTable(trxnIndex, volumeName, bucket, key, + omMetadataManager)); ozoneKeyNames.add(omMetadataManager.getOzoneKey( volumeName, bucket, key)); } @@ -79,14 +83,14 @@ private List createAndDeleteKeys(Integer trxnIndex, String bucket) deletedKeyNames.add(deletedKeyName); } - return deletedKeyNames; + return Pair.of(deletedKeyNames, renamedEntries); } /** * Create OMRequest which encapsulates DeleteKeyRequest. * @return OMRequest */ - private OMRequest createPurgeKeysRequest(List deletedKeys, + private OMRequest createPurgeKeysRequest(List deletedKeys, List renamedEntries, String snapshotDbKey) { DeletedKeys deletedKeysInBucket = DeletedKeys.newBuilder() .setVolumeName(volumeName) @@ -94,7 +98,7 @@ private OMRequest createPurgeKeysRequest(List deletedKeys, .addAllKeys(deletedKeys) .build(); PurgeKeysRequest.Builder purgeKeysRequest = PurgeKeysRequest.newBuilder() - .addDeletedKeys(deletedKeysInBucket); + .addDeletedKeys(deletedKeysInBucket).addAllRenamedKeys(renamedEntries); if (snapshotDbKey != null) { purgeKeysRequest.setSnapshotTableKey(snapshotDbKey); @@ -123,16 +127,20 @@ private OMRequest preExecute(OMRequest originalOmRequest) throws IOException { @Test public void testValidateAndUpdateCache() throws Exception { // Create and Delete keys. 
The keys should be moved to DeletedKeys table - List deletedKeyNames = createAndDeleteKeys(1, null); + Pair, List> deleteKeysAndRenamedEntry = createAndDeleteKeysAndRenamedEntry(1, null); // The keys should be present in the DeletedKeys table before purging - for (String deletedKey : deletedKeyNames) { + for (String deletedKey : deleteKeysAndRenamedEntry.getKey()) { assertTrue(omMetadataManager.getDeletedTable().isExist( deletedKey)); } + for (String renamedKey : deleteKeysAndRenamedEntry.getValue()) { + assertTrue(omMetadataManager.getSnapshotRenamedTable().isExist(renamedKey)); + } // Create PurgeKeysRequest to purge the deleted keys - OMRequest omRequest = createPurgeKeysRequest(deletedKeyNames, null); + OMRequest omRequest = createPurgeKeysRequest(deleteKeysAndRenamedEntry.getKey(), + deleteKeysAndRenamedEntry.getValue(), null); OMRequest preExecutedRequest = preExecute(omRequest); OMKeyPurgeRequest omKeyPurgeRequest = @@ -150,7 +158,8 @@ public void testValidateAndUpdateCache() throws Exception { omMetadataManager.getStore().initBatchOperation()) { OMKeyPurgeResponse omKeyPurgeResponse = new OMKeyPurgeResponse( - omResponse, deletedKeyNames, null, null); + omResponse, deleteKeysAndRenamedEntry.getKey(), deleteKeysAndRenamedEntry.getValue(), null, + null); omKeyPurgeResponse.addToDBBatch(omMetadataManager, batchOperation); // Do manual commit and see whether addToBatch is successful or not. @@ -158,37 +167,49 @@ public void testValidateAndUpdateCache() throws Exception { } // The keys should not exist in the DeletedKeys table - for (String deletedKey : deletedKeyNames) { + for (String deletedKey : deleteKeysAndRenamedEntry.getKey()) { assertFalse(omMetadataManager.getDeletedTable().isExist(deletedKey)); } + // Renamed entry should not exist + for (String renamedKey : deleteKeysAndRenamedEntry.getValue()) { + assertFalse(omMetadataManager.getSnapshotRenamedTable().isExist(renamedKey)); + } } @Test public void testKeyPurgeInSnapshot() throws Exception { // Create and Delete keys. 
The keys should be moved to DeletedKeys table - List deletedKeyNames = createAndDeleteKeys(1, null); + Pair, List> deleteKeysAndRenamedEntry = createAndDeleteKeysAndRenamedEntry(1, null); SnapshotInfo snapInfo = createSnapshot("snap1"); assertEquals(snapInfo.getLastTransactionInfo(), TransactionInfo.valueOf(TransactionInfo.getTermIndex(1L)).toByteString()); // The keys should not be present in the active Db's deletedTable - for (String deletedKey : deletedKeyNames) { + for (String deletedKey : deleteKeysAndRenamedEntry.getKey()) { assertFalse(omMetadataManager.getDeletedTable().isExist(deletedKey)); } + for (String renamedKey : deleteKeysAndRenamedEntry.getValue()) { + assertFalse(omMetadataManager.getSnapshotRenamedTable().isExist(renamedKey)); + } UncheckedAutoCloseableSupplier rcOmSnapshot = ozoneManager.getOmSnapshotManager() .getSnapshot(snapInfo.getVolumeName(), snapInfo.getBucketName(), snapInfo.getName()); OmSnapshot omSnapshot = rcOmSnapshot.get(); // The keys should be present in the snapshot's deletedTable - for (String deletedKey : deletedKeyNames) { + for (String deletedKey : deleteKeysAndRenamedEntry.getKey()) { assertTrue(omSnapshot.getMetadataManager() .getDeletedTable().isExist(deletedKey)); } + // The rename entries should be present in the snapshot's snapshotRenamedTable + for (String renamedKey : deleteKeysAndRenamedEntry.getValue()) { + assertTrue(omSnapshot.getMetadataManager() + .getSnapshotRenamedTable().isExist(renamedKey)); + } // Create PurgeKeysRequest to purge the deleted keys - OMRequest omRequest = createPurgeKeysRequest(deletedKeyNames, - snapInfo.getTableKey()); + OMRequest omRequest = createPurgeKeysRequest(deleteKeysAndRenamedEntry.getKey(), + deleteKeysAndRenamedEntry.getValue(), snapInfo.getTableKey()); OMRequest preExecutedRequest = preExecute(omRequest); OMKeyPurgeRequest omKeyPurgeRequest = @@ -211,7 +232,8 @@ public void testKeyPurgeInSnapshot() throws Exception { try (BatchOperation batchOperation = omMetadataManager.getStore().initBatchOperation()) { - OMKeyPurgeResponse omKeyPurgeResponse = new OMKeyPurgeResponse(omResponse, deletedKeyNames, snapInfo, null); + OMKeyPurgeResponse omKeyPurgeResponse = new OMKeyPurgeResponse(omResponse, deleteKeysAndRenamedEntry.getKey(), + deleteKeysAndRenamedEntry.getValue(), snapInfo, null); omKeyPurgeResponse.addToDBBatch(omMetadataManager, batchOperation); // Do manual commit and see whether addToBatch is successful or not.
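For quick reference, the purge request exercised by this test groups deleted keys per bucket, adds a flat list of snapshotRenamedTable keys, and is optionally scoped to one snapshot. A condensed restatement of the createPurgeKeysRequest helper shown earlier in this file (only the builder calls that appear above; the final OMRequest wrapping is elided):

    // Sketch: the request shape built by createPurgeKeysRequest above.
    DeletedKeys deletedKeysInBucket = DeletedKeys.newBuilder()
        .setVolumeName(volumeName)
        .setBucketName(bucketName)
        .addAllKeys(deletedKeyNames)           // deletedTable keys to purge
        .build();
    PurgeKeysRequest.Builder purgeKeysRequest = PurgeKeysRequest.newBuilder()
        .addDeletedKeys(deletedKeysInBucket)
        .addAllRenamedKeys(renamedEntries);    // snapshotRenamedTable keys to purge
    if (snapshotDbKey != null) {
      // Scope the purge to the snapshot's DB rather than the active object store.
      purgeKeysRequest.setSnapshotTableKey(snapshotDbKey);
    }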
@@ -220,11 +242,16 @@ public void testKeyPurgeInSnapshot() throws Exception { snapshotInfoOnDisk = omMetadataManager.getSnapshotInfoTable().getSkipCache(snapInfo.getTableKey()); assertEquals(snapshotInfoOnDisk, snapInfo); // The keys should not exist in the DeletedKeys table - for (String deletedKey : deletedKeyNames) { + for (String deletedKey : deleteKeysAndRenamedEntry.getKey()) { assertFalse(omSnapshot.getMetadataManager() .getDeletedTable().isExist(deletedKey)); } + for (String renamedEntry : deleteKeysAndRenamedEntry.getValue()) { + assertFalse(omSnapshot.getMetadataManager() + .getSnapshotRenamedTable().isExist(renamedEntry)); + } + omSnapshot = null; rcOmSnapshot.close(); } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestKeyDeletingService.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestKeyDeletingService.java index a7bdce01cdb9..bfac7bb90e8d 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestKeyDeletingService.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestKeyDeletingService.java @@ -58,7 +58,6 @@ import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.client.StandaloneReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList; import org.apache.hadoop.hdds.server.ServerUtils; import org.apache.hadoop.hdds.utils.db.DBConfigFromFile; @@ -104,6 +103,8 @@ import org.junit.jupiter.api.Test; import org.junit.jupiter.api.TestInstance; import org.junit.jupiter.api.io.TempDir; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.ValueSource; import org.mockito.ArgumentMatchers; import org.mockito.MockedStatic; import org.mockito.Mockito; @@ -371,7 +372,7 @@ public void testAOSKeyDeletingWithSnapshotCreateParallelExecution() metadataManager.getOzoneKey(volumeName, bucketName, "key2"))}; assertNotNull(deletedTable.get(deletePathKey[0])); - Mockito.doAnswer(i -> { + doAnswer(i -> { writeClient.createSnapshot(volumeName, bucketName, snap2); GenericTestUtils.waitFor(() -> { try { @@ -423,6 +424,72 @@ public void testAOSKeyDeletingWithSnapshotCreateParallelExecution() keyDeletingService.resume(); } + @ParameterizedTest + @ValueSource(booleans = {true, false}) + public void testRenamedKeyReclaimation(boolean testForSnapshot) + throws IOException, InterruptedException, TimeoutException { + Table snapshotInfoTable = + om.getMetadataManager().getSnapshotInfoTable(); + Table deletedTable = + om.getMetadataManager().getDeletedTable(); + Table keyTable = + om.getMetadataManager().getKeyTable(BucketLayout.DEFAULT); + Table snapshotRenamedTable = om.getMetadataManager().getSnapshotRenamedTable(); + UncheckedAutoCloseableSupplier snapshot = null; + // Suspend KeyDeletingService + keyDeletingService.suspend(); + + final long initialSnapshotCount = metadataManager.countRowsInTable(snapshotInfoTable); + final long initialKeyCount = metadataManager.countRowsInTable(keyTable); + final long initialDeletedCount = metadataManager.countRowsInTable(deletedTable); + final long initialRenamedCount = metadataManager.countRowsInTable(snapshotRenamedTable); + final String volumeName = getTestName(); + final String bucketName = uniqueObjectName("bucket"); + + // Create Volume and Buckets + try { + createVolumeAndBucket(volumeName, 
bucketName, false); + OmKeyArgs key1 = createAndCommitKey(volumeName, bucketName, + uniqueObjectName("key"), 3); + OmKeyInfo keyInfo = writeClient.getKeyInfo(key1, false).getKeyInfo(); + assertTableRowCount(keyTable, initialKeyCount + 1, metadataManager); + writeClient.createSnapshot(volumeName, bucketName, uniqueObjectName("snap")); + assertTableRowCount(snapshotInfoTable, initialSnapshotCount + 1, metadataManager); + OmKeyArgs key2 = createAndCommitKey(volumeName, bucketName, + uniqueObjectName("key"), 3); + assertTableRowCount(keyTable, initialKeyCount + 2, metadataManager); + + writeClient.renameKey(key1, key1.getKeyName() + "_renamed"); + writeClient.renameKey(key2, key2.getKeyName() + "_renamed"); + assertTableRowCount(keyTable, initialKeyCount + 2, metadataManager); + assertTableRowCount(snapshotRenamedTable, initialRenamedCount + 2, metadataManager); + assertTableRowCount(deletedTable, initialDeletedCount, metadataManager); + if (testForSnapshot) { + String snapshotName = writeClient.createSnapshot(volumeName, bucketName, uniqueObjectName("snap")); + assertTableRowCount(snapshotInfoTable, initialSnapshotCount + 2, metadataManager); + assertTableRowCount(snapshotRenamedTable, initialRenamedCount, metadataManager); + snapshot = om.getOmSnapshotManager().getSnapshot(volumeName, bucketName, snapshotName); + snapshotRenamedTable = snapshot.get().getMetadataManager().getSnapshotRenamedTable(); + } + keyDeletingService.resume(); + assertTableRowCount(snapshotRenamedTable, initialRenamedCount + 1, metadataManager); + try (TableIterator> itr = snapshotRenamedTable.iterator()) { + itr.forEachRemaining(entry -> { + try { + String[] val = metadataManager.splitRenameKey(entry.getKey()); + Assertions.assertEquals(Long.valueOf(val[2]), keyInfo.getObjectID()); + } catch (IOException e) { + throw new UncheckedIOException(e); + } + }); + } + } finally { + if (snapshot != null) { + snapshot.close(); + } + } + } + /* * Create Snap1 * Create 10 keys @@ -847,7 +914,7 @@ private void deleteKey(String volumeName, .setKeyName(keyName) .setAcls(Collections.emptyList()) .setReplicationConfig(StandaloneReplicationConfig.getInstance( - HddsProtos.ReplicationFactor.THREE)) + THREE)) .build(); writeClient.deleteKey(keyArg); } @@ -863,7 +930,7 @@ private void renameKey(String volumeName, .setKeyName(keyName) .setAcls(Collections.emptyList()) .setReplicationConfig(StandaloneReplicationConfig.getInstance( - HddsProtos.ReplicationFactor.THREE)) + THREE)) .build(); writeClient.renameKey(keyArg, toKeyName); } From ea8a3bcba093037c25b2350511adf6c5bcf5d2ca Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Sat, 24 May 2025 00:54:30 -0400 Subject: [PATCH 17/20] HDDS-13026. 
Add test case Change-Id: I0f7cafce55e87884a0eb9fde3c5e15004b4cdb71 --- .../apache/hadoop/ozone/om/service/TestKeyDeletingService.java | 1 + 1 file changed, 1 insertion(+) diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestKeyDeletingService.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestKeyDeletingService.java index bfac7bb90e8d..f799d8af6190 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestKeyDeletingService.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestKeyDeletingService.java @@ -471,6 +471,7 @@ public void testRenamedKeyReclaimation(boolean testForSnapshot) snapshot = om.getOmSnapshotManager().getSnapshot(volumeName, bucketName, snapshotName); snapshotRenamedTable = snapshot.get().getMetadataManager().getSnapshotRenamedTable(); } + assertTableRowCount(snapshotRenamedTable, initialRenamedCount + 2, metadataManager); keyDeletingService.resume(); assertTableRowCount(snapshotRenamedTable, initialRenamedCount + 1, metadataManager); try (TableIterator> itr = snapshotRenamedTable.iterator()) { From fba3c21ca5c6aab0aa1168bed618ea6505abc768 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Mon, 26 May 2025 19:31:34 -0400 Subject: [PATCH 18/20] HDDS-13026. Add rename entries purged in metrics Change-Id: I046604c6d5e7c049c99f6e65010a1a40dfbda965 --- .../org/apache/hadoop/ozone/om/DeletingServiceMetrics.java | 6 ++++++ .../hadoop/ozone/om/request/key/OMKeyPurgeRequest.java | 1 + 2 files changed, 7 insertions(+) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/DeletingServiceMetrics.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/DeletingServiceMetrics.java index 3e6a4b937f47..baa4a34e774b 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/DeletingServiceMetrics.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/DeletingServiceMetrics.java @@ -65,6 +65,8 @@ public final class DeletingServiceMetrics { */ @Metric("Total no. of keys purged") private MutableGaugeLong numKeysPurged; + @Metric("Total no. 
of rename entries purged") + private MutableGaugeLong numRenameEntriesPurged; private DeletingServiceMetrics() { this.registry = new MetricsRegistry(METRICS_SOURCE_NAME); @@ -154,6 +156,10 @@ public void incrNumKeysPurged(long keysPurged) { this.numKeysPurged.incr(keysPurged); } + public void incrNumRenameEntriesPurged(long renameEntriesPurged) { + this.numRenameEntriesPurged.incr(renameEntriesPurged); + } + @VisibleForTesting public void resetDirectoryMetrics() { numDirsPurged.set(0); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyPurgeRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyPurgeRequest.java index 80a63e8a291d..3fd000e523a5 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyPurgeRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyPurgeRequest.java @@ -99,6 +99,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, Execut } DeletingServiceMetrics deletingServiceMetrics = ozoneManager.getDeletionMetrics(); deletingServiceMetrics.incrNumKeysPurged(numKeysDeleted); + deletingServiceMetrics.incrNumRenameEntriesPurged(renamedKeysToBePurged.size()); if (keysToBePurgedList.isEmpty() && renamedKeysToBePurged.isEmpty()) { return new OMKeyPurgeResponse(createErrorOMResponse(omResponse, From 4f89ca6377662bff9c7360a653eb62e3858182f0 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Mon, 26 May 2025 23:15:31 -0400 Subject: [PATCH 19/20] HDDS-13026. Fix test Change-Id: Ifd2c97c3d932fb1335eed97b4535c0b6256add4b --- .../java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java index 3035f9717302..645e561a206e 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java @@ -176,7 +176,7 @@ public void testGetRenameKeyEntries(int numberOfVolumes, int numberOfBucketsPerV Table mockedRenameTable = Mockito.mock(Table.class); when(metadataManager.getSnapshotRenamedTable()).thenReturn(mockedRenameTable); CheckedFunction, Boolean, IOException> filter = - (kv) -> Long.parseLong(kv.getKey().split(keyPrefix)[1]) % 2 == 0; + (kv) -> Long.parseLong(kv.getKey().split("/")[3]) % 2 == 0; List> expectedEntries = mockTableIterator( String.class, mockedRenameTable, numberOfVolumes, numberOfBucketsPerVolume, numberOfKeysPerBucket, volumeNamePrefix, bucketNamePrefix, keyPrefix, volumeNumber, bucketNumber, startVolumeNumber, startBucketNumber, From 77862706d4665e10ba489ea8dcec85ce1b2c18f6 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Tue, 27 May 2025 17:43:43 -0400 Subject: [PATCH 20/20] HDDS-13026. 
Fix alignment Change-Id: I16f7492a085ae56d6ce6402ee5fa2e102636d4bf --- .../org/apache/hadoop/ozone/om/request/OMRequestTestUtils.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/OMRequestTestUtils.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/OMRequestTestUtils.java index 6d85631fa6b1..cfde5ab8560f 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/OMRequestTestUtils.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/OMRequestTestUtils.java @@ -279,7 +279,7 @@ public static void addKeyToTable(boolean openKeyTable, boolean addToCache, * Add key entry to SnapshotRenamedTable. */ public static String addRenamedEntryToTable(long trxnLogIndex, String volumeName, String bucketName, String key, - OMMetadataManager omMetadataManager) throws Exception { + OMMetadataManager omMetadataManager) throws Exception { String renameKey = omMetadataManager.getRenameKey(volumeName, bucketName, trxnLogIndex); omMetadataManager.getSnapshotRenamedTable().put(renameKey, key); return renameKey;
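A closing note on the key layout the tests above rely on: addRenamedEntryToTable derives its key from getRenameKey(volumeName, bucketName, trxnLogIndex), and the patch-19 fix changed the TestKeyManagerImpl filter to split on "/" and parse index 3. Together these suggest rename-table keys of the form "/volume/bucket/<id>", where the trailing component is an object id or transaction log index depending on the writer. Under that assumption (inferred from these patches, not confirmed elsewhere in the series), the corrected filter reduces to:

    // Sketch, assuming rename-table keys look like "/volume/bucket/<id>".
    CheckedFunction<Table.KeyValue<String, String>, Boolean, IOException> filter =
        kv -> {
          // split("/") on "/vol0/bucket0/123" yields ["", "vol0", "bucket0", "123"]
          String[] parts = kv.getKey().split("/");
          return Long.parseLong(parts[3]) % 2 == 0; // keep entries with an even id
        };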