From db92abc581d0aa86a4bdd5a1c5fa030090664695 Mon Sep 17 00:00:00 2001 From: sumitagrawl Date: Fri, 5 May 2023 11:59:58 +0530 Subject: [PATCH 1/3] HDDS-8463. S3 key uniqueness in deletedTable --- .../ozone/om/request/file/OMFileRequest.java | 16 -------- .../ozone/om/request/key/OMKeyRequest.java | 2 - .../S3MultipartUploadCompleteRequest.java | 24 +++++------- ...MultipartUploadCompleteRequestWithFSO.java | 10 ++--- .../S3MultipartUploadAbortResponse.java | 23 ++++++------ .../S3MultipartUploadCommitPartResponse.java | 24 ++++++------ .../S3MultipartUploadCompleteResponse.java | 30 ++++++--------- ...ultipartUploadCompleteResponseWithFSO.java | 7 ++-- .../TestS3MultipartUploadCompleteRequest.java | 37 +++++++++++++------ ...MultipartUploadCompleteRequestWithFSO.java | 16 -------- .../s3/multipart/TestS3MultipartResponse.java | 12 +++--- .../TestS3MultipartUploadAbortResponse.java | 8 +++- ...tipartUploadCommitPartResponseWithFSO.java | 19 +++++++--- ...ultipartUploadCompleteResponseWithFSO.java | 12 +++--- 14 files changed, 108 insertions(+), 132 deletions(-) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java index 662780c94d61..99ff0e72869a 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java @@ -47,7 +47,6 @@ import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils; import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus; import org.apache.hadoop.ozone.om.helpers.BucketLayout; -import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; import org.jetbrains.annotations.NotNull; import org.jetbrains.annotations.Nullable; import org.slf4j.Logger; @@ -513,21 +512,6 @@ public static void addFileTableCacheEntry( .addCacheEntry(dbFileKey, omFileInfo, trxnLogIndex); } - /** - * Updating the list of OmKeyInfo eligible for deleting blocks. - * - * @param omMetadataManager OM Metadata Manager - * @param dbDeletedKey Ozone key in deletion table - * @param keysToDelete Repeated OMKeyInfos - * @param trxnLogIndex transaction log index - */ - public static void addDeletedTableCacheEntry( - OMMetadataManager omMetadataManager, String dbDeletedKey, - RepeatedOmKeyInfo keysToDelete, long trxnLogIndex) { - omMetadataManager.getDeletedTable().addCacheEntry( - dbDeletedKey, keysToDelete, trxnLogIndex); - } - /** * Adding omKeyInfo to open file table. * diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java index 43e4bb697d40..edbebdc842ef 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java @@ -796,9 +796,7 @@ protected String getDBMultipartOpenKey(String volumeName, String bucketName, /** * Prepare key for deletion service on overwrite. * - * @param dbOzoneKey key to point to an object in RocksDB * @param keyToDelete OmKeyInfo of a key to be in deleteTable - * @param omMetadataManager * @param trxnLogIndex * @param isRatisEnabled * @return Old keys eligible for deletion. 
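The change running through this series: instead of fetching an existing RepeatedOmKeyInfo from the deletedTable and appending to it, each overwritten key version and each unused multipart part now gets its own deletedTable row whose key is derived from the object's ID via OMMetadataManager#getOzoneDeletePathKey. A minimal standalone sketch of that naming idea follows; the prefix constant and key formatting here are illustrative assumptions, not the exact Ozone implementation.

```java
// Standalone illustration of why appending the objectID makes deletedTable
// rows unique per object rather than per key name. Hypothetical helper; the
// real code calls OMMetadataManager#getOzoneDeletePathKey(objectId, key).
public final class DeletedKeyNamingSketch {
  private static final String OM_KEY_PREFIX = "/";

  private DeletedKeyNamingSketch() { }

  static String deletePathKey(long objectId, String dbOzoneKey) {
    return dbOzoneKey + OM_KEY_PREFIX + objectId;
  }

  public static void main(String[] args) {
    // Two overwrites of the same S3 key produce distinct rows because each
    // OmKeyInfo carries its own objectID, so neither entry clobbers the other.
    System.out.println(deletePathKey(1001L, "/vol1/bucket1/key1"));
    System.out.println(deletePathKey(1002L, "/vol1/bucket1/key1"));
  }
}
```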
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java index 079b5e85ec8d..318c97461f97 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java @@ -44,7 +44,6 @@ import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo; import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper; -import org.apache.hadoop.ozone.om.request.file.OMFileRequest; import org.apache.hadoop.ozone.om.request.key.OMKeyRequest; import org.apache.hadoop.ozone.om.request.util.OmResponseUtil; import org.apache.hadoop.ozone.om.request.validation.RequestFeatureValidator; @@ -206,11 +205,11 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, dbOzoneKey, partKeyInfoMap, partLocationInfos, dataSize); //Find all unused parts. - List unUsedParts = new ArrayList<>(); + List allKeyInfoToRemove = new ArrayList<>(); for (Map.Entry< Integer, PartKeyInfo> partKeyInfo : partKeyInfoMap.entrySet()) { if (!partNumbers.contains(partKeyInfo.getKey())) { - unUsedParts.add(OmKeyInfo + allKeyInfoToRemove.add(OmKeyInfo .getFromProtobuf(partKeyInfo.getValue().getPartKeyInfo())); } } @@ -219,14 +218,14 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, // creation and key commit, old versions will be just overwritten and // not kept. Bucket versioning will be effective from the first key // creation after the knob turned on. 
- RepeatedOmKeyInfo oldKeyVersionsToDelete = null; OmKeyInfo keyToDelete = omMetadataManager.getKeyTable(getBucketLayout()).get(dbOzoneKey); long usedBytesDiff = 0; boolean isNamespaceUpdate = false; if (keyToDelete != null && !omBucketInfo.getIsVersionEnabled()) { - oldKeyVersionsToDelete = getOldVersionsToCleanUp( + RepeatedOmKeyInfo oldKeyVersionsToDelete = getOldVersionsToCleanUp( keyToDelete, trxnLogIndex, ozoneManager.isRatisEnabled()); + allKeyInfoToRemove.addAll(oldKeyVersionsToDelete.getOmKeyInfoList()); usedBytesDiff -= keyToDelete.getReplicatedSize(); } else { checkBucketQuotaInNamespace(omBucketInfo, 1L); @@ -247,11 +246,6 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, updateCache(omMetadataManager, dbBucketKey, omBucketInfo, dbOzoneKey, dbMultipartOpenKey, multipartKey, omKeyInfo, trxnLogIndex); - if (oldKeyVersionsToDelete != null) { - OMFileRequest.addDeletedTableCacheEntry(omMetadataManager, dbOzoneKey, - oldKeyVersionsToDelete, trxnLogIndex); - } - omResponse.setCompleteMultiPartUploadResponse( MultipartUploadCompleteResponse.newBuilder() .setVolume(requestedVolume) @@ -263,7 +257,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, long bucketId = omMetadataManager.getBucketId(volumeName, bucketName); omClientResponse = getOmClientResponse(multipartKey, omResponse, dbMultipartOpenKey, - omKeyInfo, unUsedParts, omBucketInfo, oldKeyVersionsToDelete, + omKeyInfo, allKeyInfoToRemove, omBucketInfo, volumeId, bucketId); result = Result.SUCCESS; @@ -302,13 +296,13 @@ protected S3MultipartUploadCompleteResponse getOmClientResponse( @SuppressWarnings("parameternumber") protected OMClientResponse getOmClientResponse(String multipartKey, OMResponse.Builder omResponse, String dbMultipartOpenKey, - OmKeyInfo omKeyInfo, List unUsedParts, - OmBucketInfo omBucketInfo, RepeatedOmKeyInfo oldKeyVersionsToDelete, + OmKeyInfo omKeyInfo, List allKeyInfoToRemove, + OmBucketInfo omBucketInfo, long volumeId, long bucketId) { return new S3MultipartUploadCompleteResponse(omResponse.build(), - multipartKey, dbMultipartOpenKey, omKeyInfo, unUsedParts, - getBucketLayout(), omBucketInfo, oldKeyVersionsToDelete); + multipartKey, dbMultipartOpenKey, omKeyInfo, allKeyInfoToRemove, + getBucketLayout(), omBucketInfo); } protected void checkDirectoryAlreadyExists(OzoneManager ozoneManager, diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequestWithFSO.java index ae31f949fc4c..80ac2de8d30c 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequestWithFSO.java @@ -24,7 +24,6 @@ import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; import org.apache.hadoop.ozone.om.request.file.OMFileRequest; import org.apache.hadoop.ozone.om.response.OMClientResponse; import org.apache.hadoop.ozone.om.response.s3.multipart.S3MultipartUploadCompleteResponse; @@ -162,13 +161,12 @@ protected S3MultipartUploadCompleteResponse getOmClientResponse( protected OMClientResponse 
getOmClientResponse(String multipartKey, OzoneManagerProtocolProtos.OMResponse.Builder omResponse, String dbMultipartOpenKey, OmKeyInfo omKeyInfo, - List unUsedParts, OmBucketInfo omBucketInfo, - RepeatedOmKeyInfo oldKeyVersionsToDelete, long volumeId, long bucketId) { + List allKeyInfoToRemove, OmBucketInfo omBucketInfo, + long volumeId, long bucketId) { return new S3MultipartUploadCompleteResponseWithFSO(omResponse.build(), - multipartKey, dbMultipartOpenKey, omKeyInfo, unUsedParts, - getBucketLayout(), omBucketInfo, oldKeyVersionsToDelete, - volumeId, bucketId); + multipartKey, dbMultipartOpenKey, omKeyInfo, allKeyInfoToRemove, + getBucketLayout(), omBucketInfo, volumeId, bucketId); } @Override diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadAbortResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadAbortResponse.java index 29dbd88856ae..2d9133af08d2 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadAbortResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadAbortResponse.java @@ -97,19 +97,20 @@ public void addToDBBatch(OMMetadataManager omMetadataManager, OmKeyInfo currentKeyPartInfo = OmKeyInfo.getFromProtobuf(partKeyInfo.getPartKeyInfo()); - RepeatedOmKeyInfo repeatedOmKeyInfo = - omMetadataManager.getDeletedTable().get(partKeyInfo.getPartName()); - - repeatedOmKeyInfo = OmUtils.prepareKeyForDelete(currentKeyPartInfo, - repeatedOmKeyInfo, omMultipartKeyInfo.getUpdateID(), isRatisEnabled); + RepeatedOmKeyInfo repeatedOmKeyInfo = OmUtils.prepareKeyForDelete( + currentKeyPartInfo, null, omMultipartKeyInfo.getUpdateID(), + isRatisEnabled); + // multi-part key format is volumeName/bucketName/keyName/uploadId + String deleteKey = omMetadataManager.getOzoneDeletePathKey( + currentKeyPartInfo.getObjectID(), multipartKey); omMetadataManager.getDeletedTable().putWithBatch(batchOperation, - partKeyInfo.getPartName(), repeatedOmKeyInfo); - - // update bucket usedBytes. - omMetadataManager.getBucketTable().putWithBatch(batchOperation, - omMetadataManager.getBucketKey(omBucketInfo.getVolumeName(), - omBucketInfo.getBucketName()), omBucketInfo); + deleteKey, repeatedOmKeyInfo); } + + // update bucket usedBytes. + omMetadataManager.getBucketTable().putWithBatch(batchOperation, + omMetadataManager.getBucketKey(omBucketInfo.getVolumeName(), + omBucketInfo.getBucketName()), omBucketInfo); } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCommitPartResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCommitPartResponse.java index c4739eb45f12..d6da44ae91cd 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCommitPartResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCommitPartResponse.java @@ -102,15 +102,15 @@ public void checkAndUpdateDB(OMMetadataManager omMetadataManager, // multipart upload. So, delete this part information. 
RepeatedOmKeyInfo repeatedOmKeyInfo = - omMetadataManager.getDeletedTable().get(openKey); - - repeatedOmKeyInfo = OmUtils.prepareKeyForDelete(openPartKeyInfoToBeDeleted, - repeatedOmKeyInfo, openPartKeyInfoToBeDeleted.getUpdateID(), + null, openPartKeyInfoToBeDeleted.getUpdateID(), isRatisEnabled); + // multi-part key format is volumeName/bucketName/keyName/uploadId + String deleteKey = omMetadataManager.getOzoneDeletePathKey( + openPartKeyInfoToBeDeleted.getObjectID(), multipartKey); omMetadataManager.getDeletedTable().putWithBatch(batchOperation, - openKey, repeatedOmKeyInfo); + deleteKey, repeatedOmKeyInfo); } if (getOMResponse().getStatus() == OK) { @@ -135,15 +135,15 @@ public void addToDBBatch(OMMetadataManager omMetadataManager, OmKeyInfo partKeyToBeDeleted = OmKeyInfo.getFromProtobuf(oldPartKeyInfo.getPartKeyInfo()); - RepeatedOmKeyInfo repeatedOmKeyInfo = - omMetadataManager.getDeletedTable() - .get(oldPartKeyInfo.getPartName()); - - repeatedOmKeyInfo = OmUtils.prepareKeyForDelete(partKeyToBeDeleted, - repeatedOmKeyInfo, omMultipartKeyInfo.getUpdateID(), isRatisEnabled); + RepeatedOmKeyInfo repeatedOmKeyInfo = OmUtils.prepareKeyForDelete( + partKeyToBeDeleted, null, + omMultipartKeyInfo.getUpdateID(), isRatisEnabled); + // multi-part key format is volumeName/bucketName/keyName/uploadId + String deleteKey = omMetadataManager.getOzoneDeletePathKey( + partKeyToBeDeleted.getObjectID(), multipartKey); omMetadataManager.getDeletedTable().putWithBatch(batchOperation, - oldPartKeyInfo.getPartName(), repeatedOmKeyInfo); + deleteKey, repeatedOmKeyInfo); } omMetadataManager.getMultipartInfoTable().putWithBatch(batchOperation, diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCompleteResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCompleteResponse.java index 52448b0a955c..829457cd4bd0 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCompleteResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCompleteResponse.java @@ -55,9 +55,8 @@ public class S3MultipartUploadCompleteResponse extends OmKeyResponse { private String multipartKey; private String multipartOpenKey; private OmKeyInfo omKeyInfo; - private List partsUnusedList; + private List allKeyInfoToRemove; private OmBucketInfo omBucketInfo; - private RepeatedOmKeyInfo keyVersionsToDelete; @SuppressWarnings("checkstyle:ParameterNumber") public S3MultipartUploadCompleteResponse( @@ -65,17 +64,15 @@ public S3MultipartUploadCompleteResponse( @Nonnull String multipartKey, @Nonnull String multipartOpenKey, @Nonnull OmKeyInfo omKeyInfo, - @Nonnull List unUsedParts, + @Nonnull List allKeyInfoToRemove, @Nonnull BucketLayout bucketLayout, - @CheckForNull OmBucketInfo omBucketInfo, - RepeatedOmKeyInfo keyVersionsToDelete) { + @CheckForNull OmBucketInfo omBucketInfo) { super(omResponse, bucketLayout); - this.partsUnusedList = unUsedParts; + this.allKeyInfoToRemove = allKeyInfoToRemove; this.multipartKey = multipartKey; this.multipartOpenKey = multipartOpenKey; this.omKeyInfo = omKeyInfo; this.omBucketInfo = omBucketInfo; - this.keyVersionsToDelete = keyVersionsToDelete; } /** @@ -99,23 +96,18 @@ public void addToDBBatch(OMMetadataManager omMetadataManager, multipartKey); // 2. 
Add key to KeyTable - String ozoneKey = addToKeyTable(omMetadataManager, batchOperation); + addToKeyTable(omMetadataManager, batchOperation); // 3. Delete unused parts - if (!partsUnusedList.isEmpty()) { + if (!allKeyInfoToRemove.isEmpty()) { // Add unused parts to deleted key table. - if (keyVersionsToDelete == null) { - keyVersionsToDelete = new RepeatedOmKeyInfo(partsUnusedList); - } else { - for (OmKeyInfo unusedParts : partsUnusedList) { - keyVersionsToDelete.addOmKeyInfo(unusedParts); - } + for (OmKeyInfo keyInfoToRemove : allKeyInfoToRemove) { + String deleteKey = omMetadataManager.getOzoneDeletePathKey( + keyInfoToRemove.getObjectID(), multipartKey); + omMetadataManager.getDeletedTable().putWithBatch(batchOperation, + deleteKey, new RepeatedOmKeyInfo(keyInfoToRemove)); } } - if (keyVersionsToDelete != null) { - omMetadataManager.getDeletedTable().putWithBatch(batchOperation, - ozoneKey, keyVersionsToDelete); - } // update bucket usedBytes, only when total bucket size has changed // due to unused parts cleanup or an overwritten version. diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCompleteResponseWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCompleteResponseWithFSO.java index 1b3389d7cf42..72a1bd87c225 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCompleteResponseWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCompleteResponseWithFSO.java @@ -60,13 +60,12 @@ public S3MultipartUploadCompleteResponseWithFSO( @Nonnull String multipartKey, @Nonnull String multipartOpenKey, @Nonnull OmKeyInfo omKeyInfo, - @Nonnull List unUsedParts, + @Nonnull List allKeyInfoToRemove, @Nonnull BucketLayout bucketLayout, @CheckForNull OmBucketInfo omBucketInfo, - RepeatedOmKeyInfo keysToDelete, @Nonnull long volumeId, @Nonnull long bucketId) { - super(omResponse, multipartKey, multipartOpenKey, omKeyInfo, unUsedParts, - bucketLayout, omBucketInfo, keysToDelete); + super(omResponse, multipartKey, multipartOpenKey, omKeyInfo, + allKeyInfoToRemove, bucketLayout, omBucketInfo); this.volumeId = volumeId; this.bucketId = bucketId; } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCompleteRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCompleteRequest.java index 842710cecae8..6867c8f73e1c 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCompleteRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCompleteRequest.java @@ -23,6 +23,8 @@ import java.util.List; import java.util.UUID; +import org.apache.hadoop.hdds.utils.db.BatchOperation; +import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.hdds.utils.db.cache.CacheKey; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; @@ -63,34 +65,39 @@ public void testValidateAndUpdateCacheSuccess() throws Exception { OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, omMetadataManager, getBucketLayout()); - checkValidateAndUpdateCacheSuccess(volumeName, bucketName, keyName); - 
checkDeleteTableCount(volumeName, bucketName, keyName, 0); + String uploadId = checkValidateAndUpdateCacheSuccess( + volumeName, bucketName, keyName); + checkDeleteTableCount(volumeName, bucketName, keyName, 0, uploadId); // Do it twice to test overwrite - checkValidateAndUpdateCacheSuccess(volumeName, bucketName, keyName); + uploadId = checkValidateAndUpdateCacheSuccess(volumeName, bucketName, keyName); // After overwrite, one entry must be in delete table - checkDeleteTableCount(volumeName, bucketName, keyName, 1); + checkDeleteTableCount(volumeName, bucketName, keyName, 1, uploadId); } public void checkDeleteTableCount(String volumeName, - String bucketName, String keyName, int count) throws Exception { - String dbOzoneKey = getOzoneDBKey(volumeName, bucketName, keyName); - RepeatedOmKeyInfo keysToDelete = - omMetadataManager.getDeletedTable().get(dbOzoneKey); + String bucketName, String keyName, int count, String uploadId) + throws Exception { + String dbOzoneKey = getMultipartKey(volumeName, bucketName, keyName, + uploadId); + List> rangeKVs + = omMetadataManager.getDeletedTable().getRangeKVs( + null, 100, dbOzoneKey); // deleted key entries count is expected to be 0 if (count == 0) { - Assert.assertNull(keysToDelete); + Assert.assertTrue(rangeKVs.size() == 0); return; } - Assert.assertNotNull(keysToDelete); + Assert.assertTrue(rangeKVs.size() >= 1); // Count must consider unused parts on commit - Assert.assertEquals(count, keysToDelete.getOmKeyInfoList().size()); + Assert.assertEquals(count, + rangeKVs.get(0).getValue().getOmKeyInfoList().size()); } - private void checkValidateAndUpdateCacheSuccess(String volumeName, + private String checkValidateAndUpdateCacheSuccess(String volumeName, String bucketName, String keyName) throws Exception { OMRequest initiateMPURequest = doPreExecuteInitiateMPU(volumeName, @@ -136,6 +143,11 @@ private void checkValidateAndUpdateCacheSuccess(String volumeName, s3MultipartUploadCompleteRequest.validateAndUpdateCache(ozoneManager, 3L, ozoneManagerDoubleBufferHelper); + BatchOperation batchOperation + = omMetadataManager.getStore().initBatchOperation(); + omClientResponse.checkAndUpdateDB(omMetadataManager, batchOperation); + omMetadataManager.getStore().commitBatchOperation(batchOperation); + Assert.assertEquals(OzoneManagerProtocolProtos.Status.OK, omClientResponse.getOMResponse().getStatus()); @@ -156,6 +168,7 @@ private void checkValidateAndUpdateCacheSuccess(String volumeName, omMetadataManager.getBucketKey(volumeName, bucketName))) .getCacheValue(); Assert.assertEquals(getNamespaceCount(), omBucketInfo.getUsedNamespace()); + return multipartUploadID; } protected void addVolumeAndBucket(String volumeName, String bucketName) diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCompleteRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCompleteRequestWithFSO.java index 8ff94bca3a22..7beb56bc78a4 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCompleteRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCompleteRequestWithFSO.java @@ -89,22 +89,6 @@ protected void addKeyToTable(String volumeName, String bucketName, omMetadataManager); } - @Override - protected String getMultipartKey(String volumeName, String bucketName, - String keyName, String 
multipartUploadID) throws IOException { - OzoneFileStatus keyStatus = OMFileRequest.getOMKeyInfoIfExists( - omMetadataManager, volumeName, - bucketName, keyName, 0); - - Assert.assertNotNull("key not found in DB!", keyStatus); - final long volumeId = omMetadataManager.getVolumeId(volumeName); - final long bucketId = omMetadataManager.getBucketId(volumeName, - bucketName); - return omMetadataManager.getMultipartKey(volumeId, bucketId, - keyStatus.getKeyInfo().getParentObjectID(), - keyStatus.getTrimmedName(), multipartUploadID); - } - private long getParentID(String volumeName, String bucketName, String keyName) throws IOException { Path keyPath = Paths.get(keyName); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartResponse.java index c716cb96013e..a6f90828f37a 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartResponse.java @@ -37,7 +37,6 @@ import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.BucketLayout; -import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; import org.junit.After; import org.junit.Before; import org.junit.Rule; @@ -161,6 +160,7 @@ public PartKeyInfo createPartKeyInfo(String volumeName, String bucketName, .setDataSize(100L) // Just set dummy size for testing .setCreationTime(Time.now()) .setModificationTime(Time.now()) + .setObjectID(UUID.randomUUID().hashCode()) .setType(HddsProtos.ReplicationType.RATIS) .setFactor(HddsProtos.ReplicationFactor.ONE).build()).build(); } @@ -183,6 +183,7 @@ public PartKeyInfo createPartKeyInfoFSO( .setDataSize(100L) // Just set dummy size for testing .setCreationTime(Time.now()) .setModificationTime(Time.now()) + .setObjectID(UUID.randomUUID().hashCode()) .setParentID(parentID) .setType(HddsProtos.ReplicationType.RATIS) .setFactor(HddsProtos.ReplicationFactor.ONE).build()).build(); @@ -299,9 +300,8 @@ public S3MultipartUploadCompleteResponse createS3CompleteMPUResponseFSO( String volumeName, String bucketName, long parentID, String keyName, String multipartUploadID, OmKeyInfo omKeyInfo, OzoneManagerProtocolProtos.Status status, - List unUsedParts, - OmBucketInfo omBucketInfo, - RepeatedOmKeyInfo keysToDelete) throws IOException { + List allKeyInfoToRemove, + OmBucketInfo omBucketInfo) throws IOException { String multipartKey = omMetadataManager @@ -323,8 +323,8 @@ public S3MultipartUploadCompleteResponse createS3CompleteMPUResponseFSO( .setVolume(volumeName).setKey(keyName)).build(); return new S3MultipartUploadCompleteResponseWithFSO(omResponse, - multipartKey, multipartOpenKey, omKeyInfo, unUsedParts, - getBucketLayout(), omBucketInfo, keysToDelete, volumeId, bucketId); + multipartKey, multipartOpenKey, omKeyInfo, allKeyInfoToRemove, + getBucketLayout(), omBucketInfo, volumeId, bucketId); } protected S3InitiateMultipartUploadResponse getS3InitiateMultipartUploadResp( diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartUploadAbortResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartUploadAbortResponse.java index 
5f830ed85742..5a4e131f8348 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartUploadAbortResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartUploadAbortResponse.java @@ -168,10 +168,14 @@ public void testAddDBToBatchWithParts() throws Exception { omMetadataManager.getDeletedTable()) == 2); String part1DeletedKeyName = - omMultipartKeyInfo.getPartKeyInfo(1).getPartName(); + omMetadataManager.getOzoneDeletePathKey( + omMultipartKeyInfo.getPartKeyInfo(1).getPartKeyInfo() + .getObjectID(), multipartKey); String part2DeletedKeyName = - omMultipartKeyInfo.getPartKeyInfo(2).getPartName(); + omMetadataManager.getOzoneDeletePathKey( + omMultipartKeyInfo.getPartKeyInfo(2).getPartKeyInfo() + .getObjectID(), multipartKey); Assert.assertNotNull(omMetadataManager.getDeletedTable().get( part1DeletedKeyName)); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartUploadCommitPartResponseWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartUploadCommitPartResponseWithFSO.java index 605fe2473134..5308d44b6597 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartUploadCommitPartResponseWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartUploadCommitPartResponseWithFSO.java @@ -18,6 +18,8 @@ package org.apache.hadoop.ozone.om.response.s3.multipart; +import java.util.List; +import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo; import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils; @@ -149,7 +151,9 @@ public void testAddDBToBatchWithParts() throws Exception { omMetadataManager.getDeletedTable())); String part1DeletedKeyName = - omMultipartKeyInfo.getPartKeyInfo(1).getPartName(); + omMetadataManager.getOzoneDeletePathKey( + omMultipartKeyInfo.getPartKeyInfo(1).getPartKeyInfo() + .getObjectID(), multipartKey); Assert.assertNotNull(omMetadataManager.getDeletedTable().get( part1DeletedKeyName)); @@ -203,9 +207,10 @@ public void testWithMultipartUploadError() throws Exception { String openKey = omMetadataManager.getOpenFileName(volumeId, bucketId, parentID, fileName, clientId); + String keyNameInvalid = keyName + "invalid"; S3MultipartUploadCommitPartResponse s3MultipartUploadCommitPartResponse = createS3CommitMPUResponseFSO(volumeName, bucketName, parentID, - keyName + "invalid", multipartUploadID, + keyNameInvalid, multipartUploadID, omMultipartKeyInfo.getPartKeyInfo(1), omMultipartKeyInfo, OzoneManagerProtocolProtos.Status .NO_SUCH_MULTIPART_UPLOAD_ERROR, openKey); @@ -223,9 +228,13 @@ public void testWithMultipartUploadError() throws Exception { // openkey entry should be there in delete table. 
Assert.assertEquals(1, omMetadataManager.countRowsInTable( omMetadataManager.getDeletedTable())); - - Assert.assertNotNull(omMetadataManager.getDeletedTable().get( - openKey)); + String deletedKey = omMetadataManager + .getMultipartKey(volumeName, bucketName, keyNameInvalid, + multipartUploadID); + List> rangeKVs + = omMetadataManager.getDeletedTable().getRangeKVs( + null, 100, deletedKey); + Assert.assertTrue(rangeKVs.size() > 0); } private String getKeyName() { diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartUploadCompleteResponseWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartUploadCompleteResponseWithFSO.java index f09e6dcf70dd..d0d6e0102007 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartUploadCompleteResponseWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartUploadCompleteResponseWithFSO.java @@ -114,7 +114,7 @@ public void testAddDBToBatch() throws Exception { createS3CompleteMPUResponseFSO(volumeName, bucketName, parentID, keyName, multipartUploadID, omKeyInfoFSO, OzoneManagerProtocolProtos.Status.OK, unUsedParts, - omBucketInfo, null); + omBucketInfo); s3MultipartUploadCompleteResponse.addToDBBatch(omMetadataManager, batchOperation); @@ -199,7 +199,7 @@ public void testAddDBToBatchWithNullBucketInfo() throws Exception { createS3CompleteMPUResponseFSO(volumeName, bucketName, parentID, keyName, multipartUploadID, omKeyInfoFSO, OzoneManagerProtocolProtos.Status.OK, unUsedParts, - null, null); + null); s3MultipartUploadCompleteResponse.addToDBBatch(omMetadataManager, batchOperation); @@ -270,7 +270,7 @@ public void testAddDBToBatchWithPartsWithKeyInDeleteTable() throws Exception { // As 1 unused parts and 1 previously put-and-deleted object exist, // so 2 entries should be there in delete table. - Assert.assertEquals(2, omMetadataManager.countRowsInTable( + Assert.assertEquals(3, omMetadataManager.countRowsInTable( omMetadataManager.getDeletedTable())); } @@ -323,7 +323,7 @@ private long runAddDBToBatchWithParts(String volumeName, createS3CompleteMPUResponseFSO(volumeName, bucketName, parentID, keyName, multipartUploadID, omKeyInfoFSO, OzoneManagerProtocolProtos.Status.OK, unUsedParts, - omBucketInfo, null); + omBucketInfo); s3MultipartUploadCompleteResponse.addToDBBatch(omMetadataManager, batchOperation); @@ -382,8 +382,8 @@ private OmKeyInfo commitS3MultipartUpload(String volumeName, Assert.assertEquals(deleteEntryCount, omMetadataManager.countRowsInTable( omMetadataManager.getDeletedTable())); - String part1DeletedKeyName = - omMultipartKeyInfo.getPartKeyInfo(1).getPartName(); + String part1DeletedKeyName = omMetadataManager.getOzoneDeletePathKey( + omMultipartKeyInfo.getPartKeyInfo(1).getPartKeyInfo().getObjectID(), multipartKey); Assert.assertNotNull(omMetadataManager.getDeletedTable().get( part1DeletedKeyName)); From eb71ca43895dd6c11ee7243c0f33b4f1dae5a2ce Mon Sep 17 00:00:00 2001 From: sumitagrawl Date: Fri, 5 May 2023 12:09:33 +0530 Subject: [PATCH 2/3] HDDS-8463. 
S3 key uniqueness in deletedTable --- .../s3/multipart/S3MultipartUploadCompleteResponseWithFSO.java | 1 - .../s3/multipart/TestS3MultipartUploadCompleteRequest.java | 3 ++- .../multipart/TestS3MultipartUploadCompleteRequestWithFSO.java | 1 - .../TestS3MultipartUploadCompleteResponseWithFSO.java | 3 ++- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCompleteResponseWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCompleteResponseWithFSO.java index 72a1bd87c225..3a6e1e39d56b 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCompleteResponseWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCompleteResponseWithFSO.java @@ -23,7 +23,6 @@ import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; import org.apache.hadoop.ozone.om.request.file.OMFileRequest; import org.apache.hadoop.ozone.om.response.CleanupTableInfo; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCompleteRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCompleteRequest.java index 6867c8f73e1c..2983d6c8d394 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCompleteRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCompleteRequest.java @@ -70,7 +70,8 @@ public void testValidateAndUpdateCacheSuccess() throws Exception { checkDeleteTableCount(volumeName, bucketName, keyName, 0, uploadId); // Do it twice to test overwrite - uploadId = checkValidateAndUpdateCacheSuccess(volumeName, bucketName, keyName); + uploadId = checkValidateAndUpdateCacheSuccess(volumeName, bucketName, + keyName); // After overwrite, one entry must be in delete table checkDeleteTableCount(volumeName, bucketName, keyName, 1, uploadId); } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCompleteRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCompleteRequestWithFSO.java index 7beb56bc78a4..9afcfa698008 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCompleteRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCompleteRequestWithFSO.java @@ -23,7 +23,6 @@ import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils; -import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus; import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.om.request.file.OMFileRequest; import 
org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartUploadCompleteResponseWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartUploadCompleteResponseWithFSO.java index d0d6e0102007..1598234d838a 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartUploadCompleteResponseWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartUploadCompleteResponseWithFSO.java @@ -383,7 +383,8 @@ private OmKeyInfo commitS3MultipartUpload(String volumeName, omMetadataManager.getDeletedTable())); String part1DeletedKeyName = omMetadataManager.getOzoneDeletePathKey( - omMultipartKeyInfo.getPartKeyInfo(1).getPartKeyInfo().getObjectID(), multipartKey); + omMultipartKeyInfo.getPartKeyInfo(1).getPartKeyInfo().getObjectID(), + multipartKey); Assert.assertNotNull(omMetadataManager.getDeletedTable().get( part1DeletedKeyName)); From 170abc2732fb7e075f15da7c3a37c568a8b87297 Mon Sep 17 00:00:00 2001 From: sumitagrawl Date: Wed, 10 May 2023 20:10:36 +0530 Subject: [PATCH 3/3] HDDS-8463. S3 key uniqueness in deletedTable --- .../java/org/apache/hadoop/ozone/OmUtils.java | 15 +++------------ .../hadoop/ozone/om/request/key/OMKeyRequest.java | 2 +- .../response/key/AbstractOMKeyDeleteResponse.java | 4 ++-- .../key/OMDirectoriesPurgeResponseWithFSO.java | 2 +- .../multipart/S3MultipartUploadAbortResponse.java | 2 +- .../S3MultipartUploadCommitPartResponse.java | 4 ++-- .../ozone/om/request/OMRequestTestUtils.java | 2 +- .../om/response/key/TestOMKeyCommitResponse.java | 2 +- 8 files changed, 12 insertions(+), 21 deletions(-) diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java index 358f8ff3f9a2..57f92fc5e8d0 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java @@ -447,7 +447,6 @@ public static File createOMDir(String dirPath) { * repeatedOmKeyInfo instance. * 3. Set the updateID to the transactionLogIndex. * @param keyInfo args supplied by client - * @param repeatedOmKeyInfo key details from deletedTable * @param trxnLogIndex For Multipart keys, this is the transactionLogIndex * of the MultipartUploadAbort request which needs to * be set as the updateID of the partKeyInfos. @@ -456,8 +455,7 @@ public static File createOMDir(String dirPath) { * @return {@link RepeatedOmKeyInfo} */ public static RepeatedOmKeyInfo prepareKeyForDelete(OmKeyInfo keyInfo, - RepeatedOmKeyInfo repeatedOmKeyInfo, long trxnLogIndex, - boolean isRatisEnabled) { + long trxnLogIndex, boolean isRatisEnabled) { // If this key is in a GDPR enforced bucket, then before moving // KeyInfo to deletedTable, remove the GDPR related metadata and // FileEncryptionInfo from KeyInfo. @@ -473,15 +471,8 @@ public static RepeatedOmKeyInfo prepareKeyForDelete(OmKeyInfo keyInfo, // Set the updateID keyInfo.setUpdateID(trxnLogIndex, isRatisEnabled); - if (repeatedOmKeyInfo == null) { - //The key doesn't exist in deletedTable, so create a new instance. - repeatedOmKeyInfo = new RepeatedOmKeyInfo(keyInfo); - } else { - //The key exists in deletedTable, so update existing instance. 
- repeatedOmKeyInfo.addOmKeyInfo(keyInfo); - } - - return repeatedOmKeyInfo; + //The key doesn't exist in deletedTable, so create a new instance. + return new RepeatedOmKeyInfo(keyInfo); } /** diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java index edbebdc842ef..d8ec4637e28b 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java @@ -805,7 +805,7 @@ protected String getDBMultipartOpenKey(String volumeName, String bucketName, protected RepeatedOmKeyInfo getOldVersionsToCleanUp( @Nonnull OmKeyInfo keyToDelete, long trxnLogIndex, boolean isRatisEnabled) throws IOException { - return OmUtils.prepareKeyForDelete(keyToDelete, null, + return OmUtils.prepareKeyForDelete(keyToDelete, trxnLogIndex, isRatisEnabled); } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/AbstractOMKeyDeleteResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/AbstractOMKeyDeleteResponse.java index 276baca4dea6..7dd89281e0ca 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/AbstractOMKeyDeleteResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/AbstractOMKeyDeleteResponse.java @@ -99,7 +99,7 @@ protected void addDeletionToBatch( // if it is not null, then we simply add to the list and store this // instance in deletedTable. RepeatedOmKeyInfo repeatedOmKeyInfo = OmUtils.prepareKeyForDelete( - omKeyInfo, null, omKeyInfo.getUpdateID(), + omKeyInfo, omKeyInfo.getUpdateID(), isRatisEnabled); String delKeyName = omMetadataManager.getOzoneDeletePathKey( omKeyInfo.getObjectID(), keyName); @@ -142,7 +142,7 @@ protected void addDeletionToBatch( // if it is not null, then we simply add to the list and store this // instance in deletedTable. 
RepeatedOmKeyInfo repeatedOmKeyInfo = OmUtils.prepareKeyForDelete( - omKeyInfo, null, omKeyInfo.getUpdateID(), + omKeyInfo, omKeyInfo.getUpdateID(), isRatisEnabled); omMetadataManager.getDeletedTable().putWithBatch( batchOperation, deleteKeyName, repeatedOmKeyInfo); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMDirectoriesPurgeResponseWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMDirectoriesPurgeResponseWithFSO.java index 06450638be45..b47aae0d26c6 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMDirectoriesPurgeResponseWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMDirectoriesPurgeResponseWithFSO.java @@ -130,7 +130,7 @@ public void processPaths(OMMetadataManager omMetadataManager, } RepeatedOmKeyInfo repeatedOmKeyInfo = OmUtils.prepareKeyForDelete( - keyInfo, null, keyInfo.getUpdateID(), isRatisEnabled); + keyInfo, keyInfo.getUpdateID(), isRatisEnabled); String deletedKey = omMetadataManager .getOzoneKey(keyInfo.getVolumeName(), keyInfo.getBucketName(), diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadAbortResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadAbortResponse.java index 2d9133af08d2..d30608429ea2 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadAbortResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadAbortResponse.java @@ -98,7 +98,7 @@ public void addToDBBatch(OMMetadataManager omMetadataManager, OmKeyInfo.getFromProtobuf(partKeyInfo.getPartKeyInfo()); RepeatedOmKeyInfo repeatedOmKeyInfo = OmUtils.prepareKeyForDelete( - currentKeyPartInfo, null, omMultipartKeyInfo.getUpdateID(), + currentKeyPartInfo, omMultipartKeyInfo.getUpdateID(), isRatisEnabled); // multi-part key format is volumeName/bucketName/keyName/uploadId String deleteKey = omMetadataManager.getOzoneDeletePathKey( diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCommitPartResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCommitPartResponse.java index d6da44ae91cd..f11789b1eff3 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCommitPartResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCommitPartResponse.java @@ -103,7 +103,7 @@ public void checkAndUpdateDB(OMMetadataManager omMetadataManager, RepeatedOmKeyInfo repeatedOmKeyInfo = OmUtils.prepareKeyForDelete(openPartKeyInfoToBeDeleted, - null, openPartKeyInfoToBeDeleted.getUpdateID(), + openPartKeyInfoToBeDeleted.getUpdateID(), isRatisEnabled); // multi-part key format is volumeName/bucketName/keyName/uploadId String deleteKey = omMetadataManager.getOzoneDeletePathKey( @@ -136,7 +136,7 @@ public void addToDBBatch(OMMetadataManager omMetadataManager, OmKeyInfo.getFromProtobuf(oldPartKeyInfo.getPartKeyInfo()); RepeatedOmKeyInfo repeatedOmKeyInfo = OmUtils.prepareKeyForDelete( - partKeyToBeDeleted, null, + partKeyToBeDeleted, omMultipartKeyInfo.getUpdateID(), isRatisEnabled); // multi-part key format 
is volumeName/bucketName/keyName/uploadId String deleteKey = omMetadataManager.getOzoneDeletePathKey( diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/OMRequestTestUtils.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/OMRequestTestUtils.java index ea687d1fed0c..e5b0271a4b80 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/OMRequestTestUtils.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/OMRequestTestUtils.java @@ -841,7 +841,7 @@ public static String deleteKey(String ozoneKey, omMetadataManager.getKeyTable(getDefaultBucketLayout()).delete(ozoneKey); RepeatedOmKeyInfo repeatedOmKeyInfo = OmUtils.prepareKeyForDelete( - omKeyInfo, null, trxnLogIndex, true); + omKeyInfo, trxnLogIndex, true); omMetadataManager.getDeletedTable().put(ozoneKey, repeatedOmKeyInfo); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCommitResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCommitResponse.java index 7316d4c50dac..52b5e83ed783 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCommitResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCommitResponse.java @@ -120,7 +120,7 @@ public void testAddToDBBatchNoOp() throws Exception { public void testAddToDBBatchOnOverwrite() throws Exception { OmKeyInfo omKeyInfo = getOmKeyInfo(); keysToDelete = - OmUtils.prepareKeyForDelete(omKeyInfo, null, 100, false); + OmUtils.prepareKeyForDelete(omKeyInfo, 100, false); Assert.assertNotNull(keysToDelete); testAddToDBBatch();
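Taken together, the three patches converge on a single write pattern for the deletedTable. Below is a hedged sketch of that pattern, using only the classes and method signatures visible in the diff; the wrapper method itself is hypothetical and exists only to show the call sequence in one place.

```java
import java.io.IOException;

import org.apache.hadoop.hdds.utils.db.BatchOperation;
import org.apache.hadoop.ozone.OmUtils;
import org.apache.hadoop.ozone.om.OMMetadataManager;
import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo;

/** Hypothetical helper showing the post-patch deletedTable write path. */
final class DeletedTableWriteSketch {

  private DeletedTableWriteSketch() { }

  static void moveToDeletedTable(OMMetadataManager omMetadataManager,
      BatchOperation batchOperation, OmKeyInfo keyToRemove, String dbOzoneKey,
      long trxnLogIndex, boolean isRatisEnabled) throws IOException {
    // prepareKeyForDelete no longer accepts an existing RepeatedOmKeyInfo to
    // merge into; every call now returns a fresh single-entry instance.
    RepeatedOmKeyInfo repeated = OmUtils.prepareKeyForDelete(
        keyToRemove, trxnLogIndex, isRatisEnabled);
    // The objectID keeps the row key unique even when the same key name is
    // overwritten repeatedly or an MPU key leaves several unused parts behind.
    String deleteKey = omMetadataManager.getOzoneDeletePathKey(
        keyToRemove.getObjectID(), dbOzoneKey);
    omMetadataManager.getDeletedTable().putWithBatch(
        batchOperation, deleteKey, repeated);
  }
}
```

This mirrors the calls added in S3MultipartUploadAbortResponse, S3MultipartUploadCommitPartResponse, S3MultipartUploadCompleteResponse and AbstractOMKeyDeleteResponse above; the updated tests correspondingly look entries up with getRangeKVs on the multipart-key prefix instead of a single get on the key name.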