@@ -47,7 +47,6 @@
 import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
 import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus;
 import org.apache.hadoop.ozone.om.helpers.BucketLayout;
-import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo;
 import org.jetbrains.annotations.NotNull;
 import org.jetbrains.annotations.Nullable;
 import org.slf4j.Logger;
@@ -513,21 +512,6 @@ public static void addFileTableCacheEntry(
         .addCacheEntry(dbFileKey, omFileInfo, trxnLogIndex);
   }
 
-  /**
-   * Updating the list of OmKeyInfo eligible for deleting blocks.
-   *
-   * @param omMetadataManager OM Metadata Manager
-   * @param dbDeletedKey Ozone key in deletion table
-   * @param keysToDelete Repeated OMKeyInfos
-   * @param trxnLogIndex transaction log index
-   */
-  public static void addDeletedTableCacheEntry(
-      OMMetadataManager omMetadataManager, String dbDeletedKey,
-      RepeatedOmKeyInfo keysToDelete, long trxnLogIndex) {
-    omMetadataManager.getDeletedTable().addCacheEntry(
-        dbDeletedKey, keysToDelete, trxnLogIndex);
-  }
-
   /**
    * Adding omKeyInfo to open file table.
    *
@@ -796,9 +796,7 @@ protected String getDBMultipartOpenKey(String volumeName, String bucketName,
   /**
    * Prepare key for deletion service on overwrite.
    *
-   * @param dbOzoneKey key to point to an object in RocksDB
    * @param keyToDelete OmKeyInfo of a key to be in deleteTable
-   * @param omMetadataManager
    * @param trxnLogIndex
    * @param isRatisEnabled
    * @return Old keys eligible for deletion.
@@ -44,7 +44,6 @@
 import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo;
 import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
-import org.apache.hadoop.ozone.om.request.file.OMFileRequest;
 import org.apache.hadoop.ozone.om.request.key.OMKeyRequest;
 import org.apache.hadoop.ozone.om.request.util.OmResponseUtil;
 import org.apache.hadoop.ozone.om.request.validation.RequestFeatureValidator;
@@ -206,11 +205,11 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
         dbOzoneKey, partKeyInfoMap, partLocationInfos, dataSize);
 
     //Find all unused parts.
-    List<OmKeyInfo> unUsedParts = new ArrayList<>();
+    List<OmKeyInfo> allKeyInfoToRemove = new ArrayList<>();
     for (Map.Entry< Integer, PartKeyInfo> partKeyInfo :
         partKeyInfoMap.entrySet()) {
       if (!partNumbers.contains(partKeyInfo.getKey())) {
-        unUsedParts.add(OmKeyInfo
+        allKeyInfoToRemove.add(OmKeyInfo
             .getFromProtobuf(partKeyInfo.getValue().getPartKeyInfo()));
       }
     }
@@ -219,14 +218,14 @@
     // creation and key commit, old versions will be just overwritten and
     // not kept. Bucket versioning will be effective from the first key
     // creation after the knob turned on.
-    RepeatedOmKeyInfo oldKeyVersionsToDelete = null;
     OmKeyInfo keyToDelete =
         omMetadataManager.getKeyTable(getBucketLayout()).get(dbOzoneKey);
     long usedBytesDiff = 0;
     boolean isNamespaceUpdate = false;
     if (keyToDelete != null && !omBucketInfo.getIsVersionEnabled()) {
-      oldKeyVersionsToDelete = getOldVersionsToCleanUp(
+      RepeatedOmKeyInfo oldKeyVersionsToDelete = getOldVersionsToCleanUp(
           keyToDelete, trxnLogIndex, ozoneManager.isRatisEnabled());
+      allKeyInfoToRemove.addAll(oldKeyVersionsToDelete.getOmKeyInfoList());
       usedBytesDiff -= keyToDelete.getReplicatedSize();
     } else {
       checkBucketQuotaInNamespace(omBucketInfo, 1L);
@@ -247,11 +246,6 @@
     updateCache(omMetadataManager, dbBucketKey, omBucketInfo, dbOzoneKey,
         dbMultipartOpenKey, multipartKey, omKeyInfo, trxnLogIndex);
 
-    if (oldKeyVersionsToDelete != null) {
-      OMFileRequest.addDeletedTableCacheEntry(omMetadataManager, dbOzoneKey,
-          oldKeyVersionsToDelete, trxnLogIndex);
-    }
-
     omResponse.setCompleteMultiPartUploadResponse(
         MultipartUploadCompleteResponse.newBuilder()
             .setVolume(requestedVolume)
@@ -263,7 +257,7 @@
     long bucketId = omMetadataManager.getBucketId(volumeName, bucketName);
     omClientResponse =
         getOmClientResponse(multipartKey, omResponse, dbMultipartOpenKey,
-            omKeyInfo, unUsedParts, omBucketInfo, oldKeyVersionsToDelete,
+            omKeyInfo, allKeyInfoToRemove, omBucketInfo,
             volumeId, bucketId);
 
     result = Result.SUCCESS;
@@ -302,13 +296,13 @@ protected S3MultipartUploadCompleteResponse getOmClientResponse(
@SuppressWarnings("parameternumber")
protected OMClientResponse getOmClientResponse(String multipartKey,
OMResponse.Builder omResponse, String dbMultipartOpenKey,
OmKeyInfo omKeyInfo, List<OmKeyInfo> unUsedParts,
OmBucketInfo omBucketInfo, RepeatedOmKeyInfo oldKeyVersionsToDelete,
OmKeyInfo omKeyInfo, List<OmKeyInfo> allKeyInfoToRemove,
OmBucketInfo omBucketInfo,
long volumeId, long bucketId) {

return new S3MultipartUploadCompleteResponse(omResponse.build(),
multipartKey, dbMultipartOpenKey, omKeyInfo, unUsedParts,
getBucketLayout(), omBucketInfo, oldKeyVersionsToDelete);
multipartKey, dbMultipartOpenKey, omKeyInfo, allKeyInfoToRemove,
getBucketLayout(), omBucketInfo);
}

protected void checkDirectoryAlreadyExists(OzoneManager ozoneManager,
@@ -24,7 +24,6 @@
 import org.apache.hadoop.ozone.om.helpers.BucketLayout;
 import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
-import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo;
 import org.apache.hadoop.ozone.om.request.file.OMFileRequest;
 import org.apache.hadoop.ozone.om.response.OMClientResponse;
 import org.apache.hadoop.ozone.om.response.s3.multipart.S3MultipartUploadCompleteResponse;
@@ -162,13 +161,12 @@ protected S3MultipartUploadCompleteResponse getOmClientResponse(
   protected OMClientResponse getOmClientResponse(String multipartKey,
       OzoneManagerProtocolProtos.OMResponse.Builder omResponse,
       String dbMultipartOpenKey, OmKeyInfo omKeyInfo,
-      List<OmKeyInfo> unUsedParts, OmBucketInfo omBucketInfo,
-      RepeatedOmKeyInfo oldKeyVersionsToDelete, long volumeId, long bucketId) {
+      List<OmKeyInfo> allKeyInfoToRemove, OmBucketInfo omBucketInfo,
+      long volumeId, long bucketId) {
 
     return new S3MultipartUploadCompleteResponseWithFSO(omResponse.build(),
-        multipartKey, dbMultipartOpenKey, omKeyInfo, unUsedParts,
-        getBucketLayout(), omBucketInfo, oldKeyVersionsToDelete,
-        volumeId, bucketId);
+        multipartKey, dbMultipartOpenKey, omKeyInfo, allKeyInfoToRemove,
+        getBucketLayout(), omBucketInfo, volumeId, bucketId);
   }
 
   @Override
@@ -97,19 +97,20 @@ public void addToDBBatch(OMMetadataManager omMetadataManager,
       OmKeyInfo currentKeyPartInfo =
           OmKeyInfo.getFromProtobuf(partKeyInfo.getPartKeyInfo());
 
-      RepeatedOmKeyInfo repeatedOmKeyInfo =
-          omMetadataManager.getDeletedTable().get(partKeyInfo.getPartName());
-
-      repeatedOmKeyInfo = OmUtils.prepareKeyForDelete(currentKeyPartInfo,
-          repeatedOmKeyInfo, omMultipartKeyInfo.getUpdateID(), isRatisEnabled);
+      RepeatedOmKeyInfo repeatedOmKeyInfo = OmUtils.prepareKeyForDelete(
+          currentKeyPartInfo, null, omMultipartKeyInfo.getUpdateID(),
+          isRatisEnabled);

Contributor:
After this, the repeatedOmKeyInfo parameter of prepareKeyForDelete(..) is always null. Let's remove it.

+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java
@@ -446,8 +446,8 @@ public static File createOMDir(String dirPath) {
    * create a new instance to include this key, else we update the existing
    * repeatedOmKeyInfo instance.
    * 3. Set the updateID to the transactionLogIndex.
    * @param keyInfo args supplied by client
-   * @param repeatedOmKeyInfo key details from deletedTable
    * @param trxnLogIndex For Multipart keys, this is the transactionLogIndex
    *                     of the MultipartUploadAbort request which needs to
    *                     be set as the updateID of the partKeyInfos.
@@ -456,8 +456,7 @@ public static File createOMDir(String dirPath) {
    * @return {@link RepeatedOmKeyInfo}
    */
   public static RepeatedOmKeyInfo prepareKeyForDelete(OmKeyInfo keyInfo,
-      RepeatedOmKeyInfo repeatedOmKeyInfo, long trxnLogIndex,
-      boolean isRatisEnabled) {
+      long trxnLogIndex, boolean isRatisEnabled) {
     // If this key is in a GDPR enforced bucket, then before moving
     // KeyInfo to deletedTable, remove the GDPR related metadata and
     // FileEncryptionInfo from KeyInfo.
@@ -473,15 +472,7 @@ public static RepeatedOmKeyInfo prepareKeyForDelete(OmKeyInfo keyInfo,
     // Set the updateID
     keyInfo.setUpdateID(trxnLogIndex, isRatisEnabled);
 
-    if (repeatedOmKeyInfo == null) {
-      //The key doesn't exist in deletedTable, so create a new instance.
-      repeatedOmKeyInfo = new RepeatedOmKeyInfo(keyInfo);
-    } else {
-      //The key exists in deletedTable, so update existing instance.
-      repeatedOmKeyInfo.addOmKeyInfo(keyInfo);
-    }
-
-    return repeatedOmKeyInfo;
+    return new RepeatedOmKeyInfo(keyInfo);
   }

// multi-part key format is volumeName/bucketName/keyName/uploadId
String deleteKey = omMetadataManager.getOzoneDeletePathKey(
currentKeyPartInfo.getObjectID(), multipartKey);
Comment on lines +104 to +105

Contributor:
It should use partNumber/partName. The object id is the same for all parts.

BTW, let's add a utility method to create the multipart delete key.
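For illustration, a minimal sketch of such a utility, assuming the getOzoneDeletePathKey(objectId, pathKey) contract the new code relies on; the method name and placement are hypothetical, not part of this PR:

  /**
   * Hypothetical helper: builds the deletedTable key for one multipart part
   * by appending the part's objectID to the multipart key, mirroring the
   * inline calls this change adds.
   */
  public static String getMultipartDeleteKey(
      OMMetadataManager omMetadataManager, long objectId, String multipartKey) {
    // multipartKey format: volumeName/bucketName/keyName/uploadId
    return omMetadataManager.getOzoneDeletePathKey(objectId, multipartKey);
  }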


Contributor Author:
@szetszwo Each part is created as an independent upload with a new object ID, and the parts are combined in the multipart table in a RepeatedOmKeyInfo. The objectID in each part's keyInfo is not replaced, so it remains unique.

But the part number/name is not unique: a user can re-upload with the same part number, in which case the new part overwrites the old one, and that can cause a duplicate entry in the deletedTable. So partName is not a unique key.
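A self-contained sketch of the collision described above; the key formats are illustrative assumptions, not Ozone's actual layouts:

// Sketch: why partName cannot key the deletedTable, while objectID can.
public class DeleteKeyUniquenessSketch {

  // partName is derived from the part number, so re-uploading the same part
  // number produces the same key and silently overwrites the first
  // pending-delete entry.
  static String partNameKey(String multipartKey, int partNumber) {
    return multipartKey + "/partNumber=" + partNumber;
  }

  // OM assigns a fresh objectID to every upload attempt, so keying by
  // objectID yields one distinct row per superseded part.
  static String objectIdKey(String multipartKey, long objectId) {
    return multipartKey + "/" + objectId;
  }

  public static void main(String[] args) {
    String multipartKey = "/vol/bucket/key/upload-1";
    System.out.println(partNameKey(multipartKey, 1)
        .equals(partNameKey(multipartKey, 1)));     // true  -> collision
    System.out.println(objectIdKey(multipartKey, 1001L)
        .equals(objectIdKey(multipartKey, 1002L))); // false -> unique rows
  }
}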


Contributor:
@sumitagrawl, thanks for the info. Do we have a unit test showing it? If not, could you add one?


Contributor Author (sumitagrawl, May 11, 2023):
@szetszwo This is already covered by an existing test case, which this PR updates to use the objectId as the correction:
https://github.com/apache/ozone/pull/4660/files#diff-c1e307469ed5f710651ae2ec86baed4d9dce85d742c1c282cb1e7d28a776b503

In that test, two parts are deleted with unique keys: part1DeletedKeyName and part2DeletedKeyName.


Contributor:
@sumitagrawl, I am asking about a unit test. If there is one, which one? I want to run it locally. Thanks.


Contributor Author:
@szetszwo TestS3MultipartUploadAbortResponse:testAddDBToBatchWithParts covers the scenario discussed.


Contributor:
@sumitagrawl, I tried testAddDBToBatchWithParts, but it is not a real test -- in createPartKeyInfo, parts are created with a random object id, not the id generated by OM.

We should have a test in which a real client uploads a multipart key to OM and the parts then get deleted.
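A rough sketch of what such a test could look like, assuming a MiniOzoneCluster handle and the OzoneBucket multipart client API; the cluster setup, the completion/abort step, and the assertions are elided assumptions, not a working patch:

// Sketch: exercise the real code path instead of hand-built PartKeyInfos.
try (OzoneClient client = cluster.newClient()) { // cluster: MiniOzoneCluster (assumed)
  OzoneBucket bucket = client.getObjectStore()
      .getVolume("vol").getBucket("bucket");

  OmMultipartInfo info = bucket.initiateMultipartUpload("key");
  byte[] data = new byte[5 * 1024 * 1024];

  // Upload part 1 twice: OM assigns each attempt its own objectID, and the
  // superseded first part must land in the deletedTable without being
  // overwritten by any later entry.
  for (int attempt = 0; attempt < 2; attempt++) {
    OzoneOutputStream part = bucket.createMultipartKey(
        "key", data.length, 1, info.getUploadID());
    part.write(data);
    part.close();
  }
  // Then abort or complete the upload and assert one deletedTable entry
  // per replaced part, keyed by objectID.
}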


Contributor:
... in createPartKeyInfo, parts are created with a random object id, not the id generated by OM.

Indeed, the random ids could be the same since randomness does not prevent collision.
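One way to avoid that in test helpers is to hand out monotonically increasing ids instead of random ones; a minimal sketch (helper name hypothetical):

import java.util.concurrent.atomic.AtomicLong;

// Monotonic ids are unique by construction, unlike random ones.
final class TestObjectIds {
  private static final AtomicLong NEXT_OBJECT_ID = new AtomicLong(1);

  static long nextObjectId() {
    return NEXT_OBJECT_ID.getAndIncrement();
  }

  public static void main(String[] args) {
    System.out.println(nextObjectId()); // 1
    System.out.println(nextObjectId()); // 2 -- never a repeat
  }
}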


       omMetadataManager.getDeletedTable().putWithBatch(batchOperation,
-          partKeyInfo.getPartName(), repeatedOmKeyInfo);
-
-      // update bucket usedBytes.
-      omMetadataManager.getBucketTable().putWithBatch(batchOperation,
-          omMetadataManager.getBucketKey(omBucketInfo.getVolumeName(),
-              omBucketInfo.getBucketName()), omBucketInfo);
+          deleteKey, repeatedOmKeyInfo);
     }
 
+    // update bucket usedBytes.
+    omMetadataManager.getBucketTable().putWithBatch(batchOperation,
+        omMetadataManager.getBucketKey(omBucketInfo.getVolumeName(),
+            omBucketInfo.getBucketName()), omBucketInfo);
   }
 }
@@ -102,15 +102,15 @@ public void checkAndUpdateDB(OMMetadataManager omMetadataManager,
       // multipart upload. So, delete this part information.
 
       RepeatedOmKeyInfo repeatedOmKeyInfo =
-          omMetadataManager.getDeletedTable().get(openKey);
-
-      repeatedOmKeyInfo =
           OmUtils.prepareKeyForDelete(openPartKeyInfoToBeDeleted,
-              repeatedOmKeyInfo, openPartKeyInfoToBeDeleted.getUpdateID(),
+              null, openPartKeyInfoToBeDeleted.getUpdateID(),
               isRatisEnabled);
+      // multi-part key format is volumeName/bucketName/keyName/uploadId
+      String deleteKey = omMetadataManager.getOzoneDeletePathKey(
+          openPartKeyInfoToBeDeleted.getObjectID(), multipartKey);
 
       omMetadataManager.getDeletedTable().putWithBatch(batchOperation,
-          openKey, repeatedOmKeyInfo);
+          deleteKey, repeatedOmKeyInfo);
     }
 
     if (getOMResponse().getStatus() == OK) {
@@ -135,15 +135,15 @@ public void addToDBBatch(OMMetadataManager omMetadataManager,
       OmKeyInfo partKeyToBeDeleted =
           OmKeyInfo.getFromProtobuf(oldPartKeyInfo.getPartKeyInfo());
 
-      RepeatedOmKeyInfo repeatedOmKeyInfo =
-          omMetadataManager.getDeletedTable()
-              .get(oldPartKeyInfo.getPartName());
-
-      repeatedOmKeyInfo = OmUtils.prepareKeyForDelete(partKeyToBeDeleted,
-          repeatedOmKeyInfo, omMultipartKeyInfo.getUpdateID(), isRatisEnabled);
+      RepeatedOmKeyInfo repeatedOmKeyInfo = OmUtils.prepareKeyForDelete(
+          partKeyToBeDeleted, null,
+          omMultipartKeyInfo.getUpdateID(), isRatisEnabled);
+      // multi-part key format is volumeName/bucketName/keyName/uploadId
+      String deleteKey = omMetadataManager.getOzoneDeletePathKey(
+          partKeyToBeDeleted.getObjectID(), multipartKey);
 
       omMetadataManager.getDeletedTable().putWithBatch(batchOperation,
-          oldPartKeyInfo.getPartName(), repeatedOmKeyInfo);
+          deleteKey, repeatedOmKeyInfo);
     }
 
     omMetadataManager.getMultipartInfoTable().putWithBatch(batchOperation,
@@ -55,27 +55,24 @@ public class S3MultipartUploadCompleteResponse extends OmKeyResponse {
   private String multipartKey;
   private String multipartOpenKey;
   private OmKeyInfo omKeyInfo;
-  private List<OmKeyInfo> partsUnusedList;
+  private List<OmKeyInfo> allKeyInfoToRemove;
   private OmBucketInfo omBucketInfo;
-  private RepeatedOmKeyInfo keyVersionsToDelete;
 
   @SuppressWarnings("checkstyle:ParameterNumber")
   public S3MultipartUploadCompleteResponse(
       @Nonnull OMResponse omResponse,
       @Nonnull String multipartKey,
       @Nonnull String multipartOpenKey,
       @Nonnull OmKeyInfo omKeyInfo,
-      @Nonnull List<OmKeyInfo> unUsedParts,
+      @Nonnull List<OmKeyInfo> allKeyInfoToRemove,
       @Nonnull BucketLayout bucketLayout,
-      @CheckForNull OmBucketInfo omBucketInfo,
-      RepeatedOmKeyInfo keyVersionsToDelete) {
+      @CheckForNull OmBucketInfo omBucketInfo) {
     super(omResponse, bucketLayout);
-    this.partsUnusedList = unUsedParts;
+    this.allKeyInfoToRemove = allKeyInfoToRemove;
     this.multipartKey = multipartKey;
     this.multipartOpenKey = multipartOpenKey;
     this.omKeyInfo = omKeyInfo;
     this.omBucketInfo = omBucketInfo;
-    this.keyVersionsToDelete = keyVersionsToDelete;
   }
 
   /**
@@ -99,23 +96,18 @@ public void addToDBBatch(OMMetadataManager omMetadataManager,
         multipartKey);
 
     // 2. Add key to KeyTable
-    String ozoneKey = addToKeyTable(omMetadataManager, batchOperation);
+    addToKeyTable(omMetadataManager, batchOperation);
 
     // 3. Delete unused parts
-    if (!partsUnusedList.isEmpty()) {
+    if (!allKeyInfoToRemove.isEmpty()) {
       // Add unused parts to deleted key table.
-      if (keyVersionsToDelete == null) {
-        keyVersionsToDelete = new RepeatedOmKeyInfo(partsUnusedList);
-      } else {
-        for (OmKeyInfo unusedParts : partsUnusedList) {
-          keyVersionsToDelete.addOmKeyInfo(unusedParts);
-        }
+      for (OmKeyInfo keyInfoToRemove : allKeyInfoToRemove) {
+        String deleteKey = omMetadataManager.getOzoneDeletePathKey(
+            keyInfoToRemove.getObjectID(), multipartKey);
+        omMetadataManager.getDeletedTable().putWithBatch(batchOperation,
+            deleteKey, new RepeatedOmKeyInfo(keyInfoToRemove));
       }
     }
-    if (keyVersionsToDelete != null) {
-      omMetadataManager.getDeletedTable().putWithBatch(batchOperation,
-          ozoneKey, keyVersionsToDelete);
-    }
 
     // update bucket usedBytes, only when total bucket size has changed
     // due to unused parts cleanup or an overwritten version.
@@ -23,7 +23,6 @@
 import org.apache.hadoop.ozone.om.helpers.BucketLayout;
 import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
-import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo;
 import org.apache.hadoop.ozone.om.request.file.OMFileRequest;
 import org.apache.hadoop.ozone.om.response.CleanupTableInfo;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
@@ -60,13 +59,12 @@ public S3MultipartUploadCompleteResponseWithFSO(
       @Nonnull String multipartKey,
       @Nonnull String multipartOpenKey,
       @Nonnull OmKeyInfo omKeyInfo,
-      @Nonnull List<OmKeyInfo> unUsedParts,
+      @Nonnull List<OmKeyInfo> allKeyInfoToRemove,
       @Nonnull BucketLayout bucketLayout,
       @CheckForNull OmBucketInfo omBucketInfo,
-      RepeatedOmKeyInfo keysToDelete,
       @Nonnull long volumeId, @Nonnull long bucketId) {
-    super(omResponse, multipartKey, multipartOpenKey, omKeyInfo, unUsedParts,
-        bucketLayout, omBucketInfo, keysToDelete);
+    super(omResponse, multipartKey, multipartOpenKey, omKeyInfo,
+        allKeyInfoToRemove, bucketLayout, omBucketInfo);
     this.volumeId = volumeId;
     this.bucketId = bucketId;
   }
@@ -23,6 +23,8 @@
 import java.util.List;
 import java.util.UUID;
 
+import org.apache.hadoop.hdds.utils.db.BatchOperation;
+import org.apache.hadoop.hdds.utils.db.Table;
 import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
 import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
 import org.apache.hadoop.ozone.om.request.OMRequestTestUtils;
@@ -63,34 +65,40 @@ public void testValidateAndUpdateCacheSuccess() throws Exception {
     OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
         omMetadataManager, getBucketLayout());
 
-    checkValidateAndUpdateCacheSuccess(volumeName, bucketName, keyName);
-    checkDeleteTableCount(volumeName, bucketName, keyName, 0);
+    String uploadId = checkValidateAndUpdateCacheSuccess(
+        volumeName, bucketName, keyName);
+    checkDeleteTableCount(volumeName, bucketName, keyName, 0, uploadId);
 
     // Do it twice to test overwrite
-    checkValidateAndUpdateCacheSuccess(volumeName, bucketName, keyName);
+    uploadId = checkValidateAndUpdateCacheSuccess(volumeName, bucketName,
+        keyName);
     // After overwrite, one entry must be in delete table
-    checkDeleteTableCount(volumeName, bucketName, keyName, 1);
+    checkDeleteTableCount(volumeName, bucketName, keyName, 1, uploadId);
   }
 
   public void checkDeleteTableCount(String volumeName,
-      String bucketName, String keyName, int count) throws Exception {
-    String dbOzoneKey = getOzoneDBKey(volumeName, bucketName, keyName);
-    RepeatedOmKeyInfo keysToDelete =
-        omMetadataManager.getDeletedTable().get(dbOzoneKey);
+      String bucketName, String keyName, int count, String uploadId)
+      throws Exception {
+    String dbOzoneKey = getMultipartKey(volumeName, bucketName, keyName,
+        uploadId);
+    List<? extends Table.KeyValue<String, RepeatedOmKeyInfo>> rangeKVs
+        = omMetadataManager.getDeletedTable().getRangeKVs(
+            null, 100, dbOzoneKey);
 
     // deleted key entries count is expected to be 0
     if (count == 0) {
-      Assert.assertNull(keysToDelete);
+      Assert.assertTrue(rangeKVs.size() == 0);
      return;
     }
 
-    Assert.assertNotNull(keysToDelete);
+    Assert.assertTrue(rangeKVs.size() >= 1);
 
     // Count must consider unused parts on commit
-    Assert.assertEquals(count, keysToDelete.getOmKeyInfoList().size());
+    Assert.assertEquals(count,
+        rangeKVs.get(0).getValue().getOmKeyInfoList().size());
   }
 
-  private void checkValidateAndUpdateCacheSuccess(String volumeName,
+  private String checkValidateAndUpdateCacheSuccess(String volumeName,
       String bucketName, String keyName) throws Exception {
 
     OMRequest initiateMPURequest = doPreExecuteInitiateMPU(volumeName,
@@ -136,6 +144,11 @@ private void checkValidateAndUpdateCacheSuccess(String volumeName,
     s3MultipartUploadCompleteRequest.validateAndUpdateCache(ozoneManager,
         3L, ozoneManagerDoubleBufferHelper);
 
+    BatchOperation batchOperation
+        = omMetadataManager.getStore().initBatchOperation();
+    omClientResponse.checkAndUpdateDB(omMetadataManager, batchOperation);
+    omMetadataManager.getStore().commitBatchOperation(batchOperation);
+
     Assert.assertEquals(OzoneManagerProtocolProtos.Status.OK,
         omClientResponse.getOMResponse().getStatus());
 
@@ -156,6 +169,7 @@ private void checkValidateAndUpdateCacheSuccess(String volumeName,
             omMetadataManager.getBucketKey(volumeName, bucketName)))
         .getCacheValue();
     Assert.assertEquals(getNamespaceCount(), omBucketInfo.getUsedNamespace());
+    return multipartUploadID;
   }
 
   protected void addVolumeAndBucket(String volumeName, String bucketName)