diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java index d71e03c9b88..99c1eeb4ee7 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java @@ -92,6 +92,11 @@ public class OzoneBucket extends WithMetadata { */ private int listCacheSize; + /** + * Used bytes of the bucket. + */ + private long usedBytes; + /** * Creation time of the bucket. */ @@ -174,6 +179,18 @@ public OzoneBucket(ConfigurationSource conf, ClientProtocol proxy, this.modificationTime = Instant.ofEpochMilli(modificationTime); } + @SuppressWarnings("parameternumber") + public OzoneBucket(ConfigurationSource conf, ClientProtocol proxy, + String volumeName, String bucketName, StorageType storageType, + Boolean versioning, long creationTime, long modificationTime, + Map<String, String> metadata, String encryptionKeyName, + String sourceVolume, String sourceBucket, long usedBytes) { + this(conf, proxy, volumeName, bucketName, storageType, versioning, + creationTime, metadata, encryptionKeyName, sourceVolume, sourceBucket); + this.usedBytes = usedBytes; + this.modificationTime = Instant.ofEpochMilli(modificationTime); + } + /** * Constructs OzoneBucket instance. * @param conf Configuration object. @@ -416,6 +433,10 @@ public OzoneKeyDetails getKey(String key) throws IOException { return proxy.getKeyDetails(volumeName, name, key); } + public long getUsedBytes() { + return usedBytes; + } + /** * Returns Iterator to iterate over all keys in the bucket. * The result can be restricted using key prefix, will return all diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java index fdd93fa0f57..f3e16073501 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java @@ -634,7 +634,8 @@ public OzoneBucket getBucketDetails( bucketInfo.getEncryptionKeyInfo() != null ? bucketInfo .getEncryptionKeyInfo().getKeyName() : null, bucketInfo.getSourceVolume(), - bucketInfo.getSourceBucket() + bucketInfo.getSourceBucket(), + bucketInfo.getUsedBytes().sum() ); } @@ -658,7 +659,8 @@ public List<OzoneBucket> listBuckets(String volumeName, String bucketPrefix, bucket.getEncryptionKeyInfo() != null ?
bucket .getEncryptionKeyInfo().getKeyName() : null, bucket.getSourceVolume(), - bucket.getSourceBucket())) + bucket.getSourceBucket(), + bucket.getUsedBytes().sum())) .collect(Collectors.toList()); } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketInfo.java index abbe3955f6b..cde9e19095f 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketInfo.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketInfo.java @@ -20,6 +20,7 @@ import java.util.ArrayList; import java.util.BitSet; +import java.util.concurrent.atomic.LongAdder; import java.util.HashMap; import java.util.LinkedHashMap; import java.util.List; @@ -31,8 +32,7 @@ import org.apache.hadoop.ozone.OzoneAcl; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.audit.Auditable; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos .BucketInfo; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.BucketInfo; import org.apache.hadoop.ozone.protocolPB.OMPBHelper; import com.google.common.base.Preconditions; @@ -80,6 +80,8 @@ public final class OmBucketInfo extends WithObjectID implements Auditable { private final String sourceBucket; + private final LongAdder usedBytes = new LongAdder(); + /** * Private constructor, constructed via builder. * @param volumeName - Volume name. @@ -93,6 +95,7 @@ public final class OmBucketInfo extends WithObjectID implements Auditable { * @param bekInfo - bucket encryption key info. * @param sourceVolume - source volume for bucket links, null otherwise * @param sourceBucket - source bucket for bucket links, null otherwise + * @param usedBytes - Bucket Quota Usage in bytes. */ @SuppressWarnings("checkstyle:ParameterNumber") private OmBucketInfo(String volumeName, @@ -107,7 +110,8 @@ private OmBucketInfo(String volumeName, Map<String, String> metadata, BucketEncryptionKeyInfo bekInfo, String sourceVolume, - String sourceBucket) { + String sourceBucket, + long usedBytes) { this.volumeName = volumeName; this.bucketName = bucketName; this.acls = acls; @@ -121,6 +125,7 @@ private OmBucketInfo(String volumeName, this.bekInfo = bekInfo; this.sourceVolume = sourceVolume; this.sourceBucket = sourceBucket; + this.usedBytes.add(usedBytes); } /** @@ -226,6 +231,10 @@ public String getSourceBucket() { return sourceBucket; } + public LongAdder getUsedBytes() { + return usedBytes; + } + public boolean isLink() { return sourceVolume != null && sourceBucket != null; } @@ -261,6 +270,7 @@ public Map<String, String> toAuditMap() { auditMap.put(OzoneConsts.SOURCE_VOLUME, sourceVolume); auditMap.put(OzoneConsts.SOURCE_BUCKET, sourceBucket); } + auditMap.put(OzoneConsts.USED_BYTES, String.valueOf(this.usedBytes)); return auditMap; } @@ -296,7 +306,8 @@ public Builder toBuilder() { .setSourceVolume(sourceVolume) .setSourceBucket(sourceBucket) .setAcls(acls) - .addAllMetadata(metadata); + .addAllMetadata(metadata) + .setUsedBytes(usedBytes.sum()); } /** @@ -316,6 +327,7 @@ public static class Builder { private BucketEncryptionKeyInfo bekInfo; private String sourceVolume; private String sourceBucket; + private long usedBytes; public Builder() { //Default values @@ -407,6 +419,11 @@ public Builder setSourceBucket(String bucket) { return this; } + public Builder setUsedBytes(long quotaUsage) { + this.usedBytes = quotaUsage; + return this; + } + /** * Constructs the OmBucketInfo.
* @return instance of OmBucketInfo. @@ -420,7 +437,7 @@ public OmBucketInfo build() { return new OmBucketInfo(volumeName, bucketName, acls, isVersionEnabled, storageType, creationTime, modificationTime, objectID, updateID, - metadata, bekInfo, sourceVolume, sourceBucket); + metadata, bekInfo, sourceVolume, sourceBucket, usedBytes); } } @@ -438,6 +455,7 @@ public BucketInfo getProtobuf() { .setModificationTime(modificationTime) .setObjectID(objectID) .setUpdateID(updateID) + .setUsedBytes(usedBytes.sum()) .addAllMetadata(KeyValueUtil.toProtobuf(metadata)); if (bekInfo != null && bekInfo.getKeyName() != null) { bib.setBeinfo(OMPBHelper.convert(bekInfo)); @@ -465,6 +483,7 @@ public static OmBucketInfo getFromProtobuf(BucketInfo bucketInfo) { .setIsVersionEnabled(bucketInfo.getIsVersionEnabled()) .setStorageType(StorageType.valueOf(bucketInfo.getStorageType())) .setCreationTime(bucketInfo.getCreationTime()) + .setUsedBytes(bucketInfo.getUsedBytes()) .setModificationTime(bucketInfo.getModificationTime()); if (bucketInfo.hasObjectID()) { obib.setObjectID(bucketInfo.getObjectID()); @@ -500,6 +519,7 @@ public String getObjectInfo() { ", isVersionEnabled='" + isVersionEnabled + "'" + ", storageType='" + storageType + "'" + ", creationTime='" + creationTime + "'" + + ", usedBytes='" + usedBytes.sum() + '\'' + sourceInfo + '}'; } @@ -522,6 +542,7 @@ public boolean equals(Object o) { storageType == that.storageType && objectID == that.objectID && updateID == that.updateID && + usedBytes.sum() == that.usedBytes.sum() && Objects.equals(sourceVolume, that.sourceVolume) && Objects.equals(sourceBucket, that.sourceBucket) && Objects.equals(metadata, that.metadata) && @@ -548,6 +569,7 @@ public String toString() { ", objectID=" + objectID + ", updateID=" + updateID + ", metadata=" + metadata + + ", usedBytes=" + usedBytes.sum() + '}'; } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java index 7b6f93e366d..e45c8bd037c 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java @@ -708,10 +708,12 @@ public void testPutKey() throws IOException { } @Test - public void testVolumeUsedBytes() throws IOException { + @SuppressWarnings("methodlength") + public void testVolumeAndBucketUsedBytes() throws IOException { String volumeName = UUID.randomUUID().toString(); String bucketName = UUID.randomUUID().toString(); OzoneVolume volume = null; + OzoneBucket bucket = null; int blockSize = (int) ozoneManager.getConfiguration().getStorageSize( OZONE_SCM_BLOCK_SIZE, OZONE_SCM_BLOCK_SIZE_DEFAULT, StorageUnit.BYTES); @@ -727,13 +729,15 @@ public void testVolumeUsedBytes() throws IOException { // The initial value should be 0 Assert.assertEquals(0L, volume.getUsedBytes()); volume.createBucket(bucketName); - OzoneBucket bucket = volume.getBucket(bucketName); + bucket = volume.getBucket(bucketName); //Case1: Test the volumeUsedBytes of ONE replications. 
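Before the test walk-through below, one note on the OmBucketInfo change above: usedBytes is a java.util.concurrent.atomic.LongAdder rather than a plain long, so concurrent OM request handlers can apply signed deltas without a lock and readers snapshot the total via sum(). A minimal standalone sketch of that pattern (illustrative class, not Ozone code):

    import java.util.concurrent.atomic.LongAdder;

    public final class UsedBytesSketch {
      private final LongAdder usedBytes = new LongAdder();

      // Writers add signed deltas; LongAdder stripes the counter internally,
      // so contended add() calls stay cheap.
      public void add(long delta) {
        usedBytes.add(delta);
      }

      // Readers take a point-in-time snapshot, as getProtobuf() and equals()
      // above do via usedBytes.sum().
      public long snapshot() {
        return usedBytes.sum();
      }

      public static void main(String[] args) {
        UsedBytesSketch bucket = new UsedBytesSketch();
        bucket.add(600L << 20);    // commit a 600 MB key
        bucket.add(-(600L << 20)); // delete it again
        System.out.println(bucket.snapshot()); // prints 0
      }
    }

Note that toAuditMap() stringifies the adder itself (String.valueOf(this.usedBytes)); that works because LongAdder.toString() returns the current sum.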
String keyName1 = UUID.randomUUID().toString(); writeKey(bucket, keyName1, ONE, value, valueLength); volume = store.getVolume(volumeName); + bucket = volume.getBucket(bucketName); Assert.assertEquals(valueLength, volume.getUsedBytes()); + Assert.assertEquals(valueLength, bucket.getUsedBytes()); currentQuotaUsage += valueLength; // Case2: Test overwrite the same KeyName under ONE Replicates, the @@ -743,16 +747,22 @@ public void testVolumeUsedBytes() throws IOException { // Overwrite the keyName2 writeKey(bucket, keyName2, ONE, value, valueLength); volume = store.getVolume(volumeName); + bucket = volume.getBucket(bucketName); Assert.assertEquals(valueLength * 2 + currentQuotaUsage, volume.getUsedBytes()); + Assert.assertEquals(valueLength * 2 + currentQuotaUsage, + bucket.getUsedBytes()); currentQuotaUsage += valueLength * 2; // Case3: Test the volumeUsedBytes of THREE replications. String keyName3 = UUID.randomUUID().toString(); writeKey(bucket, keyName3, THREE, value, valueLength); volume = store.getVolume(volumeName); + bucket = volume.getBucket(bucketName); Assert.assertEquals(valueLength * 3 + currentQuotaUsage, volume.getUsedBytes()); + Assert.assertEquals(valueLength * 3 + currentQuotaUsage, + bucket.getUsedBytes()); currentQuotaUsage += valueLength * 3; // Case4: Test overwrite the same KeyName under THREE Replicates, the @@ -762,8 +772,11 @@ public void testVolumeUsedBytes() throws IOException { // Overwrite the keyName4 writeKey(bucket, keyName4, THREE, value, valueLength); volume = store.getVolume(volumeName); + bucket = volume.getBucket(bucketName); Assert.assertEquals(valueLength * 3 * 2 + currentQuotaUsage, volume.getUsedBytes()); + Assert.assertEquals(valueLength * 3 * 2 + currentQuotaUsage, + bucket.getUsedBytes()); currentQuotaUsage += valueLength * 3 * 2; //Case5: Do not specify the value Length, simulate HDFS api writing. @@ -771,8 +784,11 @@ public void testVolumeUsedBytes() throws IOException { String keyName5 = UUID.randomUUID().toString(); writeFile(bucket, keyName5, ONE, value, 0); volume = store.getVolume(volumeName); + bucket = volume.getBucket(bucketName); Assert.assertEquals(valueLength + currentQuotaUsage, volume.getUsedBytes()); + Assert.assertEquals(valueLength + currentQuotaUsage, + bucket.getUsedBytes()); currentQuotaUsage += valueLength; // Case6: Do not specify the value Length, simulate HDFS api writing. @@ -783,8 +799,11 @@ public void testVolumeUsedBytes() throws IOException { // Overwrite the keyName6 writeFile(bucket, keyName6, ONE, value, 0); volume = store.getVolume(volumeName); + bucket = volume.getBucket(bucketName); Assert.assertEquals(valueLength * 2 + currentQuotaUsage, volume.getUsedBytes()); + Assert.assertEquals(valueLength * 2 + currentQuotaUsage, + bucket.getUsedBytes()); currentQuotaUsage += valueLength * 2; // Case7: Do not specify the value Length, simulate HDFS api writing. @@ -792,8 +811,11 @@ public void testVolumeUsedBytes() throws IOException { String keyName7 = UUID.randomUUID().toString(); writeFile(bucket, keyName7, THREE, value, 0); volume = store.getVolume(volumeName); + bucket = volume.getBucket(bucketName); Assert.assertEquals(valueLength * 3 + currentQuotaUsage, volume.getUsedBytes()); + Assert.assertEquals(valueLength * 3 + currentQuotaUsage, + bucket.getUsedBytes()); currentQuotaUsage += valueLength * 3; // Case8: Do not specify the value Length, simulate HDFS api writing. 
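Case8 continues below; first, a worked tally of the whole test in units of valueLength (overwrites count twice because the replaced key only moves to the deleted-key table and has not yet been reclaimed when the assertions run):

    long used = 0;
    used += 1;  // Case1: write, ONE
    used += 2;  // Case2: write + overwrite, ONE
    used += 3;  // Case3: write, THREE
    used += 6;  // Case4: write + overwrite, THREE
    used += 1;  // Case5: file write, ONE
    used += 2;  // Case6: file write + overwrite, ONE
    used += 3;  // Case7: file write, THREE
    used += 6;  // Case8: file write + overwrite, THREE
    used -= 1;  // Case9: delete keyName1, ONE
    used -= 3;  // Case10: delete keyName3, THREE
    used -= 20; // Case11: deleteKeys on the six remaining keys
    // used == 0, matching the final volume and bucket assertions.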
@@ -804,23 +826,32 @@ public void testVolumeUsedBytes() throws IOException { // Overwrite the keyName8 writeFile(bucket, keyName8, THREE, value, 0); volume = store.getVolume(volumeName); + bucket = volume.getBucket(bucketName); Assert.assertEquals(valueLength * 3 * 2 + currentQuotaUsage, volume.getUsedBytes()); + Assert.assertEquals(valueLength * 3 * 2 + currentQuotaUsage, + bucket.getUsedBytes()); currentQuotaUsage += valueLength * 3 * 2; // Case9: Test volumeUsedBytes when delete key of ONE replications. bucket.deleteKey(keyName1); volume = store.getVolume(volumeName); + bucket = volume.getBucket(bucketName); Assert.assertEquals(currentQuotaUsage - valueLength, volume.getUsedBytes()); + Assert.assertEquals(currentQuotaUsage - valueLength, + bucket.getUsedBytes()); currentQuotaUsage -= valueLength; // Case10: Test volumeUsedBytes when delete key of THREE // replications. bucket.deleteKey(keyName3); volume = store.getVolume(volumeName); + bucket = volume.getBucket(bucketName); Assert.assertEquals(currentQuotaUsage - valueLength * 3, volume.getUsedBytes()); + Assert.assertEquals(currentQuotaUsage - valueLength * 3, + bucket.getUsedBytes()); currentQuotaUsage -= valueLength * 3; // Case11: Test volumeUsedBytes when Test Delete keys. At this @@ -834,7 +865,9 @@ public void testVolumeUsedBytes() throws IOException { keyList.add(keyName8); bucket.deleteKeys(keyList); volume = store.getVolume(volumeName); + bucket = volume.getBucket(bucketName); Assert.assertEquals(0, volume.getUsedBytes()); + Assert.assertEquals(0, bucket.getUsedBytes()); } @Test @@ -915,7 +948,7 @@ private void writeFile(OzoneBucket bucket, String keyName, } @Test - public void testVolumeQuotaWithUploadPart() throws IOException { + public void testUsedBytesWithUploadPart() throws IOException { String volumeName = UUID.randomUUID().toString(); String bucketName = UUID.randomUUID().toString(); String keyName = UUID.randomUUID().toString(); @@ -949,10 +982,14 @@ public void testVolumeQuotaWithUploadPart() throws IOException { Assert.assertEquals(valueLength, store.getVolume(volumeName) .getUsedBytes()); + Assert.assertEquals(valueLength, store.getVolume(volumeName) + .getBucket(bucketName).getUsedBytes()); // Abort uploaded partKey and the usedBytes of volume should be 0. 
bucket.abortMultipartUpload(keyName, uploadID); Assert.assertEquals(0, store.getVolume(volumeName).getUsedBytes()); + Assert.assertEquals(0, store.getVolume(volumeName) + .getBucket(bucketName).getUsedBytes()); } @Test diff --git a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto index fff29680699..a1536715488 100644 --- a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto +++ b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto @@ -499,6 +499,7 @@ message BucketInfo { optional uint64 modificationTime = 11; optional string sourceVolume = 12; optional string sourceBucket = 13; + optional uint64 usedBytes = 14; } enum StorageTypeProto { diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequest.java index f76ac985b21..b0273571681 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequest.java @@ -187,6 +187,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, OmKeyInfo omKeyInfo = null; OmVolumeArgs omVolumeArgs = null; + OmBucketInfo omBucketInfo = null; final List locations = new ArrayList<>(); List missingParentInfos; @@ -291,9 +292,13 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, long scmBlockSize = ozoneManager.getScmBlockSize(); omVolumeArgs = getVolumeInfo(omMetadataManager, volumeName); + omBucketInfo = getBucketInfo(omMetadataManager, volumeName, bucketName); + // update usedBytes atomically. 
- omVolumeArgs.getUsedBytes().add(newLocationList.size() * scmBlockSize - * omKeyInfo.getFactor().getNumber()); + long preAllocatedSpace = newLocationList.size() * scmBlockSize + * omKeyInfo.getFactor().getNumber(); + omVolumeArgs.getUsedBytes().add(preAllocatedSpace); + omBucketInfo.getUsedBytes().add(preAllocatedSpace); // Prepare response omResponse.setCreateFileResponse(CreateFileResponse.newBuilder() @@ -302,7 +307,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, .setOpenVersion(openVersion).build()) .setCmdType(Type.CreateFile); omClientResponse = new OMFileCreateResponse(omResponse.build(), - omKeyInfo, missingParentInfos, clientID, omVolumeArgs); + omKeyInfo, missingParentInfos, clientID, omVolumeArgs, omBucketInfo); result = Result.SUCCESS; } catch (IOException ex) { diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMAllocateBlockRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMAllocateBlockRequest.java index a3239a49f2a..55296da3fe7 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMAllocateBlockRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMAllocateBlockRequest.java @@ -25,6 +25,7 @@ import com.google.common.base.Optional; import com.google.common.base.Preconditions; +import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper; import org.apache.hadoop.ozone.om.request.util.OmResponseUtil; @@ -166,6 +167,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, OmKeyInfo openKeyInfo; IOException exception = null; OmVolumeArgs omVolumeArgs = null; + OmBucketInfo omBucketInfo = null; try { keyArgs = resolveBucketLink(ozoneManager, keyArgs, auditMap); @@ -208,14 +210,17 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, long scmBlockSize = ozoneManager.getScmBlockSize(); omVolumeArgs = getVolumeInfo(omMetadataManager, volumeName); + omBucketInfo = getBucketInfo(omMetadataManager, volumeName, bucketName); // update usedBytes atomically. 
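// (preAllocatedSpace below = blocks just allocated * scmBlockSize *
// replication factor; the volume counter and the new bucket counter grow by
// the same amount, and commitKey later reconciles against the real key size.)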
- omVolumeArgs.getUsedBytes().add(newLocationList.size() * scmBlockSize - * openKeyInfo.getFactor().getNumber()); + long preAllocatedSpace = newLocationList.size() * scmBlockSize + * openKeyInfo.getFactor().getNumber(); + omVolumeArgs.getUsedBytes().add(preAllocatedSpace); + omBucketInfo.getUsedBytes().add(preAllocatedSpace); omResponse.setAllocateBlockResponse(AllocateBlockResponse.newBuilder() .setKeyLocation(blockLocation).build()); omClientResponse = new OMAllocateBlockResponse(omResponse.build(), - openKeyInfo, clientID, omVolumeArgs); + openKeyInfo, clientID, omVolumeArgs, omBucketInfo); LOG.debug("Allocated block for Volume:{}, Bucket:{}, OpenKey:{}", volumeName, bucketName, openKeyName); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java index c86ea56e1ce..8882d7d5a63 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java @@ -29,6 +29,7 @@ import org.apache.hadoop.ozone.OmUtils; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.om.OMConfigKeys; +import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper; import org.apache.hadoop.ozone.om.request.util.OmResponseUtil; @@ -123,6 +124,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, IOException exception = null; OmKeyInfo omKeyInfo = null; OmVolumeArgs omVolumeArgs = null; + OmBucketInfo omBucketInfo = null; OMClientResponse omClientResponse = null; boolean bucketLockAcquired = false; Result result; @@ -193,16 +195,19 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, long scmBlockSize = ozoneManager.getScmBlockSize(); int factor = omKeyInfo.getFactor().getNumber(); omVolumeArgs = getVolumeInfo(omMetadataManager, volumeName); + omBucketInfo = getBucketInfo(omMetadataManager, volumeName, bucketName); // update usedBytes atomically. // Block was pre-requested and UsedBytes updated when createKey and // AllocatedBlock. The space occupied by the Key shall be based on // the actual Key size, and the total Block size applied before should // be subtracted. 
- omVolumeArgs.getUsedBytes().add(omKeyInfo.getDataSize() * factor - - locationInfoList.size() * scmBlockSize * factor); + long correctedSpace = omKeyInfo.getDataSize() * factor - + locationInfoList.size() * scmBlockSize * factor; + omVolumeArgs.getUsedBytes().add(correctedSpace); + omBucketInfo.getUsedBytes().add(correctedSpace); omClientResponse = new OMKeyCommitResponse(omResponse.build(), - omKeyInfo, dbOzoneKey, dbOpenKey, omVolumeArgs); + omKeyInfo, dbOzoneKey, dbOpenKey, omVolumeArgs, omBucketInfo); result = Result.SUCCESS; } catch (IOException ex) { diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequest.java index 0966cc544da..eeb2ab7c482 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequest.java @@ -198,6 +198,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager(); OmKeyInfo omKeyInfo = null; OmVolumeArgs omVolumeArgs = null; + OmBucketInfo omBucketInfo = null; final List< OmKeyLocationInfo > locations = new ArrayList<>(); boolean acquireLock = false; @@ -294,6 +295,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, long scmBlockSize = ozoneManager.getScmBlockSize(); omVolumeArgs = getVolumeInfo(omMetadataManager, volumeName); + omBucketInfo = getBucketInfo(omMetadataManager, volumeName, bucketName); // Here we refer to the implementation of HDFS: // If the key size is 600MB, when createKey, keyLocationInfo in @@ -302,8 +304,11 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, // ize is 256MB * 3 * 3. We will allocate more 256MB * 3 * 3 - 600mb * 3 // = 504MB in advance, and we will subtract this part when we finally // commitKey. 
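The code below implements exactly that; with the comment's numbers, a hedged arithmetic sketch (assumed sizes, plain Java rather than Ozone API calls):

    public final class QuotaMathSketch {
      public static void main(String[] args) {
        long mb = 1024L * 1024;
        long scmBlockSize = 256 * mb; // assumed SCM block size
        int factor = 3;               // THREE replication
        long dataSize = 600 * mb;     // actual key size at commit
        int blocks = 3;               // blocks pre-allocated for the key

        // createKey/allocateBlock add preAllocatedSpace:
        long preAllocated = blocks * scmBlockSize * factor;  // 2304 MB
        // commitKey adds the (negative) correctedSpace:
        long corrected = dataSize * factor - preAllocated;   // -504 MB
        System.out.println((preAllocated + corrected) / mb); // 1800
      }
    }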
- omVolumeArgs.getUsedBytes().add(newLocationList.size() * scmBlockSize - * omKeyInfo.getFactor().getNumber()); + long preAllocatedSpace = newLocationList.size() * scmBlockSize + * omKeyInfo.getFactor().getNumber(); + omVolumeArgs.getUsedBytes().add(preAllocatedSpace); + omBucketInfo.getUsedBytes().add(preAllocatedSpace); + // Prepare response omResponse.setCreateKeyResponse(CreateKeyResponse.newBuilder() @@ -312,7 +317,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, .setOpenVersion(openVersion).build()) .setCmdType(Type.CreateKey); omClientResponse = new OMKeyCreateResponse(omResponse.build(), - omKeyInfo, missingParentInfos, clientID, omVolumeArgs); + omKeyInfo, missingParentInfos, clientID, omVolumeArgs, omBucketInfo); result = Result.SUCCESS; } catch (IOException ex) { diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequest.java index bb820b76f22..e27b7e116c7 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequest.java @@ -22,6 +22,7 @@ import java.util.Map; import com.google.common.base.Optional; +import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; @@ -111,6 +112,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, OMClientResponse omClientResponse = null; Result result = null; OmVolumeArgs omVolumeArgs = null; + OmBucketInfo omBucketInfo = null; try { keyArgs = resolveBucketLink(ozoneManager, keyArgs, auditMap); volumeName = keyArgs.getVolumeName(); @@ -146,6 +148,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, long quotaReleased = 0; int keyFactor = omKeyInfo.getFactor().getNumber(); omVolumeArgs = getVolumeInfo(omMetadataManager, volumeName); + omBucketInfo = getBucketInfo(omMetadataManager, volumeName, bucketName); OmKeyLocationInfoGroup keyLocationGroup = omKeyInfo.getLatestVersionLocations(); for(OmKeyLocationInfo locationInfo: keyLocationGroup.getLocationList()){ @@ -153,6 +156,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, } // update usedBytes atomically. omVolumeArgs.getUsedBytes().add(-quotaReleased); + omBucketInfo.getUsedBytes().add(-quotaReleased); // No need to add cache entries to delete table. 
As delete table will // be used by DeleteKeyService only, not used for any client response @@ -161,7 +165,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, omClientResponse = new OMKeyDeleteResponse(omResponse .setDeleteKeyResponse(DeleteKeyResponse.newBuilder()).build(), - omKeyInfo, ozoneManager.isRatisEnabled(), omVolumeArgs); + omKeyInfo, ozoneManager.isRatisEnabled(), omVolumeArgs, omBucketInfo); result = Result.SUCCESS; } catch (IOException ex) { diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java index d30eb6b7392..a3761a5cac7 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java @@ -565,4 +565,19 @@ protected OmVolumeArgs getVolumeInfo(OMMetadataManager omMetadataManager, new CacheKey<>(omMetadataManager.getVolumeKey(volume))) .getCacheValue(); } + + /** + * Return bucket info for the specified bucket. + * @param omMetadataManager + * @param volume + * @param bucket + * @return OmBucketInfo + * @throws IOException + */ + protected OmBucketInfo getBucketInfo(OMMetadataManager omMetadataManager, + String volume, String bucket) { + return omMetadataManager.getBucketTable().getCacheValue( + new CacheKey<>(omMetadataManager.getBucketKey(volume, bucket))) .getCacheValue(); + } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeysDeleteRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeysDeleteRequest.java index cb7edd61ff4..907b5013b6c 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeysDeleteRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeysDeleteRequest.java @@ -27,6 +27,7 @@ import org.apache.hadoop.ozone.om.OMMetrics; import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.ResolvedBucket; +import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; @@ -156,6 +157,8 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, long quotaReleased = 0; OmVolumeArgs omVolumeArgs = getVolumeInfo(omMetadataManager, volumeName); + OmBucketInfo omBucketInfo = + getBucketInfo(omMetadataManager, volumeName, bucketName); // Mark all keys which can be deleted, in cache as deleted. for (OmKeyInfo omKeyInfo : omKeyInfoList) { @@ -173,13 +176,14 @@ } // update usedBytes atomically. omVolumeArgs.getUsedBytes().add(-quotaReleased); + omBucketInfo.getUsedBytes().add(-quotaReleased); omClientResponse = new OMKeysDeleteResponse(omResponse .setDeleteKeysResponse(DeleteKeysResponse.newBuilder() .setStatus(deleteStatus).setUnDeletedKeys(unDeletedKeys)) .setStatus(deleteStatus ?
OK : PARTIAL_DELETE) .setSuccess(deleteStatus).build(), omKeyInfoList, trxnLogIndex, - ozoneManager.isRatisEnabled(), omVolumeArgs); + ozoneManager.isRatisEnabled(), omVolumeArgs, omBucketInfo); result = Result.SUCCESS; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadAbortRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadAbortRequest.java index 9c52e39c15d..8b53e7045dc 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadAbortRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadAbortRequest.java @@ -23,6 +23,7 @@ import java.util.Map; import com.google.common.base.Optional; +import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper; import org.apache.hadoop.ozone.om.request.util.OmResponseUtil; @@ -106,6 +107,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, OMClientResponse omClientResponse = null; Result result = null; OmVolumeArgs omVolumeArgs = null; + OmBucketInfo omBucketInfo = null; try { keyArgs = resolveBucketLink(ozoneManager, keyArgs, auditMap); volumeName = keyArgs.getVolumeName(); @@ -124,6 +126,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, OmKeyInfo omKeyInfo = omMetadataManager.getOpenKeyTable().get(multipartKey); omVolumeArgs = getVolumeInfo(omMetadataManager, volumeName); + omBucketInfo = getBucketInfo(omMetadataManager, volumeName, bucketName); // If there is no entry in openKeyTable, then there is no multipart // upload initiated for this key. @@ -150,6 +153,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, iterPartKeyInfo.getPartKeyInfo().getDataSize() * keyFactor; } omVolumeArgs.getUsedBytes().add(-quotaReleased); + omBucketInfo.getUsedBytes().add(-quotaReleased); // Update cache of openKeyTable and multipartInfo table. 
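Before the cache-update details continue below, the release arithmetic just shown, made concrete (hypothetical part sizes; the real loop walks the PartKeyInfo entries of the aborted upload):

    import java.util.Arrays;

    public final class AbortReleaseSketch {
      public static void main(String[] args) {
        long[] partSizes = {256L << 20, 256L << 20, 88L << 20}; // three parts
        int keyFactor = 3; // replication factor of the key
        long quotaReleased =
            Arrays.stream(partSizes).map(size -> size * keyFactor).sum();
        // Mirrors: omVolumeArgs.getUsedBytes().add(-quotaReleased);
        //          omBucketInfo.getUsedBytes().add(-quotaReleased);
        System.out.println(quotaReleased >> 20); // 1800 (MB)
      }
    }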
// No need to add the cache entries to delete table, as the entries @@ -165,7 +169,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, omResponse.setAbortMultiPartUploadResponse( MultipartUploadAbortResponse.newBuilder()).build(), multipartKey, multipartKeyInfo, ozoneManager.isRatisEnabled(), - omVolumeArgs); + omVolumeArgs, omBucketInfo); result = Result.SUCCESS; } catch (IOException ex) { diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequest.java index d50f32d0ba0..f471de4eab4 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequest.java @@ -24,6 +24,7 @@ import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.exceptions.OMException; +import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo; @@ -116,6 +117,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, OmMultipartKeyInfo multipartKeyInfo = null; Result result = null; OmVolumeArgs omVolumeArgs = null; + OmBucketInfo omBucketInfo = null; try { keyArgs = resolveBucketLink(ozoneManager, keyArgs, auditMap); volumeName = keyArgs.getVolumeName(); @@ -212,13 +214,16 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, long scmBlockSize = ozoneManager.getScmBlockSize(); int factor = omKeyInfo.getFactor().getNumber(); omVolumeArgs = getVolumeInfo(omMetadataManager, volumeName); + omBucketInfo = getBucketInfo(omMetadataManager, volumeName, bucketName); // update usedBytes atomically. // Block was pre-requested and UsedBytes updated when createKey and // AllocatedBlock. The space occupied by the Key shall be based on // the actual Key size, and the total Block size applied before should // be subtracted. 
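// (This is the same reconciliation as in OMKeyCommitRequest, applied per
// uploaded part: the part's pre-allocated blocks are exchanged for its
// actual replicated size.)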
- omVolumeArgs.getUsedBytes().add(omKeyInfo.getDataSize() * factor - - keyArgs.getKeyLocationsList().size() * scmBlockSize * factor); + long correctedSpace = omKeyInfo.getDataSize() * factor - + keyArgs.getKeyLocationsList().size() * scmBlockSize * factor; + omVolumeArgs.getUsedBytes().add(correctedSpace); + omBucketInfo.getUsedBytes().add(correctedSpace); omResponse.setCommitMultiPartUploadResponse( MultipartCommitUploadPartResponse.newBuilder() @@ -226,7 +231,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, omClientResponse = new S3MultipartUploadCommitPartResponse( omResponse.build(), multipartKey, openKey, multipartKeyInfo, oldPartKeyInfo, omKeyInfo, - ozoneManager.isRatisEnabled(), omVolumeArgs); + ozoneManager.isRatisEnabled(), omVolumeArgs, omBucketInfo); result = Result.SUCCESS; } catch (IOException ex) { @@ -235,7 +240,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, omClientResponse = new S3MultipartUploadCommitPartResponse( createErrorOMResponse(omResponse, exception), multipartKey, openKey, multipartKeyInfo, oldPartKeyInfo, omKeyInfo, - ozoneManager.isRatisEnabled(), omVolumeArgs); + ozoneManager.isRatisEnabled(), omVolumeArgs, omBucketInfo); } finally { addResponseToDoubleBuffer(trxnLogIndex, omClientResponse, omDoubleBufferHelper); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/file/OMFileCreateResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/file/OMFileCreateResponse.java index 9d7df238417..88cbf8cb65d 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/file/OMFileCreateResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/file/OMFileCreateResponse.java @@ -20,6 +20,7 @@ import javax.annotation.Nonnull; +import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; import org.apache.hadoop.ozone.om.response.key.OMKeyCreateResponse; @@ -35,9 +36,10 @@ public class OMFileCreateResponse extends OMKeyCreateResponse { public OMFileCreateResponse(@Nonnull OMResponse omResponse, @Nonnull OmKeyInfo omKeyInfo, @Nonnull List parentKeyInfos, - long openKeySessionID, OmVolumeArgs omVolumeArgs) { + long openKeySessionID, @Nonnull OmVolumeArgs omVolumeArgs, + @Nonnull OmBucketInfo omBucketInfo) { super(omResponse, omKeyInfo, parentKeyInfos, openKeySessionID, - omVolumeArgs); + omVolumeArgs, omBucketInfo); } /** diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMAllocateBlockResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMAllocateBlockResponse.java index cbaef70cd26..3995b5572da 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMAllocateBlockResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMAllocateBlockResponse.java @@ -19,6 +19,7 @@ package org.apache.hadoop.ozone.om.response.key; import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; import org.apache.hadoop.ozone.om.response.CleanupTableInfo; @@ -41,13 +42,16 @@ public class OMAllocateBlockResponse extends OMClientResponse { private OmKeyInfo omKeyInfo; 
private long clientID; private OmVolumeArgs omVolumeArgs; + private OmBucketInfo omBucketInfo; public OMAllocateBlockResponse(@Nonnull OMResponse omResponse, - @Nonnull OmKeyInfo omKeyInfo, long clientID, OmVolumeArgs omVolumeArgs) { + @Nonnull OmKeyInfo omKeyInfo, long clientID, + @Nonnull OmVolumeArgs omVolumeArgs, @Nonnull OmBucketInfo omBucketInfo) { super(omResponse); this.omKeyInfo = omKeyInfo; this.clientID = clientID; this.omVolumeArgs = omVolumeArgs; + this.omBucketInfo = omBucketInfo; } /** @@ -72,5 +76,9 @@ public void addToDBBatch(OMMetadataManager omMetadataManager, omMetadataManager.getVolumeTable().putWithBatch(batchOperation, omMetadataManager.getVolumeKey(omVolumeArgs.getVolume()), omVolumeArgs); + // update bucket usedBytes. + omMetadataManager.getBucketTable().putWithBatch(batchOperation, + omMetadataManager.getBucketKey(omVolumeArgs.getVolume(), + omBucketInfo.getBucketName()), omBucketInfo); } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCommitResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCommitResponse.java index 0d7a6bae868..aede2ec18e9 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCommitResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCommitResponse.java @@ -19,6 +19,7 @@ package org.apache.hadoop.ozone.om.response.key; import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; import org.apache.hadoop.ozone.om.response.CleanupTableInfo; @@ -42,15 +43,17 @@ public class OMKeyCommitResponse extends OMClientResponse { private String ozoneKeyName; private String openKeyName; private OmVolumeArgs omVolumeArgs; + private OmBucketInfo omBucketInfo; public OMKeyCommitResponse(@Nonnull OMResponse omResponse, @Nonnull OmKeyInfo omKeyInfo, String ozoneKeyName, String openKeyName, - OmVolumeArgs omVolumeArgs) { + @Nonnull OmVolumeArgs omVolumeArgs, @Nonnull OmBucketInfo omBucketInfo) { super(omResponse); this.omKeyInfo = omKeyInfo; this.ozoneKeyName = ozoneKeyName; this.openKeyName = openKeyName; this.omVolumeArgs = omVolumeArgs; + this.omBucketInfo = omBucketInfo; } /** @@ -77,6 +80,10 @@ public void addToDBBatch(OMMetadataManager omMetadataManager, omMetadataManager.getVolumeTable().putWithBatch(batchOperation, omMetadataManager.getVolumeKey(omVolumeArgs.getVolume()), omVolumeArgs); + // update bucket usedBytes. 
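+ // (The whole mutated OmBucketInfo is re-written under its bucket key, so
+ // the usedBytes change lands in the same RocksDB batch as the key-table
+ // update and cannot be applied partially.)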
+ omMetadataManager.getBucketTable().putWithBatch(batchOperation, + omMetadataManager.getBucketKey(omVolumeArgs.getVolume(), + omBucketInfo.getBucketName()), omBucketInfo); } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCreateResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCreateResponse.java index 7e48a8f7115..2ae53591849 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCreateResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCreateResponse.java @@ -23,6 +23,7 @@ import javax.annotation.Nonnull; import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; import org.apache.hadoop.ozone.om.response.CleanupTableInfo; @@ -47,15 +48,18 @@ public class OMKeyCreateResponse extends OMClientResponse { private long openKeySessionID; private List parentKeyInfos; private OmVolumeArgs omVolumeArgs; + private OmBucketInfo omBucketInfo; public OMKeyCreateResponse(@Nonnull OMResponse omResponse, @Nonnull OmKeyInfo omKeyInfo, List parentKeyInfos, - long openKeySessionID, OmVolumeArgs omVolumeArgs) { + long openKeySessionID, @Nonnull OmVolumeArgs omVolumeArgs, + @Nonnull OmBucketInfo omBucketInfo) { super(omResponse); this.omKeyInfo = omKeyInfo; this.openKeySessionID = openKeySessionID; this.parentKeyInfos = parentKeyInfos; this.omVolumeArgs = omVolumeArgs; + this.omBucketInfo = omBucketInfo; } /** @@ -99,6 +103,10 @@ protected void addToDBBatch(OMMetadataManager omMetadataManager, omMetadataManager.getVolumeTable().putWithBatch(batchOperation, omMetadataManager.getVolumeKey(omVolumeArgs.getVolume()), omVolumeArgs); + // update bucket usedBytes. 
+ omMetadataManager.getBucketTable().putWithBatch(batchOperation, + omMetadataManager.getBucketKey(omVolumeArgs.getVolume(), + omBucketInfo.getBucketName()), omBucketInfo); } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyDeleteResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyDeleteResponse.java index 8c4b7fdbece..128b657a643 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyDeleteResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyDeleteResponse.java @@ -20,6 +20,7 @@ import org.apache.hadoop.ozone.OmUtils; import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; @@ -46,14 +47,16 @@ public class OMKeyDeleteResponse extends OMClientResponse { private OmKeyInfo omKeyInfo; private boolean isRatisEnabled; private OmVolumeArgs omVolumeArgs; + private OmBucketInfo omBucketInfo; public OMKeyDeleteResponse(@Nonnull OMResponse omResponse, @Nonnull OmKeyInfo omKeyInfo, boolean isRatisEnabled, - @Nonnull OmVolumeArgs omVolumeArgs) { + @Nonnull OmVolumeArgs omVolumeArgs, @Nonnull OmBucketInfo omBucketInfo) { super(omResponse); this.omKeyInfo = omKeyInfo; this.isRatisEnabled = isRatisEnabled; this.omVolumeArgs = omVolumeArgs; + this.omBucketInfo = omBucketInfo; } /** @@ -98,6 +101,10 @@ public void addToDBBatch(OMMetadataManager omMetadataManager, omMetadataManager.getVolumeTable().putWithBatch(batchOperation, omMetadataManager.getVolumeKey(omVolumeArgs.getVolume()), omVolumeArgs); + // update bucket usedBytes. 
+ omMetadataManager.getBucketTable().putWithBatch(batchOperation, + omMetadataManager.getBucketKey(omVolumeArgs.getVolume(), + omBucketInfo.getBucketName()), omBucketInfo); } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeysDeleteResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeysDeleteResponse.java index c98794a1876..228e5a6b8ea 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeysDeleteResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeysDeleteResponse.java @@ -21,6 +21,7 @@ import org.apache.hadoop.hdds.utils.db.BatchOperation; import org.apache.hadoop.ozone.OmUtils; import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; @@ -45,15 +46,18 @@ public class OMKeysDeleteResponse extends OMClientResponse { private boolean isRatisEnabled; private long trxnLogIndex; private OmVolumeArgs omVolumeArgs; + private OmBucketInfo omBucketInfo; public OMKeysDeleteResponse(@Nonnull OMResponse omResponse, @Nonnull List keyDeleteList, long trxnLogIndex, - boolean isRatisEnabled, OmVolumeArgs omVolumeArgs) { + boolean isRatisEnabled, @Nonnull OmVolumeArgs omVolumeArgs, + @Nonnull OmBucketInfo omBucketInfo) { super(omResponse); this.omKeyInfoList = keyDeleteList; this.isRatisEnabled = isRatisEnabled; this.trxnLogIndex = trxnLogIndex; this.omVolumeArgs = omVolumeArgs; + this.omBucketInfo = omBucketInfo; } /** @@ -113,5 +117,9 @@ public void addToDBBatch(OMMetadataManager omMetadataManager, omMetadataManager.getVolumeTable().putWithBatch(batchOperation, omMetadataManager.getVolumeKey(omVolumeArgs.getVolume()), omVolumeArgs); + // update bucket usedBytes. 
+ omMetadataManager.getBucketTable().putWithBatch(batchOperation, + omMetadataManager.getBucketKey(omVolumeArgs.getVolume(), + omBucketInfo.getBucketName()), omBucketInfo); } } \ No newline at end of file diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadAbortResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadAbortResponse.java index 1b2ed8d7a3a..73ae49eeec7 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadAbortResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadAbortResponse.java @@ -20,6 +20,7 @@ import org.apache.hadoop.ozone.OmUtils; import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; @@ -52,15 +53,18 @@ public class S3MultipartUploadAbortResponse extends OMClientResponse { private OmMultipartKeyInfo omMultipartKeyInfo; private boolean isRatisEnabled; private OmVolumeArgs omVolumeArgs; + private OmBucketInfo omBucketInfo; public S3MultipartUploadAbortResponse(@Nonnull OMResponse omResponse, String multipartKey, @Nonnull OmMultipartKeyInfo omMultipartKeyInfo, - boolean isRatisEnabled, @Nonnull OmVolumeArgs omVolumeArgs) { + boolean isRatisEnabled, @Nonnull OmVolumeArgs omVolumeArgs, + @Nonnull OmBucketInfo omBucketInfo) { super(omResponse); this.multipartKey = multipartKey; this.omMultipartKeyInfo = omMultipartKeyInfo; this.isRatisEnabled = isRatisEnabled; this.omVolumeArgs = omVolumeArgs; + this.omBucketInfo = omBucketInfo; } /** @@ -104,6 +108,10 @@ public void addToDBBatch(OMMetadataManager omMetadataManager, omMetadataManager.getVolumeTable().putWithBatch(batchOperation, omMetadataManager.getVolumeKey(omVolumeArgs.getVolume()), omVolumeArgs); + // update bucket usedBytes. + omMetadataManager.getBucketTable().putWithBatch(batchOperation, + omMetadataManager.getBucketKey(omVolumeArgs.getVolume(), + omBucketInfo.getBucketName()), omBucketInfo); } } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCommitPartResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCommitPartResponse.java index 0cbab3ce8f5..7e8ac55a6dd 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCommitPartResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCommitPartResponse.java @@ -20,6 +20,7 @@ import org.apache.hadoop.ozone.OmUtils; import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; @@ -58,6 +59,7 @@ public class S3MultipartUploadCommitPartResponse extends OMClientResponse { private OmKeyInfo openPartKeyInfoToBeDeleted; private boolean isRatisEnabled; private OmVolumeArgs omVolumeArgs; + private OmBucketInfo omBucketInfo; /** * Regular response. 
@@ -76,7 +78,8 @@ public S3MultipartUploadCommitPartResponse(@Nonnull OMResponse omResponse, @Nullable OmMultipartKeyInfo omMultipartKeyInfo, @Nullable OzoneManagerProtocolProtos.PartKeyInfo oldPartKeyInfo, @Nullable OmKeyInfo openPartKeyInfoToBeDeleted, - boolean isRatisEnabled, OmVolumeArgs omVolumeArgs) { + boolean isRatisEnabled, @Nonnull OmVolumeArgs omVolumeArgs, + @Nonnull OmBucketInfo omBucketInfo) { super(omResponse); this.multipartKey = multipartKey; this.openKey = openKey; @@ -85,6 +88,7 @@ public S3MultipartUploadCommitPartResponse(@Nonnull OMResponse omResponse, this.openPartKeyInfoToBeDeleted = openPartKeyInfoToBeDeleted; this.isRatisEnabled = isRatisEnabled; this.omVolumeArgs = omVolumeArgs; + this.omBucketInfo = omBucketInfo; } @Override @@ -151,6 +155,10 @@ public void addToDBBatch(OMMetadataManager omMetadataManager, omMetadataManager.getVolumeTable().putWithBatch(batchOperation, omMetadataManager.getVolumeKey(omVolumeArgs.getVolume()), omVolumeArgs); + // update bucket usedBytes. + omMetadataManager.getBucketTable().putWithBatch(batchOperation, + omMetadataManager.getBucketKey(omVolumeArgs.getVolume(), + omBucketInfo.getBucketName()), omBucketInfo); } } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMAllocateBlockResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMAllocateBlockResponse.java index a4834554a12..494a308b28e 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMAllocateBlockResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMAllocateBlockResponse.java @@ -18,6 +18,7 @@ package org.apache.hadoop.ozone.om.response.key; +import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; import org.apache.hadoop.util.Time; import org.junit.Assert; @@ -26,10 +27,8 @@ import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .AllocateBlockResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .OMResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.AllocateBlockResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; /** * Tests OMAllocateBlockResponse. 
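The fixtures below build a minimal OmBucketInfo; for completeness, the setUsedBytes knob added by this patch can seed the counter as well. A hedged sketch (builder methods as defined in OmBucketInfo above; values illustrative):

    import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
    import org.apache.hadoop.util.Time;

    public final class BucketFixtureSketch {
      public static OmBucketInfo sampleBucket() {
        return OmBucketInfo.newBuilder()
            .setVolumeName("vol1")
            .setBucketName("bucket1")
            .setCreationTime(Time.now())
            .setUsedBytes(1024L) // seeds the internal LongAdder
            .build();
      }

      public static void main(String[] args) {
        System.out.println(sampleBucket().getUsedBytes().sum()); // 1024
      }
    }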
@@ -44,6 +43,9 @@ public void testAddToDBBatch() throws Exception {
     OmVolumeArgs omVolumeArgs = OmVolumeArgs.newBuilder()
         .setOwnerName(keyName).setAdminName(keyName)
         .setVolume(volumeName).setCreationTime(Time.now()).build();
+    OmBucketInfo omBucketInfo = OmBucketInfo.newBuilder()
+        .setVolumeName(volumeName).setBucketName(bucketName)
+        .setCreationTime(Time.now()).build();
 
     OMResponse omResponse = OMResponse.newBuilder()
         .setAllocateBlockResponse(
@@ -53,7 +55,7 @@ public void testAddToDBBatch() throws Exception {
         .build();
     OMAllocateBlockResponse omAllocateBlockResponse =
         new OMAllocateBlockResponse(omResponse, omKeyInfo, clientID,
-            omVolumeArgs);
+            omVolumeArgs, omBucketInfo);
 
     String openKey = omMetadataManager.getOpenKey(volumeName, bucketName,
         keyName, clientID);
@@ -75,6 +77,9 @@ public void testAddToDBBatchWithErrorResponse() throws Exception {
     OmVolumeArgs omVolumeArgs = OmVolumeArgs.newBuilder()
         .setOwnerName(keyName).setAdminName(keyName)
         .setVolume(volumeName).setCreationTime(Time.now()).build();
+    OmBucketInfo omBucketInfo = OmBucketInfo.newBuilder()
+        .setVolumeName(volumeName).setBucketName(bucketName)
+        .setCreationTime(Time.now()).build();
 
     OMResponse omResponse = OMResponse.newBuilder()
         .setAllocateBlockResponse(
@@ -84,7 +89,7 @@ public void testAddToDBBatchWithErrorResponse() throws Exception {
         .build();
     OMAllocateBlockResponse omAllocateBlockResponse =
         new OMAllocateBlockResponse(omResponse, omKeyInfo, clientID,
-            omVolumeArgs);
+            omVolumeArgs, omBucketInfo);
 
     // Before calling addToDBBatch
     String openKey = omMetadataManager.getOpenKey(volumeName, bucketName,
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCommitResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCommitResponse.java
index 2169665e6a6..ab425f2f60d 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCommitResponse.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCommitResponse.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.ozone.om.response.key;
 
+import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
 import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
 import org.apache.hadoop.util.Time;
 import org.junit.Assert;
@@ -40,6 +41,9 @@ public void testAddToDBBatch() throws Exception {
     OmVolumeArgs omVolumeArgs = OmVolumeArgs.newBuilder()
         .setOwnerName(keyName).setAdminName(keyName)
         .setVolume(volumeName).setCreationTime(Time.now()).build();
+    OmBucketInfo omBucketInfo = OmBucketInfo.newBuilder()
+        .setVolumeName(volumeName).setBucketName(bucketName)
+        .setCreationTime(Time.now()).build();
 
     OzoneManagerProtocolProtos.OMResponse omResponse =
         OzoneManagerProtocolProtos.OMResponse.newBuilder().setCommitKeyResponse(
@@ -60,7 +64,7 @@ public void testAddToDBBatch() throws Exception {
     String ozoneKey = omMetadataManager.getOzoneKey(volumeName, bucketName,
         keyName);
     OMKeyCommitResponse omKeyCommitResponse = new OMKeyCommitResponse(
-        omResponse, omKeyInfo, ozoneKey, openKey, omVolumeArgs);
+        omResponse, omKeyInfo, ozoneKey, openKey, omVolumeArgs, omBucketInfo);
 
     omKeyCommitResponse.addToDBBatch(omMetadataManager, batchOperation);
 
@@ -81,6 +85,9 @@ public void testAddToDBBatchNoOp() throws Exception {
     OmVolumeArgs omVolumeArgs = OmVolumeArgs.newBuilder()
         .setOwnerName(keyName).setAdminName(keyName)
         .setVolume(volumeName).setCreationTime(Time.now()).build();
+    OmBucketInfo omBucketInfo = OmBucketInfo.newBuilder()
+        .setVolumeName(volumeName).setBucketName(bucketName)
+        .setCreationTime(Time.now()).build();
 
     OzoneManagerProtocolProtos.OMResponse omResponse =
         OzoneManagerProtocolProtos.OMResponse.newBuilder().setCommitKeyResponse(
@@ -95,7 +102,7 @@ public void testAddToDBBatchNoOp() throws Exception {
         keyName);
 
     OMKeyCommitResponse omKeyCommitResponse = new OMKeyCommitResponse(
-        omResponse, omKeyInfo, ozoneKey, openKey, omVolumeArgs);
+        omResponse, omKeyInfo, ozoneKey, openKey, omVolumeArgs, omBucketInfo);
 
     // As during commit Key, entry will be already there in openKeyTable.
     // Adding it here.
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCreateResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCreateResponse.java
index 006d65f714c..6357000f7cf 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCreateResponse.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCreateResponse.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.ozone.om.response.key;
 
+import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
 import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
 import org.apache.hadoop.util.Time;
 import org.junit.Assert;
@@ -46,6 +47,9 @@ public void testAddToDBBatch() throws Exception {
     OmVolumeArgs omVolumeArgs = OmVolumeArgs.newBuilder()
         .setOwnerName(keyName).setAdminName(keyName)
         .setVolume(volumeName).setCreationTime(Time.now()).build();
+    OmBucketInfo omBucketInfo = OmBucketInfo.newBuilder()
+        .setVolumeName(volumeName).setBucketName(bucketName)
+        .setCreationTime(Time.now()).build();
 
     OMResponse omResponse = OMResponse.newBuilder().setCreateKeyResponse(
                 CreateKeyResponse.getDefaultInstance())
@@ -55,7 +59,7 @@ public void testAddToDBBatch() throws Exception {
 
     OMKeyCreateResponse omKeyCreateResponse =
         new OMKeyCreateResponse(omResponse, omKeyInfo, null, clientID,
-            omVolumeArgs);
+            omVolumeArgs, omBucketInfo);
 
     String openKey = omMetadataManager.getOpenKey(volumeName, bucketName,
         keyName, clientID);
@@ -76,6 +80,9 @@ public void testAddToDBBatchWithErrorResponse() throws Exception {
     OmVolumeArgs omVolumeArgs = OmVolumeArgs.newBuilder()
         .setOwnerName(keyName).setAdminName(keyName)
         .setVolume(volumeName).setCreationTime(Time.now()).build();
+    OmBucketInfo omBucketInfo = OmBucketInfo.newBuilder()
+        .setVolumeName(volumeName).setBucketName(bucketName)
+        .setCreationTime(Time.now()).build();
 
     OMResponse omResponse = OMResponse.newBuilder().setCreateKeyResponse(
                 CreateKeyResponse.getDefaultInstance())
@@ -85,7 +92,7 @@ public void testAddToDBBatchWithErrorResponse() throws Exception {
 
     OMKeyCreateResponse omKeyCreateResponse =
         new OMKeyCreateResponse(omResponse, omKeyInfo, null, clientID,
-            omVolumeArgs);
+            omVolumeArgs, omBucketInfo);
 
     // Before calling addToDBBatch
     String openKey = omMetadataManager.getOpenKey(volumeName, bucketName,
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyDeleteResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyDeleteResponse.java
index bbf22ce648e..440fa7837eb 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyDeleteResponse.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyDeleteResponse.java
@@ -21,6 +21,7 @@
 import org.apache.hadoop.hdds.client.BlockID;
 import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
 import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
+import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
 import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
 import org.apache.hadoop.util.Time;
@@ -47,6 +48,9 @@ public void testAddToDBBatch() throws Exception {
     OmVolumeArgs omVolumeArgs = OmVolumeArgs.newBuilder()
         .setOwnerName(keyName).setAdminName(keyName)
         .setVolume(volumeName).setCreationTime(Time.now()).build();
+    OmBucketInfo omBucketInfo = OmBucketInfo.newBuilder()
+        .setVolumeName(volumeName).setBucketName(bucketName)
+        .setCreationTime(Time.now()).build();
 
     OzoneManagerProtocolProtos.OMResponse omResponse =
         OzoneManagerProtocolProtos.OMResponse.newBuilder().setDeleteKeyResponse(
@@ -56,7 +60,7 @@ public void testAddToDBBatch() throws Exception {
         .build();
 
     OMKeyDeleteResponse omKeyDeleteResponse = new OMKeyDeleteResponse(
-        omResponse, omKeyInfo, true, omVolumeArgs);
+        omResponse, omKeyInfo, true, omVolumeArgs, omBucketInfo);
 
     String ozoneKey = omMetadataManager.getOzoneKey(volumeName, bucketName,
         keyName);
@@ -86,6 +90,9 @@ public void testAddToDBBatchWithNonEmptyBlocks() throws Exception {
     OmVolumeArgs omVolumeArgs = OmVolumeArgs.newBuilder()
         .setOwnerName(keyName).setAdminName(keyName)
         .setVolume(volumeName).setCreationTime(Time.now()).build();
+    OmBucketInfo omBucketInfo = OmBucketInfo.newBuilder()
+        .setVolumeName(volumeName).setBucketName(bucketName)
+        .setCreationTime(Time.now()).build();
 
     // Add block to key.
     List<OmKeyLocationInfo> omKeyLocationInfoList = new ArrayList<>();
@@ -121,7 +128,7 @@ public void testAddToDBBatchWithNonEmptyBlocks() throws Exception {
         .build();
 
     OMKeyDeleteResponse omKeyDeleteResponse = new OMKeyDeleteResponse(
-        omResponse, omKeyInfo, true, omVolumeArgs);
+        omResponse, omKeyInfo, true, omVolumeArgs, omBucketInfo);
 
     Assert.assertTrue(omMetadataManager.getKeyTable().isExist(ozoneKey));
     omKeyDeleteResponse.addToDBBatch(omMetadataManager, batchOperation);
@@ -144,6 +151,9 @@ public void testAddToDBBatchWithErrorResponse() throws Exception {
     OmVolumeArgs omVolumeArgs = OmVolumeArgs.newBuilder()
         .setOwnerName(keyName).setAdminName(keyName)
         .setVolume(volumeName).setCreationTime(Time.now()).build();
+    OmBucketInfo omBucketInfo = OmBucketInfo.newBuilder()
+        .setVolumeName(volumeName).setBucketName(bucketName)
+        .setCreationTime(Time.now()).build();
 
     OzoneManagerProtocolProtos.OMResponse omResponse =
         OzoneManagerProtocolProtos.OMResponse.newBuilder().setDeleteKeyResponse(
@@ -153,7 +163,7 @@ public void testAddToDBBatchWithErrorResponse() throws Exception {
         .build();
 
     OMKeyDeleteResponse omKeyDeleteResponse = new OMKeyDeleteResponse(
-        omResponse, omKeyInfo, true, omVolumeArgs);
+        omResponse, omKeyInfo, true, omVolumeArgs, omBucketInfo);
 
     String ozoneKey = omMetadataManager.getOzoneKey(volumeName, bucketName,
         keyName);
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeysDeleteResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeysDeleteResponse.java
index de8d95d7ebc..9c7092623f7 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeysDeleteResponse.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeysDeleteResponse.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.ozone.om.response.key;
 
+import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
 import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo;
@@ -79,10 +80,13 @@ public void testKeysDeleteResponse() throws Exception {
     OmVolumeArgs omVolumeArgs = OmVolumeArgs.newBuilder()
         .setOwnerName(keyName).setAdminName(keyName)
         .setVolume(volumeName).setCreationTime(Time.now()).build();
+    OmBucketInfo omBucketInfo = OmBucketInfo.newBuilder()
+        .setVolumeName(volumeName).setBucketName(bucketName)
+        .setCreationTime(Time.now()).build();
 
     OMClientResponse omKeysDeleteResponse = new OMKeysDeleteResponse(
         omResponse, omKeyInfoList, 10L, true,
-        omVolumeArgs);
+        omVolumeArgs, omBucketInfo);
 
     omKeysDeleteResponse.checkAndUpdateDB(omMetadataManager, batchOperation);
 
@@ -114,10 +118,13 @@ public void testKeysDeleteResponseFail() throws Exception {
     OmVolumeArgs omVolumeArgs = OmVolumeArgs.newBuilder()
         .setOwnerName(keyName).setAdminName(keyName)
         .setVolume(volumeName).setCreationTime(Time.now()).build();
+    OmBucketInfo omBucketInfo = OmBucketInfo.newBuilder()
+        .setVolumeName(volumeName).setBucketName(bucketName)
+        .setCreationTime(Time.now()).build();
 
     OMClientResponse omKeysDeleteResponse = new OMKeysDeleteResponse(
         omResponse, omKeyInfoList, 10L, true,
-        omVolumeArgs);
+        omVolumeArgs, omBucketInfo);
 
     omKeysDeleteResponse.checkAndUpdateDB(omMetadataManager, batchOperation);
 
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartResponse.java
index 6900bbb392a..ca525d263b2 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartResponse.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartResponse.java
@@ -23,6 +23,7 @@
 import java.util.Collections;
 import java.util.UUID;
 
+import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
 import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
 import org.junit.Before;
 import org.junit.Rule;
@@ -109,7 +110,8 @@ public S3InitiateMultipartUploadResponse createS3InitiateMPUResponse(
 
   public S3MultipartUploadAbortResponse createS3AbortMPUResponse(
       String multipartKey, long timeStamp,
-      OmMultipartKeyInfo omMultipartKeyInfo, OmVolumeArgs omVolumeArgs) {
+      OmMultipartKeyInfo omMultipartKeyInfo, OmVolumeArgs omVolumeArgs,
+      OmBucketInfo omBucketInfo) {
 
     OMResponse omResponse = OMResponse.newBuilder()
         .setCmdType(OzoneManagerProtocolProtos.Type.AbortMultiPartUpload)
         .setStatus(OzoneManagerProtocolProtos.Status.OK)
@@ -118,7 +120,7 @@ public S3MultipartUploadAbortResponse createS3AbortMPUResponse(
             MultipartUploadAbortResponse.newBuilder().build()).build();
 
     return new S3MultipartUploadAbortResponse(omResponse, multipartKey,
-        omMultipartKeyInfo, true, omVolumeArgs);
+        omMultipartKeyInfo, true, omVolumeArgs, omBucketInfo);
   }
 
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartUploadAbortResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartUploadAbortResponse.java
index fd537216e92..da030a9f143 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartUploadAbortResponse.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartUploadAbortResponse.java
@@ -20,6 +20,7 @@
 
 import java.util.UUID;
 
+import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
 import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
 import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo;
 import org.junit.Assert;
@@ -50,6 +51,9 @@ public void testAddDBToBatch() throws Exception {
     OmVolumeArgs omVolumeArgs = OmVolumeArgs.newBuilder()
         .setOwnerName(keyName).setAdminName(keyName)
         .setVolume(volumeName).setCreationTime(Time.now()).build();
+    OmBucketInfo omBucketInfo = OmBucketInfo.newBuilder()
+        .setVolumeName(volumeName).setBucketName(bucketName)
+        .setCreationTime(Time.now()).build();
 
     S3InitiateMultipartUploadResponse s3InitiateMultipartUploadResponse =
         createS3InitiateMPUResponse(volumeName, bucketName, keyName,
             multipartUploadID);
@@ -60,7 +64,7 @@ public void testAddDBToBatch() throws Exception {
     S3MultipartUploadAbortResponse s3MultipartUploadAbortResponse =
         createS3AbortMPUResponse(multipartKey, Time.now(),
             s3InitiateMultipartUploadResponse.getOmMultipartKeyInfo(),
-            omVolumeArgs);
+            omVolumeArgs, omBucketInfo);
 
     s3MultipartUploadAbortResponse.addToDBBatch(omMetadataManager,
         batchOperation);
@@ -88,6 +92,9 @@ public void testAddDBToBatchWithParts() throws Exception {
     OmVolumeArgs omVolumeArgs = OmVolumeArgs.newBuilder()
         .setOwnerName(keyName).setAdminName(keyName)
         .setVolume(volumeName).setCreationTime(Time.now()).build();
+    OmBucketInfo omBucketInfo = OmBucketInfo.newBuilder()
+        .setVolumeName(volumeName).setBucketName(bucketName)
+        .setCreationTime(Time.now()).build();
 
     S3InitiateMultipartUploadResponse s3InitiateMultipartUploadResponse =
         createS3InitiateMPUResponse(volumeName, bucketName, keyName,
@@ -117,7 +124,7 @@ public void testAddDBToBatchWithParts() throws Exception {
     S3MultipartUploadAbortResponse s3MultipartUploadAbortResponse =
         createS3AbortMPUResponse(multipartKey, timeStamp,
             s3InitiateMultipartUploadResponse.getOmMultipartKeyInfo(),
-            omVolumeArgs);
+            omVolumeArgs, omBucketInfo);
 
     s3MultipartUploadAbortResponse.addToDBBatch(omMetadataManager,
         batchOperation);