diff --git a/hadoop-hdds/docs/content/feature/Quota.md b/hadoop-hdds/docs/content/feature/Quota.md index 5be9f4db4d0c..933bbb50aec3 100644 --- a/hadoop-hdds/docs/content/feature/Quota.md +++ b/hadoop-hdds/docs/content/feature/Quota.md @@ -31,7 +31,12 @@ So far, we know that Ozone allows users to create volumes, buckets, and keys. A ## Currently supported 1. Storage Space level quota -Administrators should be able to define how much storage space a Volume or Bucket can use. +Administrators should be able to define how much storage space a Volume or Bucket can use. The following settings for storage space quota are currently supported: +a. By default, the quota for volume and bucket is not enabled. +b. When volume quota is enabled, the total size of the bucket quotas cannot exceed the volume quota. +c. Bucket quota can be set separately without enabling volume quota. The size of the bucket quota is unrestricted at this point. +d. Volume quota is not currently supported separately; volume quota takes effect only if bucket quota is set, because Ozone only checks the usedBytes of the bucket when we write the key. + ## Client usage ### Storage Space level quota @@ -59,7 +64,7 @@ bin/ozone sh bucket setquota --space-quota 10GB /volume1/bucket1 ``` This behavior changes the quota for Bucket1 to 10GB -A bucket quota should not be greater than its Volume quota. Let's look at an example. If we have a 10MB Volume and create five buckets under that Volume with a quota of 5MB, the total quota is 25MB. In this case, the bucket creation will always succeed, and we check the quota for bucket and volume when the data is actually written. Each write needs to check whether the current bucket is exceeding the limit and the current total volume usage is exceeding the limit. +The total bucket quota should not be greater than its Volume quota. If we have a 10MB Volume, the sum of the sizes of all buckets under this volume cannot exceed 10MB; otherwise setting the bucket quota fails. 
#### Clear the quota for Volume1. The Bucket cleanup command is similar. ```shell diff --git a/hadoop-hdds/docs/content/feature/Quota.zh.md b/hadoop-hdds/docs/content/feature/Quota.zh.md index 9cdb2217b73f..42726397b392 100644 --- a/hadoop-hdds/docs/content/feature/Quota.zh.md +++ b/hadoop-hdds/docs/content/feature/Quota.zh.md @@ -26,7 +26,11 @@ icon: user ## 目前支持的 1. Storage space级别配额 - 管理员应该能够定义一个Volume或Bucket可以使用多少存储空间。 + 管理员应该能够定义一个Volume或Bucket可以使用多少存储空间。目前支持以下storage space quota的设置: + a. 默认情况下volume和bucket的quota不启用。 + b. 当volume quota启用时,bucket quota的总大小不能超过volume。 + c. 可以在不启用volume quota的情况下单独给bucket设置quota。此时bucket quota的大小是不受限制的。 + d. 目前不支持单独设置volume quota,只有在设置了bucket quota的情况下volume quota才会生效。因为ozone在写入key时只检查bucket的usedBytes。 ## 客户端用法 ### Storage space级别配额 @@ -53,7 +57,7 @@ bin/ozone sh bucket setquota --space-quota 10GB /volume1/bucket1 ``` 该行为将bucket1的配额更改为10GB -一个bucket配额 不应大于其Volume的配额。让我们看一个例子,如果我们有一个10MB的Volume,并在该Volume下创建5个Bucket,配额为5MB,则总配额为25MB。在这种情况下,创建存储桶将始终成功,我们会在数据真正写入时检查bucket和volume的quota。每次写入需要检查当前bucket的是否超上限,当前总的volume使用量是否超上限。 +bucket的总配额 不应大于其Volume的配额。让我们看一个例子,如果我们有一个10MB的Volume,该volume下所有bucket的大小之和不能超过10MB,否则设置bucket quota将失败。 #### 清除Volume1的配额, Bucket清除命令与此类似 ```shell diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneVolume.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneVolume.java index 0e9e94285423..b54692addd87 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneVolume.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneVolume.java @@ -85,8 +85,6 @@ public class OzoneVolume extends WithMetadata { private int listCacheSize; - private long usedBytes; - /** * Constructs OzoneVolume instance. * @param conf Configuration object. 
@@ -135,17 +133,6 @@ public OzoneVolume(ConfigurationSource conf, ClientProtocol proxy, this.modificationTime = Instant.ofEpochMilli(modificationTime); } - @SuppressWarnings("parameternumber") - public OzoneVolume(ConfigurationSource conf, ClientProtocol proxy, - String name, String admin, String owner, long quotaInBytes, - long quotaInCounts, long creationTime, long modificationTime, - List acls, Map metadata, - long usedBytes) { - this(conf, proxy, name, admin, owner, quotaInBytes, quotaInCounts, - creationTime, acls, metadata); - this.usedBytes = usedBytes; - } - @SuppressWarnings("parameternumber") public OzoneVolume(ConfigurationSource conf, ClientProtocol proxy, String name, String admin, String owner, long quotaInBytes, @@ -269,10 +256,6 @@ public List getAcls() { return acls; } - public long getUsedBytes() { - return usedBytes; - } - /** * Sets/Changes the owner of this Volume. * @param userName new owner diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java index c61d0eb2074d..9fb650f7d327 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java @@ -361,8 +361,7 @@ public OzoneVolume getVolumeDetails(String volumeName) volume.getModificationTime(), volume.getAclMap().ozoneAclGetProtobuf().stream(). map(OzoneAcl::fromProtobuf).collect(Collectors.toList()), - volume.getMetadata(), - volume.getUsedBytes().sum()); + volume.getMetadata()); } @Override @@ -418,8 +417,7 @@ public List listVolumes(String user, String volumePrefix, volume.getModificationTime(), volume.getAclMap().ozoneAclGetProtobuf().stream(). 
map(OzoneAcl::fromProtobuf).collect(Collectors.toList()), - volume.getMetadata(), - volume.getUsedBytes().sum())) + volume.getMetadata())) .collect(Collectors.toList()); } @@ -674,7 +672,7 @@ public OzoneBucket getBucketDetails( .getEncryptionKeyInfo().getKeyName() : null, bucketInfo.getSourceVolume(), bucketInfo.getSourceBucket(), - bucketInfo.getUsedBytes().sum(), + bucketInfo.getUsedBytes(), bucketInfo.getQuotaInBytes(), bucketInfo.getQuotaInCounts() ); @@ -701,7 +699,7 @@ public List listBuckets(String volumeName, String bucketPrefix, .getEncryptionKeyInfo().getKeyName() : null, bucket.getSourceVolume(), bucket.getSourceBucket(), - bucket.getUsedBytes().sum(), + bucket.getUsedBytes(), bucket.getQuotaInBytes(), bucket.getQuotaInCounts())) .collect(Collectors.toList()); diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketInfo.java index d25cb1257648..a23bbfc1dc06 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketInfo.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketInfo.java @@ -20,7 +20,6 @@ import java.util.ArrayList; import java.util.BitSet; -import java.util.concurrent.atomic.LongAdder; import java.util.HashMap; import java.util.LinkedHashMap; import java.util.List; @@ -80,7 +79,7 @@ public final class OmBucketInfo extends WithObjectID implements Auditable { private final String sourceBucket; - private final LongAdder usedBytes = new LongAdder(); + private long usedBytes; private long quotaInBytes; private long quotaInCounts; @@ -132,7 +131,7 @@ private OmBucketInfo(String volumeName, this.bekInfo = bekInfo; this.sourceVolume = sourceVolume; this.sourceBucket = sourceBucket; - this.usedBytes.add(usedBytes); + this.usedBytes = usedBytes; this.quotaInBytes = quotaInBytes; this.quotaInCounts = quotaInCounts; } @@ -241,9 +240,14 @@ public String 
getSourceBucket() { } - public LongAdder getUsedBytes() { + public long getUsedBytes() { return usedBytes; } + + public void incrUsedBytes(long bytes) { + this.usedBytes += bytes; + } + public long getQuotaInBytes() { return quotaInBytes; } @@ -324,7 +328,7 @@ public Builder toBuilder() { .setSourceBucket(sourceBucket) .setAcls(acls) .addAllMetadata(metadata) - .setUsedBytes(usedBytes.sum()) + .setUsedBytes(usedBytes) .setQuotaInBytes(quotaInBytes) .setQuotaInCounts(quotaInCounts); } @@ -489,7 +493,7 @@ public BucketInfo getProtobuf() { .setModificationTime(modificationTime) .setObjectID(objectID) .setUpdateID(updateID) - .setUsedBytes(usedBytes.sum()) + .setUsedBytes(usedBytes) .addAllMetadata(KeyValueUtil.toProtobuf(metadata)) .setQuotaInBytes(quotaInBytes) .setQuotaInCounts(quotaInCounts); @@ -557,7 +561,7 @@ public String getObjectInfo() { ", isVersionEnabled='" + isVersionEnabled + "'" + ", storageType='" + storageType + "'" + ", creationTime='" + creationTime + "'" + - ", usedBytes='" + usedBytes.sum() + "'" + + ", usedBytes='" + usedBytes + "'" + ", quotaInBytes='" + quotaInBytes + "'" + ", quotaInCounts='" + quotaInCounts + '\'' + sourceInfo + @@ -582,7 +586,7 @@ public boolean equals(Object o) { storageType == that.storageType && objectID == that.objectID && updateID == that.updateID && - usedBytes.sum() == that.usedBytes.sum() && + usedBytes == that.usedBytes && Objects.equals(sourceVolume, that.sourceVolume) && Objects.equals(sourceBucket, that.sourceBucket) && Objects.equals(metadata, that.metadata) && @@ -609,7 +613,7 @@ public String toString() { ", objectID=" + objectID + ", updateID=" + updateID + ", metadata=" + metadata + - ", usedBytes=" + usedBytes.sum() + + ", usedBytes=" + usedBytes + ", quotaInBytes=" + quotaInBytes + ", quotaInCounts=" + quotaInCounts + '}'; diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmVolumeArgs.java 
b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmVolumeArgs.java index fa7b69725656..13c67c8ad9bb 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmVolumeArgs.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmVolumeArgs.java @@ -23,7 +23,6 @@ import java.util.List; import java.util.Map; import java.util.Objects; -import java.util.concurrent.atomic.LongAdder; import org.apache.hadoop.ozone.OzoneAcl; import org.apache.hadoop.ozone.OzoneConsts; @@ -47,7 +46,6 @@ public final class OmVolumeArgs extends WithObjectID implements Auditable { private long quotaInBytes; private long quotaInCounts; private final OmOzoneAclMap aclMap; - private final LongAdder usedBytes = new LongAdder(); /** * Private constructor, constructed via builder. @@ -57,10 +55,9 @@ public final class OmVolumeArgs extends WithObjectID implements Auditable { * @param quotaInBytes - Volume Quota in bytes. * @param quotaInCounts - Volume Quota in counts. * @param metadata - metadata map for custom key/value data. - * @param usedBytes - Volume Quota Usage in bytes. * @param aclMap - User to access rights map. * @param creationTime - Volume creation time. - * @param objectID - ID of this object. + * @param objectID - ID of this object. * @param updateID - A sequence number that denotes the last update on this * object. This is a monotonically increasing number. 
*/ @@ -68,15 +65,14 @@ public final class OmVolumeArgs extends WithObjectID implements Auditable { "builder."}) private OmVolumeArgs(String adminName, String ownerName, String volume, long quotaInBytes, long quotaInCounts, Map metadata, - long usedBytes, OmOzoneAclMap aclMap, long creationTime, - long modificationTime, long objectID, long updateID) { + OmOzoneAclMap aclMap, long creationTime, long modificationTime, + long objectID, long updateID) { this.adminName = adminName; this.ownerName = ownerName; this.volume = volume; this.quotaInBytes = quotaInBytes; this.quotaInCounts = quotaInCounts; this.metadata = metadata; - this.usedBytes.add(usedBytes); this.aclMap = aclMap; this.creationTime = creationTime; this.modificationTime = modificationTime; @@ -177,10 +173,6 @@ public OmOzoneAclMap getAclMap() { return aclMap; } - public LongAdder getUsedBytes() { - return usedBytes; - } - /** * Returns new builder class that builds a OmVolumeArgs. * @@ -204,8 +196,6 @@ public Map toAuditMap() { String.valueOf(this.quotaInCounts)); auditMap.put(OzoneConsts.OBJECT_ID, String.valueOf(this.getObjectID())); auditMap.put(OzoneConsts.UPDATE_ID, String.valueOf(this.getUpdateID())); - auditMap.put(OzoneConsts.USED_BYTES, - String.valueOf(this.usedBytes)); return auditMap; } @@ -241,7 +231,6 @@ public static class Builder { private OmOzoneAclMap aclMap; private long objectID; private long updateID; - private long usedBytes; /** * Sets the Object ID for this Object. 
@@ -319,11 +308,6 @@ public Builder addAllMetadata(Map additionalMetaData) { return this; } - public Builder setUsedBytes(long quotaUsage) { - this.usedBytes = quotaUsage; - return this; - } - public Builder addOzoneAcls(OzoneAclInfo acl) throws IOException { aclMap.addAcl(acl); return this; @@ -338,8 +322,8 @@ public OmVolumeArgs build() { Preconditions.checkNotNull(ownerName); Preconditions.checkNotNull(volume); return new OmVolumeArgs(adminName, ownerName, volume, quotaInBytes, - quotaInCounts, metadata, usedBytes, aclMap, creationTime, - modificationTime, objectID, updateID); + quotaInCounts, metadata, aclMap, creationTime, modificationTime, + objectID, updateID); } } @@ -359,7 +343,6 @@ public VolumeInfo getProtobuf() { .setModificationTime(modificationTime) .setObjectID(objectID) .setUpdateID(updateID) - .setUsedBytes(usedBytes.sum()) .build(); } @@ -374,7 +357,6 @@ public static OmVolumeArgs getFromProtobuf(VolumeInfo volInfo) volInfo.getQuotaInBytes(), volInfo.getQuotaInCounts(), KeyValueUtil.getFromProtobuf(volInfo.getMetadataList()), - volInfo.getUsedBytes(), aclMap, volInfo.getCreationTime(), volInfo.getModificationTime(), @@ -390,7 +372,6 @@ public String getObjectInfo() { ", owner='" + ownerName + '\'' + ", creationTime='" + creationTime + '\'' + ", quotaInBytes='" + quotaInBytes + '\'' + - ", usedBytes='" + usedBytes.sum() + '\'' + '}'; } @@ -406,7 +387,7 @@ public OmVolumeArgs copyObject() { OmOzoneAclMap cloneAclMap = aclMap.copyObject(); return new OmVolumeArgs(adminName, ownerName, volume, quotaInBytes, - quotaInCounts, cloneMetadata, usedBytes.sum(), cloneAclMap, - creationTime, modificationTime, objectID, updateID); + quotaInCounts, cloneMetadata, cloneAclMap, creationTime, + modificationTime, objectID, updateID); } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java index b7b75a4f8403..1a4300f95605 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java @@ -815,7 +815,6 @@ public void testPutKey() throws IOException { } @Test - @SuppressWarnings("methodlength") public void testCheckUsedBytesQuota() throws IOException { String volumeName = UUID.randomUUID().toString(); String bucketName = UUID.randomUUID().toString(); @@ -829,74 +828,10 @@ public void testCheckUsedBytesQuota() throws IOException { store.createVolume(volumeName); volume = store.getVolume(volumeName); - - // Test volume quota. - // Set quota In Bytes for a smaller value - store.getVolume(volumeName).setQuota( - OzoneQuota.parseQuota("1 Bytes", 100)); volume.createBucket(bucketName); OzoneBucket bucket = volume.getBucket(bucketName); - // Test volume quota: write key. - // The remaining quota does not satisfy a block size, so the write fails. - try { - writeKey(bucket, UUID.randomUUID().toString(), ONE, value, valueLength); - } catch (IOException ex) { - countException++; - GenericTestUtils.assertExceptionContains("QUOTA_EXCEEDED", ex); - } - // Write failed, volume usedBytes should be 0 - Assert.assertEquals(0L, store.getVolume(volumeName).getUsedBytes()); - - // Test volume quota: write file. - // The remaining quota does not satisfy a block size, so the write fails. - try { - writeFile(bucket, UUID.randomUUID().toString(), ONE, value, 0); - } catch (IOException ex) { - countException++; - GenericTestUtils.assertExceptionContains("QUOTA_EXCEEDED", ex); - } - // Write failed, volume usedBytes should be 0 - Assert.assertEquals(0L, store.getVolume(volumeName).getUsedBytes()); - - // Test volume quota: write key(with two blocks), test allocateBlock fails. 
- store.getVolume(volumeName).setQuota( - OzoneQuota.parseQuota(blockSize + "Bytes", 100)); - try { - OzoneOutputStream out = bucket.createKey(UUID.randomUUID().toString(), - valueLength, STAND_ALONE, ONE, new HashMap<>()); - for (int i = 0; i <= blockSize / value.length(); i++) { - out.write(value.getBytes()); - } - out.close(); - } catch (IOException ex) { - countException++; - GenericTestUtils.assertExceptionContains("QUOTA_EXCEEDED", ex); - } - // AllocateBlock failed, volume usedBytes should be 1 * blockSize. - Assert.assertEquals(blockSize, store.getVolume(volumeName).getUsedBytes()); - - // Test volume quota: write large key(with five blocks), the first four - // blocks will succeed,while the later block will fail. - store.getVolume(volumeName).setQuota( - OzoneQuota.parseQuota(5 * blockSize + "Bytes", 100)); - try { - OzoneOutputStream out = bucket.createKey(UUID.randomUUID().toString(), - valueLength, STAND_ALONE, ONE, new HashMap<>()); - for (int i = 0; i <= (4 * blockSize) / value.length(); i++) { - out.write(value.getBytes()); - } - out.close(); - } catch (IOException ex) { - countException++; - GenericTestUtils.assertExceptionContains("QUOTA_EXCEEDED", ex); - } - // AllocateBlock failed, volume usedBytes should be (4 + 1) * blockSize - Assert.assertEquals(5 * blockSize, - store.getVolume(volumeName).getUsedBytes()); - // Test bucket quota. 
- // Set quota In Bytes for a smaller value store.getVolume(volumeName).setQuota( OzoneQuota.parseQuota(Long.MAX_VALUE + " Bytes", 100)); bucketName = UUID.randomUUID().toString(); @@ -947,229 +882,7 @@ public void testCheckUsedBytesQuota() throws IOException { Assert.assertEquals(4 * blockSize, store.getVolume(volumeName).getBucket(bucketName).getUsedBytes()); - Assert.assertEquals(7, countException); - } - - @Test - @SuppressWarnings("methodlength") - public void testVolumeUsedBytes() throws IOException { - String volumeName = UUID.randomUUID().toString(); - String bucketName = UUID.randomUUID().toString(); - OzoneVolume volume = null; - OzoneBucket bucket = null; - - int blockSize = (int) ozoneManager.getConfiguration().getStorageSize( - OZONE_SCM_BLOCK_SIZE, OZONE_SCM_BLOCK_SIZE_DEFAULT, StorageUnit.BYTES); - - // Write data larger than one block size. - String value = generateData(blockSize + 100, - (byte) RandomUtils.nextLong()).toString(); - - int valueLength = value.getBytes().length; - long currentQuotaUsage = 0L; - store.createVolume(volumeName); - volume = store.getVolume(volumeName); - // The initial value should be 0 - Assert.assertEquals(0L, volume.getUsedBytes()); - volume.createBucket(bucketName); - bucket = volume.getBucket(bucketName); - - //Case1: Test the volumeUsedBytes of ONE replications. - String keyName1 = UUID.randomUUID().toString(); - writeKey(bucket, keyName1, ONE, value, valueLength); - volume = store.getVolume(volumeName); - bucket = volume.getBucket(bucketName); - Assert.assertEquals(valueLength, volume.getUsedBytes()); - Assert.assertEquals(valueLength, bucket.getUsedBytes()); - currentQuotaUsage += valueLength; - - // Case2: Test overwrite the same KeyName under ONE Replicates, the - // keyLocationVersions of the Key is 2. 
- String keyName2 = UUID.randomUUID().toString(); - writeKey(bucket, keyName2, ONE, value, valueLength); - // Overwrite the keyName2 - writeKey(bucket, keyName2, ONE, value, valueLength); - volume = store.getVolume(volumeName); - bucket = volume.getBucket(bucketName); - Assert.assertEquals(valueLength * 2 + currentQuotaUsage, - volume.getUsedBytes()); - Assert.assertEquals(valueLength * 2 + currentQuotaUsage, - bucket.getUsedBytes()); - currentQuotaUsage += valueLength * 2; - - // Case3: Test the volumeUsedBytes of THREE replications. - String keyName3 = UUID.randomUUID().toString(); - writeKey(bucket, keyName3, THREE, value, valueLength); - volume = store.getVolume(volumeName); - bucket = volume.getBucket(bucketName); - Assert.assertEquals(valueLength * 3 + currentQuotaUsage, - volume.getUsedBytes()); - Assert.assertEquals(valueLength * 3 + currentQuotaUsage, - bucket.getUsedBytes()); - currentQuotaUsage += valueLength * 3; - - // Case4: Test overwrite the same KeyName under THREE Replicates, the - // keyLocationVersions of the Key is 2. - String keyName4 = UUID.randomUUID().toString(); - writeKey(bucket, keyName4, THREE, value, valueLength); - // Overwrite the keyName4 - writeKey(bucket, keyName4, THREE, value, valueLength); - volume = store.getVolume(volumeName); - bucket = volume.getBucket(bucketName); - Assert.assertEquals(valueLength * 3 * 2 + currentQuotaUsage, - volume.getUsedBytes()); - Assert.assertEquals(valueLength * 3 * 2 + currentQuotaUsage, - bucket.getUsedBytes()); - currentQuotaUsage += valueLength * 3 * 2; - - //Case5: Do not specify the value Length, simulate HDFS api writing. - // Test the volumeUsedBytes of ONE replications. 
- String keyName5 = UUID.randomUUID().toString(); - writeFile(bucket, keyName5, ONE, value, 0); - volume = store.getVolume(volumeName); - bucket = volume.getBucket(bucketName); - Assert.assertEquals(valueLength + currentQuotaUsage, - volume.getUsedBytes()); - Assert.assertEquals(valueLength + currentQuotaUsage, - bucket.getUsedBytes()); - currentQuotaUsage += valueLength; - - // Case6: Do not specify the value Length, simulate HDFS api writing. - // Test overwrite the same KeyName under ONE Replicates, the - // keyLocationVersions of the Key is 2. - String keyName6 = UUID.randomUUID().toString(); - writeFile(bucket, keyName6, ONE, value, 0); - // Overwrite the keyName6 - writeFile(bucket, keyName6, ONE, value, 0); - volume = store.getVolume(volumeName); - bucket = volume.getBucket(bucketName); - Assert.assertEquals(valueLength * 2 + currentQuotaUsage, - volume.getUsedBytes()); - Assert.assertEquals(valueLength * 2 + currentQuotaUsage, - bucket.getUsedBytes()); - currentQuotaUsage += valueLength * 2; - - // Case7: Do not specify the value Length, simulate HDFS api writing. - // Test the volumeUsedBytes of THREE replications. - String keyName7 = UUID.randomUUID().toString(); - writeFile(bucket, keyName7, THREE, value, 0); - volume = store.getVolume(volumeName); - bucket = volume.getBucket(bucketName); - Assert.assertEquals(valueLength * 3 + currentQuotaUsage, - volume.getUsedBytes()); - Assert.assertEquals(valueLength * 3 + currentQuotaUsage, - bucket.getUsedBytes()); - currentQuotaUsage += valueLength * 3; - - // Case8: Do not specify the value Length, simulate HDFS api writing. - // Test overwrite the same KeyName under THREE Replicates, the - // keyLocationVersions of the Key is 2. 
- String keyName8 = UUID.randomUUID().toString(); - writeFile(bucket, keyName8, THREE, value, 0); - // Overwrite the keyName8 - writeFile(bucket, keyName8, THREE, value, 0); - volume = store.getVolume(volumeName); - bucket = volume.getBucket(bucketName); - Assert.assertEquals(valueLength * 3 * 2 + currentQuotaUsage, - volume.getUsedBytes()); - Assert.assertEquals(valueLength * 3 * 2 + currentQuotaUsage, - bucket.getUsedBytes()); - currentQuotaUsage += valueLength * 3 * 2; - - // Case9: Test volumeUsedBytes when delete key of ONE replications. - bucket.deleteKey(keyName1); - volume = store.getVolume(volumeName); - bucket = volume.getBucket(bucketName); - Assert.assertEquals(currentQuotaUsage - valueLength, - volume.getUsedBytes()); - Assert.assertEquals(currentQuotaUsage - valueLength, - bucket.getUsedBytes()); - currentQuotaUsage -= valueLength; - - // Case10: Test volumeUsedBytes when delete key of THREE - // replications. - bucket.deleteKey(keyName3); - volume = store.getVolume(volumeName); - bucket = volume.getBucket(bucketName); - Assert.assertEquals(currentQuotaUsage - valueLength * 3, - volume.getUsedBytes()); - Assert.assertEquals(currentQuotaUsage - valueLength * 3, - bucket.getUsedBytes()); - currentQuotaUsage -= valueLength * 3; - - // Case11: Test volumeUsedBytes when Test Delete keys. 
At this - // point all keys are deleted, volumeUsedBytes should be 0 - List keyList = new ArrayList<>(); - keyList.add(keyName2); - keyList.add(keyName4); - keyList.add(keyName5); - keyList.add(keyName6); - keyList.add(keyName7); - keyList.add(keyName8); - bucket.deleteKeys(keyList); - volume = store.getVolume(volumeName); - bucket = volume.getBucket(bucketName); - Assert.assertEquals(0, volume.getUsedBytes()); - Assert.assertEquals(0, bucket.getUsedBytes()); - } - - @Test - public void testVolumeQuotaWithMultiThread() throws IOException, - InterruptedException{ - String volumeName = UUID.randomUUID().toString(); - - int blockSize = (int) ozoneManager.getConfiguration().getStorageSize( - OZONE_SCM_BLOCK_SIZE, OZONE_SCM_BLOCK_SIZE_DEFAULT, StorageUnit.BYTES); - // Write data larger than one block size. - String value = generateData(blockSize + 100, - (byte) RandomUtils.nextLong()).toString(); - - int valueLength = value.getBytes().length; - long currentQuotaUsage = 0L; - store.createVolume(volumeName); - OzoneVolume volume = store.getVolume(volumeName); - // The initial value should be 0 - Assert.assertEquals(0L, volume.getUsedBytes()); - - CountDownLatch latch = new CountDownLatch(2); - AtomicInteger failCount = new AtomicInteger(0); - - // Multiple threads write different buckets and ensure that the volume - // quota is correct. 
- Runnable r = () -> { - try { - for (int i = 0; i < 10; i++) { - String keyName = UUID.randomUUID().toString(); - String bucketName = UUID.randomUUID().toString(); - volume.createBucket(bucketName); - OzoneBucket bucket = volume.getBucket(bucketName); - OzoneOutputStream out = bucket.createKey(keyName, valueLength, - STAND_ALONE, ONE, new HashMap<>()); - out.write(value.getBytes()); - out.close(); - } - latch.countDown(); - } catch (IOException ex) { - latch.countDown(); - failCount.incrementAndGet(); - } - }; - - Thread thread1 = new Thread(r); - Thread thread2 = new Thread(r); - - thread1.start(); - thread2.start(); - - latch.await(6000, TimeUnit.SECONDS); - - if (failCount.get() > 0) { - fail("testVolumeQuotaWithMultiThread failed"); - } - currentQuotaUsage += valueLength * 10 * 2; - Assert.assertEquals(currentQuotaUsage, - store.getVolume(volumeName).getUsedBytes()); - + Assert.assertEquals(3, countException); } private void writeKey(OzoneBucket bucket, String keyName, @@ -1203,8 +916,6 @@ public void testUsedBytesWithUploadPart() throws IOException { store.createVolume(volumeName); OzoneVolume volume = store.getVolume(volumeName); - // The initial value should be 0 - Assert.assertEquals(0L, volume.getUsedBytes()); volume.createBucket(bucketName); OzoneBucket bucket = volume.getBucket(bucketName); OmMultipartInfo multipartInfo = bucket.initiateMultipartUpload(keyName, @@ -1223,14 +934,11 @@ public void testUsedBytesWithUploadPart() throws IOException { sampleData.length()); ozoneOutputStream.close(); - Assert.assertEquals(valueLength, store.getVolume(volumeName) - .getUsedBytes()); Assert.assertEquals(valueLength, store.getVolume(volumeName) .getBucket(bucketName).getUsedBytes()); - // Abort uploaded partKey and the usedBytes of volume should be 0. + // Abort uploaded partKey and the usedBytes of bucket should be 0. 
bucket.abortMultipartUpload(keyName, uploadID); - Assert.assertEquals(0, store.getVolume(volumeName).getUsedBytes()); Assert.assertEquals(0, store.getVolume(volumeName) .getBucket(bucketName).getUsedBytes()); } diff --git a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto index b347dc1b9df2..613838f09eba 100644 --- a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto +++ b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto @@ -364,7 +364,6 @@ message VolumeInfo { optional uint64 updateID = 9; optional uint64 modificationTime = 10; optional uint64 quotaInCounts = 11; - optional uint64 usedBytes = 12; } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketSetPropertyRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketSetPropertyRequest.java index 583facbc0fca..415466138e5b 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketSetPropertyRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketSetPropertyRequest.java @@ -48,16 +48,11 @@ import org.apache.hadoop.ozone.om.response.bucket.OMBucketSetPropertyResponse; import org.apache.hadoop.ozone.om.response.OMClientResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .BucketArgs; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .OMRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .OMResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .SetBucketPropertyRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .SetBucketPropertyResponse; +import 
org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.BucketArgs; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SetBucketPropertyRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SetBucketPropertyResponse; import org.apache.hadoop.hdds.utils.db.cache.CacheKey; import org.apache.hadoop.hdds.utils.db.cache.CacheValue; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequest.java index 9a7f31aece9c..782cb963df28 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequest.java @@ -284,7 +284,6 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, * ozoneManager.getScmBlockSize() * omKeyInfo.getFactor().getNumber(); checkBucketQuotaInBytes(omBucketInfo, preAllocatedSpace); - checkVolumeQuotaInBytes(omVolumeArgs, preAllocatedSpace); // Add to cache entry can be done outside of lock for this openKey. // Even if bucket gets deleted, when commitKey we shall identify if @@ -299,9 +298,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, bucketName, Optional.absent(), Optional.of(missingParentInfos), trxnLogIndex); - // update usedBytes atomically. 
- omVolumeArgs.getUsedBytes().add(preAllocatedSpace); - omBucketInfo.getUsedBytes().add(preAllocatedSpace); + omBucketInfo.incrUsedBytes(preAllocatedSpace); // Prepare response omResponse.setCreateFileResponse(CreateFileResponse.newBuilder() @@ -310,7 +307,8 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, .setOpenVersion(openVersion).build()) .setCmdType(Type.CreateFile); omClientResponse = new OMFileCreateResponse(omResponse.build(), - omKeyInfo, missingParentInfos, clientID, omVolumeArgs, omBucketInfo); + omKeyInfo, missingParentInfos, clientID, omVolumeArgs, + omBucketInfo.copyObject()); result = Result.SUCCESS; } catch (IOException ex) { diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMAllocateBlockRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMAllocateBlockRequest.java index 194e7ef9de1c..1fd4b0754679 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMAllocateBlockRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMAllocateBlockRequest.java @@ -56,6 +56,7 @@ import org.apache.hadoop.hdds.utils.db.cache.CacheValue; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND; +import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK; /** * Handles allocate block request. 
@@ -168,6 +169,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, IOException exception = null; OmVolumeArgs omVolumeArgs = null; OmBucketInfo omBucketInfo = null; + boolean acquiredLock = false; try { keyArgs = resolveBucketLink(ozoneManager, keyArgs, auditMap); @@ -195,13 +197,15 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, List newLocationList = Collections.singletonList( OmKeyLocationInfo.getFromProtobuf(blockLocation)); omVolumeArgs = getVolumeInfo(omMetadataManager, volumeName); + + acquiredLock = omMetadataManager.getLock().acquireWriteLock(BUCKET_LOCK, + volumeName, bucketName); omBucketInfo = getBucketInfo(omMetadataManager, volumeName, bucketName); // check bucket and volume quota long preAllocatedSpace = newLocationList.size() * ozoneManager.getScmBlockSize() * openKeyInfo.getFactor().getNumber(); checkBucketQuotaInBytes(omBucketInfo, preAllocatedSpace); - checkVolumeQuotaInBytes(omVolumeArgs, preAllocatedSpace); // Append new block openKeyInfo.appendNewBlocks(newLocationList, false); @@ -216,14 +220,12 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, new CacheKey<>(openKeyName), new CacheValue<>(Optional.of(openKeyInfo), trxnLogIndex)); - // update usedBytes atomically. 
- omVolumeArgs.getUsedBytes().add(preAllocatedSpace); - omBucketInfo.getUsedBytes().add(preAllocatedSpace); + omBucketInfo.incrUsedBytes(preAllocatedSpace); omResponse.setAllocateBlockResponse(AllocateBlockResponse.newBuilder() .setKeyLocation(blockLocation).build()); omClientResponse = new OMAllocateBlockResponse(omResponse.build(), - openKeyInfo, clientID, omVolumeArgs, omBucketInfo); + openKeyInfo, clientID, omVolumeArgs, omBucketInfo.copyObject()); LOG.debug("Allocated block for Volume:{}, Bucket:{}, OpenKey:{}", volumeName, bucketName, openKeyName); @@ -237,6 +239,10 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, } finally { addResponseToDoubleBuffer(trxnLogIndex, omClientResponse, omDoubleBufferHelper); + if (acquiredLock) { + omMetadataManager.getLock().releaseWriteLock(BUCKET_LOCK, volumeName, + bucketName); + } } auditLog(auditLogger, buildAuditMessage(OMAction.ALLOCATE_BLOCK, auditMap, diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java index b1d47de0d281..c914bc0e512c 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java @@ -158,7 +158,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, validateBucketAndVolume(omMetadataManager, volumeName, bucketName); - // Check for directory exists with same name, if it exists throw error. + // Check for directory exists with same name, if it exists throw error. 
if (ozoneManager.getEnableFileSystemPaths()) { if (checkDirectoryAlreadyExists(volumeName, bucketName, keyName, omMetadataManager)) { @@ -167,7 +167,6 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, } } - omKeyInfo = omMetadataManager.getOpenKeyTable().get(dbOpenKey); if (omKeyInfo == null) { throw new OMException("Failed to commit key, as " + dbOpenKey + @@ -196,18 +195,17 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, int factor = omKeyInfo.getFactor().getNumber(); omVolumeArgs = getVolumeInfo(omMetadataManager, volumeName); omBucketInfo = getBucketInfo(omMetadataManager, volumeName, bucketName); - // update usedBytes atomically. // Block was pre-requested and UsedBytes updated when createKey and // AllocatedBlock. The space occupied by the Key shall be based on // the actual Key size, and the total Block size applied before should // be subtracted. long correctedSpace = omKeyInfo.getDataSize() * factor - locationInfoList.size() * scmBlockSize * factor; - omVolumeArgs.getUsedBytes().add(correctedSpace); - omBucketInfo.getUsedBytes().add(correctedSpace); + omBucketInfo.incrUsedBytes(correctedSpace); omClientResponse = new OMKeyCommitResponse(omResponse.build(), - omKeyInfo, dbOzoneKey, dbOpenKey, omVolumeArgs, omBucketInfo); + omKeyInfo, dbOzoneKey, dbOpenKey, omVolumeArgs, + omBucketInfo.copyObject()); result = Result.SUCCESS; } catch (IOException ex) { diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequest.java index 5ec79b5c4e7f..869a864cc871 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequest.java @@ -300,7 +300,6 @@ public OMClientResponse validateAndUpdateCache(OzoneManager 
ozoneManager, * omKeyInfo.getFactor().getNumber(); // check bucket and volume quota checkBucketQuotaInBytes(omBucketInfo, preAllocatedSpace); - checkVolumeQuotaInBytes(omVolumeArgs, preAllocatedSpace); // Add to cache entry can be done outside of lock for this openKey. // Even if bucket gets deleted, when commitKey we shall identify if @@ -309,8 +308,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, new CacheKey<>(dbOpenKeyName), new CacheValue<>(Optional.of(omKeyInfo), trxnLogIndex)); - omVolumeArgs.getUsedBytes().add(preAllocatedSpace); - omBucketInfo.getUsedBytes().add(preAllocatedSpace); + omBucketInfo.incrUsedBytes(preAllocatedSpace); // Prepare response omResponse.setCreateKeyResponse(CreateKeyResponse.newBuilder() @@ -319,7 +317,8 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, .setOpenVersion(openVersion).build()) .setCmdType(Type.CreateKey); omClientResponse = new OMKeyCreateResponse(omResponse.build(), - omKeyInfo, missingParentInfos, clientID, omVolumeArgs, omBucketInfo); + omKeyInfo, missingParentInfos, clientID, omVolumeArgs, + omBucketInfo.copyObject()); result = Result.SUCCESS; } catch (IOException ex) { diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequest.java index 43d9c2ddbb0c..a99c02bc0094 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequest.java @@ -147,9 +147,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, omBucketInfo = getBucketInfo(omMetadataManager, volumeName, bucketName); long quotaReleased = sumBlockLengths(omKeyInfo); - // update usedBytes atomically. 
- omVolumeArgs.getUsedBytes().add(-quotaReleased); - omBucketInfo.getUsedBytes().add(-quotaReleased); + omBucketInfo.incrUsedBytes(-quotaReleased); // No need to add cache entries to delete table. As delete table will // be used by DeleteKeyService only, not used for any client response @@ -158,7 +156,8 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, omClientResponse = new OMKeyDeleteResponse(omResponse .setDeleteKeyResponse(DeleteKeyResponse.newBuilder()).build(), - omKeyInfo, ozoneManager.isRatisEnabled(), omVolumeArgs, omBucketInfo); + omKeyInfo, ozoneManager.isRatisEnabled(), omVolumeArgs, + omBucketInfo.copyObject()); result = Result.SUCCESS; } catch (IOException ex) { diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java index ee48f9b3da21..e71e52b35879 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java @@ -569,27 +569,6 @@ protected FileEncryptionInfo getFileEncryptionInfo(KeyArgs keyArgs) { return encryptionInfo; } - /** - * Check volume quota in bytes. 
- * @param omVolumeArgs - * @param allocateSize - * @throws IOException - */ - protected void checkVolumeQuotaInBytes(OmVolumeArgs omVolumeArgs, - long allocateSize) throws IOException { - if (omVolumeArgs.getQuotaInBytes() > OzoneConsts.QUOTA_RESET) { - long usedBytes = omVolumeArgs.getUsedBytes().sum(); - long quotaInBytes = omVolumeArgs.getQuotaInBytes(); - if (quotaInBytes - usedBytes < allocateSize) { - throw new OMException("The DiskSpace quota of volume:" - + omVolumeArgs.getVolume() + "exceeded: quotaInBytes: " - + quotaInBytes + " Bytes but diskspace consumed: " + (usedBytes - + allocateSize) + " Bytes.", - OMException.ResultCodes.QUOTA_EXCEEDED); - } - } - } - /** * Check bucket quota in bytes. * @param omBucketInfo @@ -599,7 +578,7 @@ protected void checkVolumeQuotaInBytes(OmVolumeArgs omVolumeArgs, protected void checkBucketQuotaInBytes(OmBucketInfo omBucketInfo, long allocateSize) throws IOException { if (omBucketInfo.getQuotaInBytes() > OzoneConsts.QUOTA_RESET) { - long usedBytes = omBucketInfo.getUsedBytes().sum(); + long usedBytes = omBucketInfo.getUsedBytes(); long quotaInBytes = omBucketInfo.getQuotaInBytes(); if (quotaInBytes - usedBytes < allocateSize) { throw new OMException("The DiskSpace quota of bucket:" diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeysDeleteRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeysDeleteRequest.java index 71e15f541819..3dc22e8fa25b 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeysDeleteRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeysDeleteRequest.java @@ -170,16 +170,15 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, omKeyInfo.setUpdateID(trxnLogIndex, ozoneManager.isRatisEnabled()); quotaReleased += sumBlockLengths(omKeyInfo); } - // update usedBytes atomically. 
- omVolumeArgs.getUsedBytes().add(-quotaReleased); - omBucketInfo.getUsedBytes().add(-quotaReleased); + omBucketInfo.incrUsedBytes(-quotaReleased); omClientResponse = new OMKeysDeleteResponse(omResponse .setDeleteKeysResponse(DeleteKeysResponse.newBuilder() .setStatus(deleteStatus).setUnDeletedKeys(unDeletedKeys)) .setStatus(deleteStatus ? OK : PARTIAL_DELETE) .setSuccess(deleteStatus).build(), omKeyInfoList, - ozoneManager.isRatisEnabled(), omVolumeArgs, omBucketInfo); + ozoneManager.isRatisEnabled(), omVolumeArgs, + omBucketInfo.copyObject()); result = Result.SUCCESS; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadAbortRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadAbortRequest.java index 8b53e7045dcc..42dc85d705e4 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadAbortRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadAbortRequest.java @@ -152,8 +152,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, quotaReleased += iterPartKeyInfo.getPartKeyInfo().getDataSize() * keyFactor; } - omVolumeArgs.getUsedBytes().add(-quotaReleased); - omBucketInfo.getUsedBytes().add(-quotaReleased); + omBucketInfo.incrUsedBytes(-quotaReleased); // Update cache of openKeyTable and multipartInfo table. 
// No need to add the cache entries to delete table, as the entries @@ -169,7 +168,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, omResponse.setAbortMultiPartUploadResponse( MultipartUploadAbortResponse.newBuilder()).build(), multipartKey, multipartKeyInfo, ozoneManager.isRatisEnabled(), - omVolumeArgs, omBucketInfo); + omVolumeArgs, omBucketInfo.copyObject()); result = Result.SUCCESS; } catch (IOException ex) { diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequest.java index f471de4eab47..78c8623ebf3c 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequest.java @@ -118,6 +118,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, Result result = null; OmVolumeArgs omVolumeArgs = null; OmBucketInfo omBucketInfo = null; + OmBucketInfo copyBucketInfo = null; try { keyArgs = resolveBucketLink(ozoneManager, keyArgs, auditMap); volumeName = keyArgs.getVolumeName(); @@ -215,15 +216,13 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, int factor = omKeyInfo.getFactor().getNumber(); omVolumeArgs = getVolumeInfo(omMetadataManager, volumeName); omBucketInfo = getBucketInfo(omMetadataManager, volumeName, bucketName); - // update usedBytes atomically. // Block was pre-requested and UsedBytes updated when createKey and // AllocatedBlock. The space occupied by the Key shall be based on // the actual Key size, and the total Block size applied before should // be subtracted. 
long correctedSpace = omKeyInfo.getDataSize() * factor - keyArgs.getKeyLocationsList().size() * scmBlockSize * factor; - omVolumeArgs.getUsedBytes().add(correctedSpace); - omBucketInfo.getUsedBytes().add(correctedSpace); + omBucketInfo.incrUsedBytes(correctedSpace); omResponse.setCommitMultiPartUploadResponse( MultipartCommitUploadPartResponse.newBuilder() @@ -231,7 +230,8 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, omClientResponse = new S3MultipartUploadCommitPartResponse( omResponse.build(), multipartKey, openKey, multipartKeyInfo, oldPartKeyInfo, omKeyInfo, - ozoneManager.isRatisEnabled(), omVolumeArgs, omBucketInfo); + ozoneManager.isRatisEnabled(), omVolumeArgs, + omBucketInfo.copyObject()); result = Result.SUCCESS; } catch (IOException ex) { @@ -240,7 +240,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, omClientResponse = new S3MultipartUploadCommitPartResponse( createErrorOMResponse(omResponse, exception), multipartKey, openKey, multipartKeyInfo, oldPartKeyInfo, omKeyInfo, - ozoneManager.isRatisEnabled(), omVolumeArgs, omBucketInfo); + ozoneManager.isRatisEnabled(), omVolumeArgs, copyBucketInfo); } finally { addResponseToDoubleBuffer(trxnLogIndex, omClientResponse, omDoubleBufferHelper); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMAllocateBlockResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMAllocateBlockResponse.java index 3995b5572dab..acc43eef8981 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMAllocateBlockResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMAllocateBlockResponse.java @@ -72,10 +72,6 @@ public void addToDBBatch(OMMetadataManager omMetadataManager, omMetadataManager.getOpenKeyTable().putWithBatch(batchOperation, openKey, omKeyInfo); - // update volume usedBytes. 
- omMetadataManager.getVolumeTable().putWithBatch(batchOperation, - omMetadataManager.getVolumeKey(omVolumeArgs.getVolume()), - omVolumeArgs); // update bucket usedBytes. omMetadataManager.getBucketTable().putWithBatch(batchOperation, omMetadataManager.getBucketKey(omVolumeArgs.getVolume(), diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCommitResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCommitResponse.java index aede2ec18e91..8e2f6dce8070 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCommitResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCommitResponse.java @@ -76,10 +76,6 @@ public void addToDBBatch(OMMetadataManager omMetadataManager, omMetadataManager.getKeyTable().putWithBatch(batchOperation, ozoneKeyName, omKeyInfo); - // update volume usedBytes. - omMetadataManager.getVolumeTable().putWithBatch(batchOperation, - omMetadataManager.getVolumeKey(omVolumeArgs.getVolume()), - omVolumeArgs); // update bucket usedBytes. omMetadataManager.getBucketTable().putWithBatch(batchOperation, omMetadataManager.getBucketKey(omVolumeArgs.getVolume(), diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCreateResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCreateResponse.java index 2ae53591849a..5e4d432e8e46 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCreateResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCreateResponse.java @@ -99,10 +99,6 @@ protected void addToDBBatch(OMMetadataManager omMetadataManager, omMetadataManager.getOpenKeyTable().putWithBatch(batchOperation, openKey, omKeyInfo); - // update volume usedBytes. 
- omMetadataManager.getVolumeTable().putWithBatch(batchOperation, - omMetadataManager.getVolumeKey(omVolumeArgs.getVolume()), - omVolumeArgs); // update bucket usedBytes. omMetadataManager.getBucketTable().putWithBatch(batchOperation, omMetadataManager.getBucketKey(omVolumeArgs.getVolume(), diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyDeleteResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyDeleteResponse.java index f9c6d185f398..e85670154074 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyDeleteResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyDeleteResponse.java @@ -73,10 +73,6 @@ public void addToDBBatch(OMMetadataManager omMetadataManager, addDeletionToBatch(omMetadataManager, batchOperation, keyTable, ozoneKey, omKeyInfo); - // update volume usedBytes. - omMetadataManager.getVolumeTable().putWithBatch(batchOperation, - omMetadataManager.getVolumeKey(omVolumeArgs.getVolume()), - omVolumeArgs); // update bucket usedBytes. omMetadataManager.getBucketTable().putWithBatch(batchOperation, omMetadataManager.getBucketKey(omVolumeArgs.getVolume(), diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeysDeleteResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeysDeleteResponse.java index bf1a8ddfe387..00a23fcbbc86 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeysDeleteResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeysDeleteResponse.java @@ -89,10 +89,6 @@ public void addToDBBatch(OMMetadataManager omMetadataManager, deleteKey, omKeyInfo); } - // update volume usedBytes. 
- omMetadataManager.getVolumeTable().putWithBatch(batchOperation, - omMetadataManager.getVolumeKey(omVolumeArgs.getVolume()), - omVolumeArgs); // update bucket usedBytes. omMetadataManager.getBucketTable().putWithBatch(batchOperation, omMetadataManager.getBucketKey(omVolumeArgs.getVolume(), diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadAbortResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadAbortResponse.java index 73ae49eeec76..b11a7327306d 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadAbortResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadAbortResponse.java @@ -104,10 +104,6 @@ public void addToDBBatch(OMMetadataManager omMetadataManager, omMetadataManager.getDeletedTable().putWithBatch(batchOperation, partKeyInfo.getPartName(), repeatedOmKeyInfo); - // update volume usedBytes. - omMetadataManager.getVolumeTable().putWithBatch(batchOperation, - omMetadataManager.getVolumeKey(omVolumeArgs.getVolume()), - omVolumeArgs); // update bucket usedBytes. 
omMetadataManager.getBucketTable().putWithBatch(batchOperation, omMetadataManager.getBucketKey(omVolumeArgs.getVolume(), diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCommitPartResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCommitPartResponse.java index 7e8ac55a6dd5..496175fc3822 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCommitPartResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCommitPartResponse.java @@ -151,10 +151,7 @@ public void addToDBBatch(OMMetadataManager omMetadataManager, // safely delete part key info from open key table. omMetadataManager.getOpenKeyTable().deleteWithBatch(batchOperation, openKey); - // update volume usedBytes. - omMetadataManager.getVolumeTable().putWithBatch(batchOperation, - omMetadataManager.getVolumeKey(omVolumeArgs.getVolume()), - omVolumeArgs); + // update bucket usedBytes. 
omMetadataManager.getBucketTable().putWithBatch(batchOperation, omMetadataManager.getBucketKey(omVolumeArgs.getVolume(), diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketSetPropertyRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketSetPropertyRequest.java index c315ff0081a5..6011a973157b 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketSetPropertyRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketSetPropertyRequest.java @@ -148,7 +148,8 @@ public void testValidateAndUpdateCacheWithQuota() throws Exception { } catch (IllegalArgumentException ex) { countException++; GenericTestUtils.assertExceptionContains( - "Total buckets quota in this volume should not be", ex); + "Total buckets quota in this volume should not be " + + "greater than volume quota", ex); } Assert.assertEquals(1, countException); } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeSetQuotaRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeSetQuotaRequest.java index f1e2400c7eea..340c2f5cee46 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeSetQuotaRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeSetQuotaRequest.java @@ -187,7 +187,8 @@ public void testValidateAndUpdateCacheWithQuota() throws Exception { } catch (IllegalArgumentException ex) { countException++; GenericTestUtils.assertExceptionContains( - "Total buckets quota in this volume should not be", ex); + "Total buckets quota in this volume should not be " + + "greater than volume quota", ex); } Assert.assertEquals(1, countException); }