Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -89,6 +89,7 @@
import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils;
import org.apache.hadoop.ozone.container.keyvalue.helpers.KeyValueContainerLocationUtil;
import org.apache.hadoop.ozone.om.OMConfigKeys;
import org.apache.hadoop.ozone.om.OMMetadataManager;
import org.apache.hadoop.ozone.om.OmFailoverProxyUtil;
import org.apache.hadoop.ozone.om.OzoneManager;
Expand All @@ -104,6 +105,7 @@
import org.apache.hadoop.ozone.om.helpers.OmMultipartInfo;
import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadCompleteInfo;
import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
import org.apache.hadoop.ozone.om.helpers.QuotaUtil;
import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo;
import org.apache.hadoop.ozone.om.helpers.BucketLayout;
Expand Down Expand Up @@ -1082,6 +1084,7 @@ private void bucketUsedBytesTestHelper(BucketLayout bucketLayout)
/**
 * Supplies every bucket layout exercised by the parameterized tests
 * (@MethodSource("bucketLayouts")).
 */
static Stream<BucketLayout> bucketLayouts() {
  BucketLayout[] layouts = {
      BucketLayout.OBJECT_STORE,
      BucketLayout.LEGACY,
      BucketLayout.FILE_SYSTEM_OPTIMIZED,
  };
  return Stream.of(layouts);
}
Expand Down Expand Up @@ -1172,6 +1175,101 @@ private void bucketQuotaTestHelper(int keyLength, ReplicationConfig repConfig)
store.getVolume(volumeName).getBucket(bucketName).getUsedBytes());
}

/**
 * Verifies that a bucket's usedNamespace counter tracks the number of
 * live keys and directories: +1 per new key/directory, unchanged on
 * overwrite, -1 per delete, and one entry per path component for a
 * multi-component directory path.
 *
 * Fixes over the previous version: the {@link RpcClient} is now closed
 * (it was leaked), and the misplaced "create a directory twice" comment
 * (the code actually creates a second, distinct directory) is corrected.
 */
@ParameterizedTest
@MethodSource("bucketLayouts")
public void testBucketUsedNamespace(BucketLayout layout) throws IOException {
  String volumeName = UUID.randomUUID().toString();
  String bucketName = UUID.randomUUID().toString();
  String value = "sample value";
  int valueLength = value.getBytes(UTF_8).length;
  store.createVolume(volumeName);
  OzoneVolume volume = store.getVolume(volumeName);
  BucketArgs bucketArgs = BucketArgs.newBuilder()
      .setBucketLayout(layout)
      .build();
  volume.createBucket(bucketName, bucketArgs);
  OzoneBucket bucket = volume.getBucket(bucketName);
  String keyName1 = UUID.randomUUID().toString();
  String keyName2 = UUID.randomUUID().toString();

  writeKey(bucket, keyName1, ONE, value, valueLength);
  Assert.assertEquals(1L, getBucketUsedNamespace(volumeName, bucketName));
  // Overwriting an existing key must not increase usedNamespace again.
  writeKey(bucket, keyName1, ONE, value, valueLength);
  Assert.assertEquals(1L, getBucketUsedNamespace(volumeName, bucketName));
  writeKey(bucket, keyName2, ONE, value, valueLength);
  Assert.assertEquals(2L, getBucketUsedNamespace(volumeName, bucketName));
  bucket.deleteKey(keyName1);
  Assert.assertEquals(1L, getBucketUsedNamespace(volumeName, bucketName));
  bucket.deleteKey(keyName2);
  Assert.assertEquals(0L, getBucketUsedNamespace(volumeName, bucketName));

  // Directory operations go through the raw RpcClient; close it when
  // done so the underlying OM connection is not leaked.
  RpcClient client = new RpcClient(cluster.getConf(), null);
  try {
    String directoryName1 = UUID.randomUUID().toString();
    String directoryName2 = UUID.randomUUID().toString();

    client.createDirectory(volumeName, bucketName, directoryName1);
    Assert.assertEquals(1L, getBucketUsedNamespace(volumeName, bucketName));
    // Each distinct directory adds exactly one namespace entry.
    client.createDirectory(volumeName, bucketName, directoryName2);
    Assert.assertEquals(2L, getBucketUsedNamespace(volumeName, bucketName));
    client.deleteKey(volumeName, bucketName,
        OzoneFSUtils.addTrailingSlashIfNeeded(directoryName1), false);
    Assert.assertEquals(1L, getBucketUsedNamespace(volumeName, bucketName));
    client.deleteKey(volumeName, bucketName,
        OzoneFSUtils.addTrailingSlashIfNeeded(directoryName2), false);
    Assert.assertEquals(0L, getBucketUsedNamespace(volumeName, bucketName));

    // A multi-component path counts one entry per path component.
    String multiComponentsDir = "dir1/dir2/dir3/dir4";
    client.createDirectory(volumeName, bucketName, multiComponentsDir);
    Assert.assertEquals(OzoneFSUtils.getFileCount(multiComponentsDir),
        getBucketUsedNamespace(volumeName, bucketName));
  } finally {
    client.close();
  }
}

@ParameterizedTest
@MethodSource("bucketLayouts")
public void testMissingParentBucketUsedNamespace(BucketLayout layout)
    throws IOException {
  // Writing a key whose parent directories do not exist creates the
  // missing directories only for FSO buckets, and for LEGACY buckets
  // when ozone.om.enable.filesystem.paths is set to true.
  String volumeName = UUID.randomUUID().toString();
  String bucketName = UUID.randomUUID().toString();
  String value = "sample value";
  int valueLength = value.getBytes(UTF_8).length;
  store.createVolume(volumeName);
  OzoneVolume volume = store.getVolume(volumeName);
  BucketArgs bucketArgs = BucketArgs.newBuilder()
      .setBucketLayout(layout)
      .build();
  volume.createBucket(bucketName, bucketArgs);
  OzoneBucket bucket = volume.getBucket(bucketName);

  if (layout.equals(BucketLayout.LEGACY)) {
    // NOTE(review): the cluster is already running at this point —
    // confirm that flipping OZONE_OM_ENABLE_FILESYSTEM_PATHS via
    // cluster.setConf() actually takes effect on the OM before the
    // key write below; TODO verify.
    OzoneConfiguration conf = cluster.getConf();
    conf.setBoolean(OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS, true);
    cluster.setConf(conf);
  }

  // The directories "/dir1", "/dir1/dir2/" and "/dir1/dir2/dir3/" are
  // expected to be created automatically (except for OBJECT_STORE).
  String missingParentKeyName = "dir1/dir2/dir3/file1";
  writeKey(bucket, missingParentKeyName, ONE, value, valueLength);
  if (layout.equals(BucketLayout.OBJECT_STORE)) {
    // For OBJECT_STORE buckets the missing parents are not created
    // automatically, so only the key itself is counted.
    Assert.assertEquals(1, getBucketUsedNamespace(volumeName, bucketName));
  } else {
    // One namespace entry per path component (3 dirs + 1 file).
    Assert.assertEquals(OzoneFSUtils.getFileCount(missingParentKeyName),
        getBucketUsedNamespace(volumeName, bucketName));
  }
}

/**
 * Reads the current usedNamespace counter of the given bucket.
 */
private long getBucketUsedNamespace(String volume, String bucket)
    throws IOException {
  OzoneVolume ozoneVolume = store.getVolume(volume);
  OzoneBucket ozoneBucket = ozoneVolume.getBucket(bucket);
  return ozoneBucket.getUsedNamespace();
}

@Test
public void testVolumeUsedNamespace() throws IOException {
String volumeName = UUID.randomUUID().toString();
Expand Down Expand Up @@ -1224,7 +1322,7 @@ public void testVolumeUsedNamespace() throws IOException {
}

@Test
public void testBucketUsedNamespace() throws IOException {
public void testBucketQuotaInNamespace() throws IOException {
String volumeName = UUID.randomUUID().toString();
String bucketName = UUID.randomUUID().toString();
String key1 = UUID.randomUUID().toString();
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -36,6 +36,7 @@
import org.apache.hadoop.ozone.om.helpers.OzoneAclUtil;
import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
import org.apache.hadoop.ozone.om.helpers.BucketLayout;
import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
import org.apache.hadoop.ozone.om.request.util.OmResponseUtil;
Expand Down Expand Up @@ -215,9 +216,13 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
OMFileRequest.addKeyTableCacheEntries(omMetadataManager, volumeName,
bucketName, Optional.of(dirKeyInfo),
Optional.of(missingParentInfos), trxnLogIndex);
OmBucketInfo omBucketInfo =
getBucketInfo(omMetadataManager, volumeName, bucketName);
omBucketInfo.incrUsedNamespace(numMissingParents + 1L);
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Should we be adding the number of missing keys to the used namespace? I can see that the call to logResult() below updates the metrics with that number.

result = Result.SUCCESS;
omClientResponse = new OMDirectoryCreateResponse(omResponse.build(),
dirKeyInfo, missingParentInfos, result, getBucketLayout());
dirKeyInfo, missingParentInfos, result, getBucketLayout(),
omBucketInfo.copyObject());
} else {
// omDirectoryResult == DIRECTORY_EXITS
result = Result.DIRECTORY_ALREADY_EXISTS;
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -27,6 +27,7 @@
import org.apache.hadoop.ozone.om.OzoneManager;
import org.apache.hadoop.ozone.om.exceptions.OMException;
import org.apache.hadoop.ozone.om.helpers.BucketLayout;
import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
import org.apache.hadoop.ozone.om.helpers.OzoneAclUtil;
import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
Expand Down Expand Up @@ -172,12 +173,15 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,

// total number of keys created.
numKeysCreated = missingParentInfos.size() + 1;
OmBucketInfo omBucketInfo =
getBucketInfo(omMetadataManager, volumeName, bucketName);
omBucketInfo.incrUsedNamespace(numKeysCreated);

result = OMDirectoryCreateRequest.Result.SUCCESS;
omClientResponse =
new OMDirectoryCreateResponseWithFSO(omResponse.build(),
volumeId, bucketId, dirInfo, missingParentInfos, result,
getBucketLayout());
getBucketLayout(), omBucketInfo.copyObject());
} else {
result = Result.DIRECTORY_ALREADY_EXISTS;
omResponse.setStatus(Status.DIRECTORY_ALREADY_EXISTS);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -319,6 +319,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
new CacheKey<>(dbOpenKeyName),
new CacheValue<>(Optional.of(omKeyInfo), trxnLogIndex));

omBucketInfo.incrUsedNamespace(numMissingParents);
// Prepare response
omResponse.setCreateKeyResponse(CreateKeyResponse.newBuilder()
.setKeyInfo(omKeyInfo.getNetworkProtobuf(getOmRequest().getVersion(),
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -204,6 +204,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
// Prepare response. Sets user given full key name in the 'keyName'
// attribute in response object.
int clientVersion = getOmRequest().getVersion();
omBucketInfo.incrUsedNamespace(numKeysCreated);
omResponse.setCreateKeyResponse(CreateKeyResponse.newBuilder()
.setKeyInfo(omFileInfo.getNetworkProtobuf(keyName, clientVersion,
keyArgs.getLatestVersionLocation()))
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -20,6 +20,7 @@

import org.apache.hadoop.ozone.om.OMMetadataManager;
import org.apache.hadoop.ozone.om.helpers.BucketLayout;
import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
import org.apache.hadoop.ozone.om.response.CleanupTableInfo;
import org.apache.hadoop.ozone.om.request.file.OMDirectoryCreateRequest.Result;
Expand Down Expand Up @@ -49,15 +50,17 @@ public class OMDirectoryCreateResponse extends OmKeyResponse {
private OmKeyInfo dirKeyInfo;
private List<OmKeyInfo> parentKeyInfos;
private Result result;
private OmBucketInfo bucketInfo;

/**
 * Builds a response for a create-directory request; on SUCCESS the
 * batch-commit path persists the directory, its implicitly created
 * parents, and the updated bucket info (carrying usedNamespace).
 *
 * @param omResponse     protobuf response to wrap
 * @param dirKeyInfo     key info of the directory that was created
 * @param parentKeyInfos key infos of parents created implicitly
 * @param result         outcome of the create-directory request
 * @param bucketLayout   layout of the target bucket
 * @param bucketInfo     bucket info with the updated usedNamespace
 */
public OMDirectoryCreateResponse(@Nonnull OMResponse omResponse,
    @Nonnull OmKeyInfo dirKeyInfo,
    @Nonnull List<OmKeyInfo> parentKeyInfos, @Nonnull Result result,
    @Nonnull BucketLayout bucketLayout, @Nonnull OmBucketInfo bucketInfo) {
  super(omResponse, bucketLayout);
  this.dirKeyInfo = dirKeyInfo;
  this.parentKeyInfos = parentKeyInfos;
  this.result = result;
  this.bucketInfo = bucketInfo;
}

/**
Expand Down Expand Up @@ -89,6 +92,10 @@ protected void addToDBBatch(OMMetadataManager omMetadataManager,
dirKeyInfo.getBucketName(), dirKeyInfo.getKeyName());
omMetadataManager.getKeyTable(getBucketLayout())
.putWithBatch(batchOperation, dirKey, dirKeyInfo);
String bucketKey = omMetadataManager.getBucketKey(
bucketInfo.getVolumeName(), bucketInfo.getBucketName());
omMetadataManager.getBucketTable().putWithBatch(batchOperation,
bucketKey, bucketInfo);
} else if (Result.DIRECTORY_ALREADY_EXISTS == result) {
// When directory already exists, we don't add it to cache. And it is
// not an error, in this case dirKeyInfo will be null.
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -21,6 +21,7 @@
import org.apache.hadoop.hdds.utils.db.BatchOperation;
import org.apache.hadoop.ozone.om.OMMetadataManager;
import org.apache.hadoop.ozone.om.helpers.BucketLayout;
import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
import org.apache.hadoop.ozone.om.request.file.OMDirectoryCreateRequest.Result;
import org.apache.hadoop.ozone.om.response.CleanupTableInfo;
Expand Down Expand Up @@ -49,18 +50,21 @@ public class OMDirectoryCreateResponseWithFSO extends OmKeyResponse {
private Result result;
private long volumeId;
private long bucketId;
private OmBucketInfo bucketInfo;

/**
 * Builds a response for an FSO create-directory request; on SUCCESS the
 * batch-commit path persists the directory, its implicitly created
 * parents, and the updated bucket info (carrying usedNamespace).
 *
 * Note: {@code @Nonnull} was removed from {@code volumeId} and
 * {@code bucketId} — the annotation is meaningless on primitive
 * parameters, which can never be null.
 *
 * @param omResponse   protobuf response to wrap
 * @param volumeId     object id of the containing volume
 * @param bucketId     object id of the containing bucket
 * @param dirInfo      directory info of the created directory
 * @param pDirInfos    directory infos of parents created implicitly
 * @param result       outcome of the create-directory request
 * @param bucketLayout layout of the target bucket
 * @param bucketInfo   bucket info with the updated usedNamespace
 */
@SuppressWarnings("checkstyle:ParameterNumber")
public OMDirectoryCreateResponseWithFSO(@Nonnull OMResponse omResponse,
    long volumeId, long bucketId,
    @Nonnull OmDirectoryInfo dirInfo,
    @Nonnull List<OmDirectoryInfo> pDirInfos, @Nonnull Result result,
    @Nonnull BucketLayout bucketLayout, @Nonnull OmBucketInfo bucketInfo) {
  super(omResponse, bucketLayout);
  this.dirInfo = dirInfo;
  this.parentDirInfos = pDirInfos;
  this.result = result;
  this.volumeId = volumeId;
  this.bucketId = bucketId;
  this.bucketInfo = bucketInfo;
}

/**
Expand Down Expand Up @@ -100,6 +104,10 @@ private void addToDirectoryTable(OMMetadataManager omMetadataManager,
dirInfo.getParentObjectID(), dirInfo.getName());
omMetadataManager.getDirectoryTable().putWithBatch(batchOperation, dirKey,
dirInfo);
String bucketKey = omMetadataManager.getBucketKey(
bucketInfo.getVolumeName(), bucketInfo.getBucketName());
omMetadataManager.getBucketTable().putWithBatch(batchOperation,
bucketKey, bucketInfo);
} else {
// When directory already exists, we don't add it to cache. And it is
// not an error, in this case dirKeyInfo will be null.
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -41,6 +41,7 @@
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.audit.AuditLogger;
import org.apache.hadoop.ozone.audit.AuditMessage;
import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
import org.apache.hadoop.ozone.om.OMConfigKeys;
import org.apache.hadoop.ozone.om.OMMetadataManager;
import org.apache.hadoop.ozone.om.OMMetrics;
Expand Down Expand Up @@ -156,6 +157,10 @@ public void testValidateAndUpdateCache() throws Exception {
.get(omMetadataManager.getOzoneDirKey(volumeName, bucketName, keyName))
!= null);

OmBucketInfo bucketInfo = omMetadataManager.getBucketTable()
.get(omMetadataManager.getBucketKey(volumeName, bucketName));
Assert.assertEquals(OzoneFSUtils.getFileCount(keyName),
bucketInfo.getUsedNamespace());
}

@Test
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -165,6 +165,11 @@ public void testValidateAndUpdateCache() throws Exception {
Assert.assertTrue(omClientResponse.getOMResponse().getStatus()
== OzoneManagerProtocolProtos.Status.OK);
verifyDirectoriesInDB(dirs, volumeId, bucketId);

OmBucketInfo bucketInfo = omMetadataManager.getBucketTable()
.get(omMetadataManager.getBucketKey(volumeName, bucketName));
Assert.assertEquals(OzoneFSUtils.getFileCount(keyName),
bucketInfo.getUsedNamespace());
}

@Test
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -20,14 +20,17 @@

import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.utils.db.Table;
import org.apache.hadoop.ozone.om.OMConfigKeys;
import org.apache.hadoop.ozone.om.OMMetadataManager;
import org.apache.hadoop.ozone.om.OmMetadataManagerImpl;
import org.apache.hadoop.ozone.om.helpers.BucketLayout;
import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
import org.apache.hadoop.ozone.om.request.OMRequestTestUtils;
import org.apache.hadoop.ozone.om.request.file.OMDirectoryCreateRequest.Result;
import org.apache.hadoop.ozone.om.response.TestOMResponseUtils;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
.OMResponse;
Expand All @@ -41,6 +44,7 @@

import java.util.ArrayList;
import java.util.UUID;
import java.util.concurrent.ThreadLocalRandom;

/**
* Tests OMDirectoryCreateResponse.
Expand Down Expand Up @@ -79,6 +83,13 @@ public void testAddToDBBatch() throws Exception {
bucketName, OzoneFSUtils.addTrailingSlashIfNeeded(keyName),
HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.ONE);

ThreadLocalRandom random = ThreadLocalRandom.current();
long usedNamespace = Math.abs(random.nextLong(Long.MAX_VALUE));
OmBucketInfo omBucketInfo = TestOMResponseUtils.createBucket(
volumeName, bucketName);
omBucketInfo = omBucketInfo.toBuilder()
.setUsedNamespace(usedNamespace).build();

OMResponse omResponse = OMResponse.newBuilder().setCreateDirectoryResponse(
OzoneManagerProtocolProtos.CreateDirectoryResponse.getDefaultInstance())
.setStatus(OzoneManagerProtocolProtos.Status.OK)
Expand All @@ -87,7 +98,7 @@ public void testAddToDBBatch() throws Exception {

OMDirectoryCreateResponse omDirectoryCreateResponse =
new OMDirectoryCreateResponse(omResponse, omKeyInfo,
new ArrayList<>(), Result.SUCCESS, getBucketLayout());
new ArrayList<>(), Result.SUCCESS, getBucketLayout(), omBucketInfo);

omDirectoryCreateResponse.addToDBBatch(omMetadataManager, batchOperation);

Expand All @@ -96,6 +107,12 @@ public void testAddToDBBatch() throws Exception {

Assert.assertNotNull(omMetadataManager.getKeyTable(getBucketLayout()).get(
omMetadataManager.getOzoneDirKey(volumeName, bucketName, keyName)));

Table.KeyValue<String, OmBucketInfo> keyValue =
omMetadataManager.getBucketTable().iterator().next();
Assert.assertEquals(omMetadataManager.getBucketKey(volumeName,
bucketName), keyValue.getKey());
Assert.assertEquals(usedNamespace, keyValue.getValue().getUsedNamespace());
}

public BucketLayout getBucketLayout() {
Expand Down
Loading