Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,8 @@

package org.apache.hadoop.ozone.recon;

import static org.apache.hadoop.hdds.client.ReplicationFactor.THREE;
import static org.apache.hadoop.hdds.client.ReplicationType.RATIS;
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ACL_ENABLED;
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL;
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_ITERATE_BATCH_SIZE;
Expand All @@ -29,6 +31,7 @@

import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
Expand All @@ -39,6 +42,9 @@
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdds.client.DefaultReplicationConfig;
import org.apache.hadoop.hdds.client.ECReplicationConfig;
import org.apache.hadoop.hdds.client.ReplicationConfig;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.scm.server.OzoneStorageContainerManager;
import org.apache.hadoop.hdds.utils.IOUtils;
Expand All @@ -52,6 +58,7 @@
import org.apache.hadoop.ozone.om.helpers.BucketLayout;
import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
import org.apache.hadoop.ozone.om.helpers.QuotaUtil;
import org.apache.hadoop.ozone.recon.api.OMDBInsightEndpoint;
import org.apache.hadoop.ozone.recon.api.types.KeyInsightInfoResponse;
import org.apache.hadoop.ozone.recon.api.types.NSSummary;
Expand All @@ -63,7 +70,9 @@
import org.junit.jupiter.api.AfterAll;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.Arguments;
import org.junit.jupiter.params.provider.MethodSource;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

Expand All @@ -77,42 +86,41 @@ public class TestReconInsightsForDeletedDirectories {
LoggerFactory.getLogger(TestReconInsightsForDeletedDirectories.class);

private static MiniOzoneCluster cluster;
private static FileSystem fs;
private static String volumeName;
private static String bucketName;
private FileSystem fs;
private static OzoneClient client;
private static ReconService recon;
private static OzoneConfiguration conf;

@BeforeAll
public static void init() throws Exception {
OzoneConfiguration conf = new OzoneConfiguration();
conf = new OzoneConfiguration();
conf.setInt(OZONE_DIR_DELETING_SERVICE_INTERVAL, 1000000);
conf.setTimeDuration(OZONE_BLOCK_DELETING_SERVICE_INTERVAL, 10000000,
TimeUnit.MILLISECONDS);
conf.setBoolean(OZONE_ACL_ENABLED, true);
recon = new ReconService(conf);
cluster = MiniOzoneCluster.newBuilder(conf)
.setNumDatanodes(3)
.setNumDatanodes(5)
.addService(recon)
.build();
cluster.waitForClusterToBeReady();
client = cluster.newClient();

// create a volume and a bucket to be used by OzoneFileSystem
OzoneBucket bucket = TestDataUtil.createVolumeAndBucket(client,
BucketLayout.FILE_SYSTEM_OPTIMIZED);
volumeName = bucket.getVolumeName();
bucketName = bucket.getName();

String rootPath = String.format("%s://%s.%s/",
OzoneConsts.OZONE_URI_SCHEME, bucketName, volumeName);

// Set the fs.defaultFS and start the filesystem
conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, rootPath);
// Set the number of keys to be processed during batch operate.
conf.setInt(OZONE_FS_ITERATE_BATCH_SIZE, 5);
}

fs = FileSystem.get(conf);
/**
 * Supplies the replication configurations (RATIS with factor THREE, and
 * EC RS-3-2-1024k) that the parameterized tests are executed against.
 *
 * @return the replication configurations wrapped as JUnit Arguments.
 */
static List<Arguments> replicationConfigs() {
  ReplicationConfig ratisThree = ReplicationConfig.fromTypeAndFactor(RATIS, THREE);
  ReplicationConfig erasureCoded = new ECReplicationConfig("RS-3-2-1024k");
  return Arrays.asList(Arguments.of(ratisThree), Arguments.of(erasureCoded));
}

@AfterAll
Expand All @@ -121,7 +129,6 @@ public static void teardown() {
if (cluster != null) {
cluster.shutdown();
}
IOUtils.closeQuietly(fs);
}

@AfterEach
Expand All @@ -133,6 +140,8 @@ public void cleanup() throws IOException {
fs.delete(fileStatus.getPath(), true);
}
});

IOUtils.closeQuietly(fs);
}

/**
Expand All @@ -144,9 +153,16 @@ public void cleanup() throws IOException {
* ├── ...
* └── file10
*/
@Test
public void testGetDeletedDirectoryInfo()
@ParameterizedTest
@MethodSource("replicationConfigs")
public void testGetDeletedDirectoryInfo(ReplicationConfig replicationConfig)
throws Exception {
OzoneBucket bucket = TestDataUtil.createVolumeAndBucket(client, BucketLayout.FILE_SYSTEM_OPTIMIZED,
new DefaultReplicationConfig(replicationConfig));
String rootPath = String.format("%s://%s.%s/", OzoneConsts.OZONE_URI_SCHEME, bucket.getName(),
bucket.getVolumeName());
conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, rootPath);
fs = FileSystem.get(conf);

// Create a directory structure with 10 files in dir1.
Path dir1 = new Path("/dir1");
Expand Down Expand Up @@ -210,6 +226,7 @@ public void testGetDeletedDirectoryInfo()
// Assert that the directory dir1 has 10 sub-files and size of 1000 bytes.
assertEquals(10, summary.getNumOfFiles());
assertEquals(10, summary.getSizeOfFiles());
assertEquals(QuotaUtil.getReplicatedSize(10, replicationConfig), summary.getReplicatedSizeOfFiles());
}

// Delete the entire directory dir1.
Expand Down Expand Up @@ -237,6 +254,7 @@ public void testGetDeletedDirectoryInfo()
(KeyInsightInfoResponse) deletedDirInfo.getEntity();
// Assert the size of deleted directory is 10.
assertEquals(10, entity.getUnreplicatedDataSize());
assertEquals(QuotaUtil.getReplicatedSize(10, replicationConfig), entity.getReplicatedDataSize());

// Cleanup the tables.
cleanupTables();
Expand All @@ -254,9 +272,16 @@ public void testGetDeletedDirectoryInfo()
* │ │ └── file3
*
*/
@Test
public void testGetDeletedDirectoryInfoForNestedDirectories()
@ParameterizedTest
@MethodSource("replicationConfigs")
public void testGetDeletedDirectoryInfoForNestedDirectories(ReplicationConfig replicationConfig)
throws Exception {
OzoneBucket bucket = TestDataUtil.createVolumeAndBucket(client, BucketLayout.FILE_SYSTEM_OPTIMIZED,
new DefaultReplicationConfig(replicationConfig));
String rootPath = String.format("%s://%s.%s/", OzoneConsts.OZONE_URI_SCHEME, bucket.getName(),
bucket.getVolumeName());
conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, rootPath);
fs = FileSystem.get(conf);

// Create a directory structure with 10 files and 3 nested directories.
Path path = new Path("/dir1/dir2/dir3");
Expand Down Expand Up @@ -326,6 +351,7 @@ public void testGetDeletedDirectoryInfoForNestedDirectories()
(KeyInsightInfoResponse) deletedDirInfo.getEntity();
// Assert the size of deleted directory is 3.
assertEquals(3, entity.getUnreplicatedDataSize());
assertEquals(QuotaUtil.getReplicatedSize(3, replicationConfig), entity.getReplicatedDataSize());

// Cleanup the tables.
cleanupTables();
Expand All @@ -352,9 +378,18 @@ public void testGetDeletedDirectoryInfoForNestedDirectories()
* ├── ...
* └── file10
*/
@Test
public void testGetDeletedDirectoryInfoWithMultipleSubdirectories()
@ParameterizedTest
@MethodSource("replicationConfigs")
public void testGetDeletedDirectoryInfoWithMultipleSubdirectories(ReplicationConfig replicationConfig)
throws Exception {
OzoneBucket bucket = TestDataUtil.createVolumeAndBucket(client, BucketLayout.FILE_SYSTEM_OPTIMIZED,
new DefaultReplicationConfig(replicationConfig));
String rootPath = String.format("%s://%s.%s/", OzoneConsts.OZONE_URI_SCHEME, bucket.getName(),
bucket.getVolumeName());
// Set the fs.defaultFS and start the filesystem
conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, rootPath);
fs = FileSystem.get(conf);

int numSubdirectories = 10;
int filesPerSubdirectory = 10;

Expand Down Expand Up @@ -388,6 +423,7 @@ public void testGetDeletedDirectoryInfoWithMultipleSubdirectories()
(KeyInsightInfoResponse) deletedDirInfo.getEntity();
// Assert the size of deleted directory is 100.
assertEquals(100, entity.getUnreplicatedDataSize());
assertEquals(QuotaUtil.getReplicatedSize(100, replicationConfig), entity.getReplicatedDataSize());

// Cleanup the tables.
cleanupTables();
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -33,6 +33,7 @@
import java.util.Scanner;
import org.apache.commons.io.IOUtils;
import org.apache.commons.lang3.RandomStringUtils;
import org.apache.hadoop.hdds.client.DefaultReplicationConfig;
import org.apache.hadoop.hdds.client.ReplicationConfig;
import org.apache.hadoop.hdds.client.ReplicationFactor;
import org.apache.hadoop.hdds.client.ReplicationType;
Expand Down Expand Up @@ -65,14 +66,32 @@ public static OzoneBucket createVolumeAndBucket(OzoneClient client,
}

public static OzoneBucket createVolumeAndBucket(OzoneClient client,
String volumeName, String bucketName, BucketLayout bucketLayout)
String volumeName, String bucketName, BucketLayout bucketLayout) throws IOException {
BucketArgs omBucketArgs;
BucketArgs.Builder builder = BucketArgs.newBuilder();
builder.setStorageType(StorageType.DISK);
if (bucketLayout != null) {
builder.setBucketLayout(bucketLayout);
}
omBucketArgs = builder.build();

return createVolumeAndBucket(client, volumeName, bucketName,
omBucketArgs);
}

public static OzoneBucket createVolumeAndBucket(OzoneClient client,
String volumeName, String bucketName, BucketLayout bucketLayout, DefaultReplicationConfig replicationConfig)
throws IOException {
BucketArgs omBucketArgs;
BucketArgs.Builder builder = BucketArgs.newBuilder();
builder.setStorageType(StorageType.DISK);
if (bucketLayout != null) {
builder.setBucketLayout(bucketLayout);
}

if (replicationConfig != null) {
builder.setDefaultReplicationConfig(replicationConfig);
}
omBucketArgs = builder.build();

return createVolumeAndBucket(client, volumeName, bucketName,
Expand Down Expand Up @@ -197,18 +216,26 @@ public static OzoneBucket createLinkedBucket(OzoneClient client, String vol, Str
/**
 * Creates a volume and bucket with random names, the given layout, the
 * cluster-default replication config, and no linked bucket.
 * Fix: removed the stale pre-change delegation line
 * ({@code createVolumeAndBucket(client, bucketLayout, false)}) that a diff
 * had left interleaved with the current one, producing two return statements.
 *
 * @param client Ozone client used to create the volume and bucket.
 * @param bucketLayout desired bucket layout, or {@code null} for default.
 * @return the created bucket.
 * @throws IOException if volume or bucket creation fails.
 */
public static OzoneBucket createVolumeAndBucket(OzoneClient client,
    BucketLayout bucketLayout)
    throws IOException {
  return createVolumeAndBucket(client, bucketLayout, null, false);
}

public static OzoneBucket createVolumeAndBucket(OzoneClient client,
BucketLayout bucketLayout, boolean createLinkedBucket) throws IOException {
public static OzoneBucket createVolumeAndBucket(OzoneClient client, BucketLayout bucketLayout,
DefaultReplicationConfig replicationConfig)
throws IOException {
return createVolumeAndBucket(client, bucketLayout, replicationConfig, false);
}

public static OzoneBucket createVolumeAndBucket(OzoneClient client, BucketLayout bucketLayout,
DefaultReplicationConfig replicationConfig,
boolean createLinkedBucket)
throws IOException {
final int attempts = 5;
for (int i = 0; i < attempts; i++) {
try {
String volumeName = "volume" + RandomStringUtils.secure().nextNumeric(5);
String bucketName = "bucket" + RandomStringUtils.secure().nextNumeric(5);
OzoneBucket ozoneBucket = createVolumeAndBucket(client, volumeName, bucketName,
bucketLayout);
bucketLayout, replicationConfig);
if (createLinkedBucket) {
String targetBucketName = ozoneBucket.getName() + RandomStringUtils.secure().nextNumeric(5);
ozoneBucket = createLinkedBucket(client, volumeName, bucketName, targetBucketName);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -238,7 +238,7 @@ private void init() throws Exception {
cluster.waitForClusterToBeReady();
client = cluster.newClient();
// create a volume and a bucket to be used by OzoneFileSystem
ozoneBucket = TestDataUtil.createVolumeAndBucket(client, bucketLayout, createLinkedBucket);
ozoneBucket = TestDataUtil.createVolumeAndBucket(client, bucketLayout, null, createLinkedBucket);
if (createLinkedBucket) {
this.linkedBuckets.put(ozoneBucket.getName(), ozoneBucket.getSourceBucket());
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -132,7 +132,7 @@ public void setupFsClient() throws IOException {
writeClient = objectStore.getClientProxy().getOzoneManagerClient();
ozoneManager = cluster().getOzoneManager();

OzoneBucket bucket = TestDataUtil.createVolumeAndBucket(client, bucketLayout, createLinkedBuckets);
OzoneBucket bucket = TestDataUtil.createVolumeAndBucket(client, bucketLayout, null, createLinkedBuckets);
if (createLinkedBuckets) {
linkedBucketMaps.put(bucket.getName(), bucket.getSourceBucket());
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -33,6 +33,10 @@ public Injector getInjector() {
return injector;
}

// Static accessor for the shared injector, mirroring getInjector() for
// callers that have no instance. NOTE(review): exposes mutable static
// state; callers should treat the returned injector as read-only.
public static Injector getStaticInjector() {
return injector;
}

// Package-private setter for the shared injector; presumably called once
// during test/server bootstrap — TODO confirm it is not set concurrently.
static void setInjector(Injector inj) {
injector = inj;
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -42,8 +42,10 @@

import com.google.common.annotations.VisibleForTesting;
import java.io.IOException;
import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Deque;
import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.List;
Expand All @@ -58,6 +60,7 @@
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.tuple.Pair;
import org.apache.hadoop.hdds.scm.server.OzoneStorageContainerManager;
import org.apache.hadoop.hdds.utils.db.Table;
import org.apache.hadoop.hdds.utils.db.TableIterator;
Expand Down Expand Up @@ -646,9 +649,9 @@ private void getPendingForDeletionDirInfo(
keyEntityInfo.setKey(omKeyInfo.getFileName());
keyEntityInfo.setPath(createPath(omKeyInfo));
keyEntityInfo.setInStateSince(omKeyInfo.getCreationTime());
keyEntityInfo.setSize(
fetchSizeForDeletedDirectory(omKeyInfo.getObjectID()));
keyEntityInfo.setReplicatedSize(omKeyInfo.getReplicatedSize());
Pair<Long, Long> sizeInfo = fetchSizeForDeletedDirectory(omKeyInfo.getObjectID());
keyEntityInfo.setSize(sizeInfo.getLeft());
keyEntityInfo.setReplicatedSize(sizeInfo.getRight());
keyEntityInfo.setReplicationConfig(omKeyInfo.getReplicationConfig());
pendingForDeletionKeyInfo.setUnreplicatedDataSize(
pendingForDeletionKeyInfo.getUnreplicatedDataSize() +
Expand All @@ -674,24 +677,32 @@ private void getPendingForDeletionDirInfo(
}

/**
* Given an object ID, return total data size (no replication)
* Given an object ID, return total data size as a pair of Total Size, Total Replicated Size
* under this object. Note:- This method is RECURSIVE.
*
* @param objectId the object's ID
* @return total used data size in bytes
* @return total used data size and replicated total used data size in bytes
* @throws IOException ioEx
*/
protected long fetchSizeForDeletedDirectory(long objectId)
protected Pair<Long, Long> fetchSizeForDeletedDirectory(long objectId)
throws IOException {
NSSummary nsSummary = reconNamespaceSummaryManager.getNSSummary(objectId);
if (nsSummary == null) {
return 0L;
}
long totalSize = nsSummary.getSizeOfFiles();
for (long childId : nsSummary.getChildDir()) {
totalSize += fetchSizeForDeletedDirectory(childId);
long totalSize = 0;
long totalReplicatedSize = 0;
Deque<Long> stack = new ArrayDeque();
stack.push(objectId);

while (!stack.isEmpty()) {
long currentId = stack.pop();
NSSummary nsSummary = reconNamespaceSummaryManager.getNSSummary(currentId);
if (nsSummary != null) {
totalSize += nsSummary.getSizeOfFiles();
totalReplicatedSize += nsSummary.getReplicatedSizeOfFiles();
for (long childId : nsSummary.getChildDir()) {
stack.push(childId);
}
}
}
return totalSize;
return Pair.of(totalSize, totalReplicatedSize);
}

/** This method retrieves set of directories pending for deletion.
Expand Down
Loading