Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -167,7 +167,7 @@ static void init() throws Exception {
String bucketName = "bucket1";

ozoneBucket = TestDataUtil.createVolumeAndBucket(
client, volumeName, bucketName, BucketLayout.FILE_SYSTEM_OPTIMIZED);
client, volumeName, bucketName, BucketLayout.FILE_SYSTEM_OPTIMIZED, null);

String keyNameR3 = "key1";
containerIdR3 = setupRatisKey(recon, keyNameR3,
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,8 @@

package org.apache.hadoop.ozone.recon;

import static org.apache.hadoop.hdds.client.ReplicationFactor.THREE;
import static org.apache.hadoop.hdds.client.ReplicationType.RATIS;
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ACL_ENABLED;
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL;
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_ITERATE_BATCH_SIZE;
Expand All @@ -39,6 +41,8 @@
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdds.client.DefaultReplicationConfig;
import org.apache.hadoop.hdds.client.ReplicationConfig;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.scm.server.OzoneStorageContainerManager;
import org.apache.hadoop.hdds.utils.IOUtils;
Expand All @@ -52,6 +56,7 @@
import org.apache.hadoop.ozone.om.helpers.BucketLayout;
import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
import org.apache.hadoop.ozone.om.helpers.QuotaUtil;
import org.apache.hadoop.ozone.recon.api.OMDBInsightEndpoint;
import org.apache.hadoop.ozone.recon.api.types.KeyInsightInfoResponse;
import org.apache.hadoop.ozone.recon.api.types.NSSummary;
Expand Down Expand Up @@ -80,9 +85,66 @@ public class TestReconInsightsForDeletedDirectories {
private static FileSystem fs;
private static String volumeName;
private static String bucketName;
private static ReplicationConfig replicationConfig;
private static OzoneClient client;
private static ReconService recon;

// Accessors for the shared static test fixtures. They are protected (not
// private) so that subclasses — e.g. the EC-replication variant, which
// hides this class's @BeforeAll init() with its own — can install and read
// the cluster, client, filesystem and replication config used by the
// inherited test methods.

protected static MiniOzoneCluster getCluster() {
return cluster;
}

protected static void setCluster(MiniOzoneCluster cluster) {
TestReconInsightsForDeletedDirectories.cluster = cluster;
}

protected static FileSystem getFs() {
return fs;
}

protected static void setFs(FileSystem fs) {
TestReconInsightsForDeletedDirectories.fs = fs;
}

protected static String getVolumeName() {
return volumeName;
}

protected static void setVolumeName(String volumeName) {
TestReconInsightsForDeletedDirectories.volumeName = volumeName;
}

protected static String getBucketName() {
return bucketName;
}

protected static void setBucketName(String bucketName) {
TestReconInsightsForDeletedDirectories.bucketName = bucketName;
}

protected static ReplicationConfig getReplicationConfig() {
return replicationConfig;
}

protected static void setReplicationConfig(ReplicationConfig replicationConfig) {
TestReconInsightsForDeletedDirectories.replicationConfig = replicationConfig;
}

protected static OzoneClient getClient() {
return client;
}

protected static void setClient(OzoneClient client) {
TestReconInsightsForDeletedDirectories.client = client;
}

protected static ReconService getRecon() {
return recon;
}

protected static void setRecon(ReconService recon) {
TestReconInsightsForDeletedDirectories.recon = recon;
}

@BeforeAll
public static void init() throws Exception {
OzoneConfiguration conf = new OzoneConfiguration();
Expand All @@ -99,8 +161,9 @@ public static void init() throws Exception {
client = cluster.newClient();

// create a volume and a bucket to be used by OzoneFileSystem
OzoneBucket bucket = TestDataUtil.createVolumeAndBucket(client,
BucketLayout.FILE_SYSTEM_OPTIMIZED);
replicationConfig = ReplicationConfig.fromTypeAndFactor(RATIS, THREE);
OzoneBucket bucket = TestDataUtil.createVolumeAndBucket(client, BucketLayout.FILE_SYSTEM_OPTIMIZED,
new DefaultReplicationConfig(replicationConfig));
volumeName = bucket.getVolumeName();
bucketName = bucket.getName();

Expand Down Expand Up @@ -147,7 +210,6 @@ public void cleanup() throws IOException {
@Test
public void testGetDeletedDirectoryInfo()
throws Exception {

// Create a directory structure with 10 files in dir1.
Path dir1 = new Path("/dir1");
fs.mkdirs(dir1);
Expand Down Expand Up @@ -210,6 +272,7 @@ public void testGetDeletedDirectoryInfo()
// Assert that the directory dir1 has 10 sub-files and size of 1000 bytes.
assertEquals(10, summary.getNumOfFiles());
assertEquals(10, summary.getSizeOfFiles());
assertEquals(QuotaUtil.getReplicatedSize(10, replicationConfig), summary.getReplicatedSizeOfFiles());
}

// Delete the entire directory dir1.
Expand Down Expand Up @@ -237,6 +300,7 @@ public void testGetDeletedDirectoryInfo()
(KeyInsightInfoResponse) deletedDirInfo.getEntity();
// Assert the size of deleted directory is 10.
assertEquals(10, entity.getUnreplicatedDataSize());
assertEquals(QuotaUtil.getReplicatedSize(10, replicationConfig), entity.getReplicatedDataSize());

// Cleanup the tables.
cleanupTables();
Expand All @@ -257,7 +321,6 @@ public void testGetDeletedDirectoryInfo()
@Test
public void testGetDeletedDirectoryInfoForNestedDirectories()
throws Exception {

// Create a directory structure with 10 files and 3 nested directories.
Path path = new Path("/dir1/dir2/dir3");
fs.mkdirs(path);
Expand Down Expand Up @@ -326,6 +389,7 @@ public void testGetDeletedDirectoryInfoForNestedDirectories()
(KeyInsightInfoResponse) deletedDirInfo.getEntity();
// Assert the size of deleted directory is 3.
assertEquals(3, entity.getUnreplicatedDataSize());
assertEquals(QuotaUtil.getReplicatedSize(3, replicationConfig), entity.getReplicatedDataSize());

// Cleanup the tables.
cleanupTables();
Expand Down Expand Up @@ -388,6 +452,7 @@ public void testGetDeletedDirectoryInfoWithMultipleSubdirectories()
(KeyInsightInfoResponse) deletedDirInfo.getEntity();
// Assert the size of deleted directory is 100.
assertEquals(100, entity.getUnreplicatedDataSize());
assertEquals(QuotaUtil.getReplicatedSize(100, replicationConfig), entity.getReplicatedDataSize());

// Cleanup the tables.
cleanupTables();
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,73 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package org.apache.hadoop.ozone.recon;

import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ACL_ENABLED;
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL;
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_ITERATE_BATCH_SIZE;
import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_DIR_DELETING_SERVICE_INTERVAL;

import java.util.concurrent.TimeUnit;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdds.client.DefaultReplicationConfig;
import org.apache.hadoop.hdds.client.ECReplicationConfig;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.ozone.TestDataUtil;
import org.apache.hadoop.ozone.client.OzoneBucket;
import org.apache.hadoop.ozone.om.helpers.BucketLayout;
import org.junit.jupiter.api.BeforeAll;

/**
* Test class to verify the correctness of the insights generated by Recon
* for Deleted Directories for EC ReplicationConfig.
*/
/**
 * Test class to verify the correctness of the insights generated by Recon
 * for Deleted Directories for EC ReplicationConfig.
 *
 * <p>Declares a static {@code init()} with the same signature as the parent's
 * {@code @BeforeAll}, which hides the parent setup and replaces it with an
 * EC-configured cluster; the inherited test methods run against the fixtures
 * installed here via the parent's protected setters.
 */
public class TestReconInsightsForDeletedDirectoriesEC
    extends TestReconInsightsForDeletedDirectories {

  @BeforeAll
  public static void init() throws Exception {
    // Slow down the deletion services so deleted entries remain visible
    // to Recon while the tests inspect them; keep ACLs enabled.
    OzoneConfiguration conf = new OzoneConfiguration();
    conf.setBoolean(OZONE_ACL_ENABLED, true);
    conf.setInt(OZONE_DIR_DELETING_SERVICE_INTERVAL, 1000000);
    conf.setTimeDuration(OZONE_BLOCK_DELETING_SERVICE_INTERVAL, 10000000,
        TimeUnit.MILLISECONDS);

    ReconService reconService = new ReconService(conf);
    setRecon(reconService);

    // RS-3-2 needs at least 5 datanodes (3 data + 2 parity).
    MiniOzoneCluster ozoneCluster = MiniOzoneCluster.newBuilder(conf)
        .setNumDatanodes(5)
        .addService(reconService)
        .build();
    setCluster(ozoneCluster);
    ozoneCluster.waitForClusterToBeReady();
    setClient(ozoneCluster.newClient());

    // Bucket whose default replication is EC RS-3-2 with 1 MiB chunks.
    setReplicationConfig(new ECReplicationConfig("RS-3-2-1024k"));
    OzoneBucket ecBucket = TestDataUtil.createVolumeAndBucket(getClient(),
        BucketLayout.FILE_SYSTEM_OPTIMIZED,
        new DefaultReplicationConfig(getReplicationConfig()));
    setVolumeName(ecBucket.getVolumeName());
    setBucketName(ecBucket.getName());

    // Point the default filesystem at the new bucket before opening it.
    String rootPath = String.format("%s://%s.%s/",
        OzoneConsts.OZONE_URI_SCHEME, getBucketName(), getVolumeName());
    conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, rootPath);
    conf.setInt(OZONE_FS_ITERATE_BATCH_SIZE, 5);
    setFs(FileSystem.get(conf));
  }
}
Original file line number Diff line number Diff line change
Expand Up @@ -265,7 +265,7 @@ public void testRecoveryWithoutHsyncHflushOnLastBlock() throws Exception {
@Test
public void testOBSRecoveryShouldFail() throws Exception {
OzoneBucket obsBucket = TestDataUtil.createVolumeAndBucket(client,
"vol2", "obs", BucketLayout.OBJECT_STORE);
"vol2", "obs", BucketLayout.OBJECT_STORE, null);
String obsDir = OZONE_ROOT + obsBucket.getVolumeName() + OZONE_URI_DELIMITER + obsBucket.getName();
Path obsFile = new Path(obsDir, "file" + getTestName() + FILE_COUNTER.incrementAndGet());

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -104,7 +104,7 @@ public void init() throws Exception {
bucketName = RandomStringUtils.secure().nextAlphabetic(10).toLowerCase();

// create a volume and a bucket to be used by OzoneFileSystem
TestDataUtil.createVolumeAndBucket(client, volumeName, bucketName, BucketLayout.LEGACY);
TestDataUtil.createVolumeAndBucket(client, volumeName, bucketName, BucketLayout.LEGACY, null);

String rootPath = String.format("%s://%s.%s/", OZONE_URI_SCHEME, bucketName,
volumeName);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -71,7 +71,7 @@ public static void init() throws Exception {
// create a volume and a bucket to be used by OzoneFileSystem
try (OzoneClient client = cluster.newClient()) {
TestDataUtil.createVolumeAndBucket(client, volumeName, bucketName,
BucketLayout.FILE_SYSTEM_OPTIMIZED);
BucketLayout.FILE_SYSTEM_OPTIMIZED, null);
}

String rootPath = String
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -33,6 +33,7 @@
import java.util.Scanner;
import org.apache.commons.io.IOUtils;
import org.apache.commons.lang3.RandomStringUtils;
import org.apache.hadoop.hdds.client.DefaultReplicationConfig;
import org.apache.hadoop.hdds.client.ReplicationConfig;
import org.apache.hadoop.hdds.client.ReplicationFactor;
import org.apache.hadoop.hdds.client.ReplicationType;
Expand Down Expand Up @@ -61,18 +62,22 @@ private TestDataUtil() {

/**
 * Creates a volume and a bucket with the given names, using the client's
 * default bucket layout and no explicit default replication config
 * (the bucket inherits the cluster-wide replication setting).
 *
 * @param client     client used to issue the volume/bucket creation
 * @param volumeName name of the volume to create
 * @param bucketName name of the bucket to create
 * @return the created bucket
 * @throws IOException if volume or bucket creation fails
 */
public static OzoneBucket createVolumeAndBucket(OzoneClient client,
    String volumeName, String bucketName) throws IOException {
  // Pasted diff left both the pre- and post-change call here; only the
  // new 5-arg delegation (null replication config) is kept.
  return createVolumeAndBucket(client, volumeName, bucketName, getDefaultBucketLayout(client), null);
}

public static OzoneBucket createVolumeAndBucket(OzoneClient client,
String volumeName, String bucketName, BucketLayout bucketLayout)
String volumeName, String bucketName, BucketLayout bucketLayout, DefaultReplicationConfig replicationConfig)
throws IOException {
BucketArgs omBucketArgs;
BucketArgs.Builder builder = BucketArgs.newBuilder();
builder.setStorageType(StorageType.DISK);
if (bucketLayout != null) {
builder.setBucketLayout(bucketLayout);
}

if (replicationConfig != null) {
builder.setDefaultReplicationConfig(replicationConfig);
}
omBucketArgs = builder.build();

return createVolumeAndBucket(client, volumeName, bucketName,
Expand Down Expand Up @@ -197,18 +202,26 @@ public static OzoneBucket createLinkedBucket(OzoneClient client, String vol, Str
/**
 * Creates a volume and a bucket with random names and the given layout,
 * without a default replication config and without a linked bucket.
 *
 * @param client       client used to issue the volume/bucket creation
 * @param bucketLayout layout for the new bucket; may be null for default
 * @return the created bucket
 * @throws IOException if volume or bucket creation fails
 */
public static OzoneBucket createVolumeAndBucket(OzoneClient client,
    BucketLayout bucketLayout)
    throws IOException {
  // Pasted diff left both the pre- and post-change call here; only the
  // new 4-arg delegation (null replication config) is kept.
  return createVolumeAndBucket(client, bucketLayout, null, false);
}

public static OzoneBucket createVolumeAndBucket(OzoneClient client,
BucketLayout bucketLayout, boolean createLinkedBucket) throws IOException {
public static OzoneBucket createVolumeAndBucket(OzoneClient client, BucketLayout bucketLayout,
DefaultReplicationConfig replicationConfig)
throws IOException {
return createVolumeAndBucket(client, bucketLayout, replicationConfig, false);
}

public static OzoneBucket createVolumeAndBucket(OzoneClient client, BucketLayout bucketLayout,
DefaultReplicationConfig replicationConfig,
boolean createLinkedBucket)
throws IOException {
final int attempts = 5;
for (int i = 0; i < attempts; i++) {
try {
String volumeName = "volume" + RandomStringUtils.secure().nextNumeric(5);
String bucketName = "bucket" + RandomStringUtils.secure().nextNumeric(5);
OzoneBucket ozoneBucket = createVolumeAndBucket(client, volumeName, bucketName,
bucketLayout);
bucketLayout, replicationConfig);
if (createLinkedBucket) {
String targetBucketName = ozoneBucket.getName() + RandomStringUtils.secure().nextNumeric(5);
ozoneBucket = createLinkedBucket(client, volumeName, bucketName, targetBucketName);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -101,7 +101,7 @@ public void init() throws Exception {

// create a volume and a bucket to be used by OzoneFileSystem
TestDataUtil.createVolumeAndBucket(client, volumeName, bucketName,
BucketLayout.OBJECT_STORE);
BucketLayout.OBJECT_STORE, null);
volume = client.getObjectStore().getVolume(volumeName);
}

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -357,7 +357,7 @@ public void testKeyOps() throws Exception {
long initialNumDeleteObjectTaggingFails = getLongCounter("NumDeleteObjectTaggingFails", omMetrics);

// see HDDS-10078 for making this work with FILE_SYSTEM_OPTIMIZED layout
TestDataUtil.createVolumeAndBucket(client, volumeName, bucketName, BucketLayout.LEGACY);
TestDataUtil.createVolumeAndBucket(client, volumeName, bucketName, BucketLayout.LEGACY, null);
OmKeyArgs keyArgs = createKeyArgs(volumeName, bucketName,
RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.THREE));
doKeyOps(keyArgs); // This will perform 7 different operations on the key
Expand Down Expand Up @@ -487,7 +487,7 @@ public void testDirectoryOps(BucketLayout bucketLayout) throws Exception {
String bucketName = UUID.randomUUID().toString();

// create bucket with different layout in each ParameterizedTest
TestDataUtil.createVolumeAndBucket(client, volumeName, bucketName, bucketLayout);
TestDataUtil.createVolumeAndBucket(client, volumeName, bucketName, bucketLayout, null);

// Create bucket with 2 nested directories.
String rootPath = String.format("%s://%s/",
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -147,7 +147,7 @@ public void setup() throws Exception {
client = cluster.newClient();
om = cluster.getOzoneManager();
bucket1 = TestDataUtil.createVolumeAndBucket(
client, VOLUME_NAME, BUCKET_NAME_ONE, BucketLayout.DEFAULT);
client, VOLUME_NAME, BUCKET_NAME_ONE, BucketLayout.DEFAULT, null);
}

@AfterAll
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -238,7 +238,7 @@ private void init() throws Exception {
cluster.waitForClusterToBeReady();
client = cluster.newClient();
// create a volume and a bucket to be used by OzoneFileSystem
ozoneBucket = TestDataUtil.createVolumeAndBucket(client, bucketLayout, createLinkedBucket);
ozoneBucket = TestDataUtil.createVolumeAndBucket(client, bucketLayout, null, createLinkedBucket);
if (createLinkedBucket) {
this.linkedBuckets.put(ozoneBucket.getName(), ozoneBucket.getSourceBucket());
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -188,7 +188,7 @@ private void writeKey(String volumeName, String bucketName,
ReplicationFactor.THREE);
}
TestDataUtil.createVolumeAndBucket(client, volumeName, bucketName,
layout);
layout, null);
TestDataUtil.createKey(
client.getObjectStore().getVolume(volumeName).getBucket(bucketName),
keyName, repConfig, "test".getBytes(StandardCharsets.UTF_8));
Expand Down
Loading
Loading