org/apache/hadoop/ozone/om/TestListKeys.java
@@ -19,22 +19,23 @@
import org.apache.hadoop.hdds.client.ReplicationConfig;
import org.apache.hadoop.hdds.utils.IOUtils;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.TestDataUtil;
import org.apache.hadoop.ozone.client.OzoneBucket;
import org.apache.hadoop.ozone.client.OzoneClient;
import org.apache.hadoop.ozone.client.OzoneClientFactory;
import org.apache.hadoop.ozone.client.OzoneKey;
import org.apache.hadoop.ozone.client.io.OzoneInputStream;
import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
import org.apache.hadoop.ozone.om.helpers.BucketLayout;
import org.apache.ozone.test.NonHATests;
import org.junit.jupiter.api.AfterAll;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.TestInstance;
import org.junit.jupiter.api.Timeout;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.Arguments;
import org.junit.jupiter.params.provider.MethodSource;

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.Arrays;
import java.util.Collections;
@@ -55,35 +56,30 @@
* Test covers listKeys(keyPrefix, startKey, shallow) combinations
* in a legacy/OBS bucket layout type.
*/
@TestInstance(TestInstance.Lifecycle.PER_CLASS)
@Timeout(1200)
public class TestListKeys {
public abstract class TestListKeys implements NonHATests.TestCase {

private static MiniOzoneCluster cluster = null;
private OzoneBucket legacyOzoneBucket;
private OzoneBucket obsOzoneBucket;
private OzoneClient client;
private boolean originalFileSystemPathEnabled;
private long originalMaxListSize;

private static OzoneConfiguration conf;

private static OzoneBucket legacyOzoneBucket;

private static OzoneBucket obsOzoneBucket;
private static OzoneClient client;

/**
* Create a MiniDFSCluster for testing.
* <p>
*
* @throws IOException
*/
@BeforeAll
public static void init() throws Exception {
conf = new OzoneConfiguration();
conf.setBoolean(OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS, true);
void init() throws Exception {
OmConfig omConfig = cluster().getOzoneManager().getConfig();
originalFileSystemPathEnabled = omConfig.isFileSystemPathEnabled();
omConfig.setFileSystemPathEnabled(true);
originalMaxListSize = omConfig.getMaxListSize();
omConfig.setMaxListSize(2);

OzoneConfiguration conf = new OzoneConfiguration(cluster().getConf());
// Set the number of keys to be processed during batch operate.
conf.setInt(OZONE_FS_ITERATE_BATCH_SIZE, 3);
conf.setInt(OZONE_CLIENT_LIST_CACHE_SIZE, 3);
conf.setInt(OmConfig.Keys.SERVER_LIST_MAX_SIZE, 2);
cluster = MiniOzoneCluster.newBuilder(conf).build();
cluster.waitForClusterToBeReady();
client = cluster.newClient();

client = OzoneClientFactory.getRpcClient(conf);

// create a volume and a LEGACY bucket
legacyOzoneBucket = TestDataUtil
@@ -97,14 +93,14 @@ public static void init() throws Exception {
}

@AfterAll
public static void teardownClass() {
void cleanup() {
IOUtils.closeQuietly(client);
if (cluster != null) {
cluster.shutdown();
}
OmConfig omConfig = cluster().getOzoneManager().getConfig();
omConfig.setFileSystemPathEnabled(originalFileSystemPathEnabled);
omConfig.setMaxListSize(originalMaxListSize);
}

private static void initFSNameSpace() throws Exception {
private void initFSNameSpace() throws Exception {
buildNameSpaceTree(legacyOzoneBucket);
buildNameSpaceTree(obsOzoneBucket);
}
@@ -354,7 +350,7 @@ private void checkKeyShallowList(String keyPrefix, String startKey,
bucket.listKeys(keyPrefix, startKey, true);
ReplicationConfig expectedReplication =
Optional.ofNullable(bucket.getReplicationConfig())
.orElse(cluster.getOzoneManager().getDefaultReplicationConfig());
.orElse(cluster().getOzoneManager().getDefaultReplicationConfig());

List <String> keyLists = new ArrayList<>();
while (ozoneKeyIterator.hasNext()) {
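The change above is the pattern the whole PR applies: instead of building and tearing down its own MiniOzoneCluster in static lifecycle methods, each test class becomes abstract, implements NonHATests.TestCase, and reaches the shared non-HA cluster only through cluster(); a suite class then registers it as a @Nested subclass that supplies that cluster. A minimal sketch of that shape, with hypothetical stub types standing in for MiniOzoneCluster, NonHATests.TestCase and the real suite class (the JUnit @BeforeAll/@AfterAll/@Nested annotations are omitted):

    // Sketch only: SharedCluster, NonHaTestCase and NonHaSuite are hypothetical
    // stand-ins, not the real Ozone classes.
    class SharedCluster {
      // In the real code this is a running MiniOzoneCluster shared by many tests.
    }

    interface NonHaTestCase {
      SharedCluster cluster();   // the only way a converted test reaches the cluster
    }

    abstract class AbstractListKeysTest implements NonHaTestCase {
      void init() {
        SharedCluster shared = cluster();  // no per-class cluster start-up
        // ... create volumes/buckets, tweak settings, remember original values ...
      }

      void cleanup() {
        // ... restore whatever init() changed on the shared cluster ...
      }
    }

    class NonHaSuite {
      private final SharedCluster shared = new SharedCluster();

      SharedCluster getCluster() {
        return shared;
      }

      // In the real suite this is a JUnit @Nested inner class.
      class ListKeysCase extends AbstractListKeysTest {
        @Override
        public SharedCluster cluster() {
          return getCluster();
        }
      }
    }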
org/apache/hadoop/ozone/om/TestListKeysWithFSO.java
@@ -21,22 +21,23 @@
import org.apache.commons.lang3.RandomStringUtils;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.protocol.StorageType;
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.TestDataUtil;
import org.apache.hadoop.ozone.client.BucketArgs;
import org.apache.hadoop.ozone.client.OzoneBucket;
import org.apache.hadoop.ozone.client.OzoneClient;
import org.apache.hadoop.ozone.client.OzoneClientFactory;
import org.apache.hadoop.ozone.client.OzoneKey;
import org.apache.hadoop.ozone.client.OzoneVolume;
import org.apache.hadoop.ozone.client.io.OzoneInputStream;
import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
import org.apache.hadoop.ozone.om.helpers.BucketLayout;
import org.apache.ozone.test.NonHATests;
import org.junit.jupiter.api.AfterAll;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.TestInstance;
import org.junit.jupiter.api.Timeout;

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.Arrays;
import java.util.Iterator;
@@ -53,38 +54,34 @@
* Test covers listKeys(keyPrefix, startKey) combinations
* in a FSO bucket layout type.
*/
@TestInstance(TestInstance.Lifecycle.PER_CLASS)
@Timeout(1200)
public class TestListKeysWithFSO {
public abstract class TestListKeysWithFSO implements NonHATests.TestCase {

private OzoneBucket legacyOzoneBucket;
private OzoneBucket fsoOzoneBucket;
private OzoneBucket legacyOzoneBucket2;
private OzoneBucket fsoOzoneBucket2;
private OzoneBucket emptyLegacyOzoneBucket;
private OzoneBucket emptyFsoOzoneBucket;
private OzoneClient client;
private boolean originalFileSystemPathEnabled;
private long originalMaxListSize;

private static MiniOzoneCluster cluster = null;
private static OzoneConfiguration conf;

private static OzoneBucket legacyOzoneBucket;
private static OzoneBucket fsoOzoneBucket;
private static OzoneBucket legacyOzoneBucket2;
private static OzoneBucket fsoOzoneBucket2;
private static OzoneBucket emptyLegacyOzoneBucket;
private static OzoneBucket emptyFsoOzoneBucket;
private static OzoneClient client;

/**
* Create a MiniDFSCluster for testing.
* <p>
*
* @throws IOException
*/
@BeforeAll
public static void init() throws Exception {
conf = new OzoneConfiguration();
conf.setBoolean(OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS,
true);
void init() throws Exception {
OmConfig omConfig = cluster().getOzoneManager().getConfig();
originalFileSystemPathEnabled = omConfig.isFileSystemPathEnabled();
omConfig.setFileSystemPathEnabled(true);
originalMaxListSize = omConfig.getMaxListSize();
omConfig.setMaxListSize(2);

OzoneConfiguration conf = new OzoneConfiguration(cluster().getConf());
// Set the number of keys to be processed during batch operate.
conf.setInt(OZONE_FS_ITERATE_BATCH_SIZE, 3);
conf.setInt(OZONE_CLIENT_LIST_CACHE_SIZE, 3);
conf.setInt(OmConfig.Keys.SERVER_LIST_MAX_SIZE, 2);
cluster = MiniOzoneCluster.newBuilder(conf).build();
cluster.waitForClusterToBeReady();
client = cluster.newClient();

client = OzoneClientFactory.getRpcClient(conf);

// create a volume and a LEGACY bucket
legacyOzoneBucket = TestDataUtil
@@ -128,14 +125,14 @@ public static void init() throws Exception {
}

@AfterAll
public static void teardownClass() {
void cleanup() {
IOUtils.closeQuietly(client);
if (cluster != null) {
cluster.shutdown();
}
OmConfig omConfig = cluster().getOzoneManager().getConfig();
omConfig.setFileSystemPathEnabled(originalFileSystemPathEnabled);
omConfig.setMaxListSize(originalMaxListSize);
}

private static void initFSNameSpace() throws Exception {
private void initFSNameSpace() throws Exception {
/*
Keys Namespace:

@@ -615,7 +612,7 @@ private void checkKeyList(String keyPrefix, String startKey,
fsoBucket.listKeys(keyPrefix, startKey, shallow);
ReplicationConfig expectedReplication =
Optional.ofNullable(fsoBucket.getReplicationConfig())
.orElse(cluster.getOzoneManager().getDefaultReplicationConfig());
.orElse(cluster().getOzoneManager().getDefaultReplicationConfig());

List <String> keyLists = new ArrayList<>();
while (ozoneKeyIterator.hasNext()) {
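Because the cluster is now shared across test classes, both converted classes above save the OmConfig values they touch in init() and put them back in cleanup() instead of shutting a private cluster down. A minimal sketch of that save/restore idiom, with a hypothetical OmConfigStub in place of the real OmConfig:

    // Sketch only: OmConfigStub and its default values are assumptions,
    // not the real OmConfig.
    class OmConfigStub {
      private boolean fileSystemPathEnabled;
      private long maxListSize = 1000;

      boolean isFileSystemPathEnabled() { return fileSystemPathEnabled; }
      void setFileSystemPathEnabled(boolean value) { fileSystemPathEnabled = value; }
      long getMaxListSize() { return maxListSize; }
      void setMaxListSize(long value) { maxListSize = value; }
    }

    class SharedConfigTest {
      private boolean originalFileSystemPathEnabled;
      private long originalMaxListSize;

      void init(OmConfigStub omConfig) {
        // Remember the shared cluster's settings before changing them for this test.
        originalFileSystemPathEnabled = omConfig.isFileSystemPathEnabled();
        omConfig.setFileSystemPathEnabled(true);
        originalMaxListSize = omConfig.getMaxListSize();
        omConfig.setMaxListSize(2);
      }

      void cleanup(OmConfigStub omConfig) {
        // Restore the shared settings so later test classes see the defaults again.
        omConfig.setFileSystemPathEnabled(originalFileSystemPathEnabled);
        omConfig.setMaxListSize(originalMaxListSize);
      }
    }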
org/apache/hadoop/ozone/om/TestListStatus.java
@@ -20,15 +20,17 @@
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.utils.IOUtils;
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.TestDataUtil;
import org.apache.hadoop.ozone.client.OzoneBucket;
import org.apache.hadoop.ozone.client.OzoneClient;
import org.apache.hadoop.ozone.client.OzoneClientFactory;
import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
import org.apache.hadoop.ozone.om.helpers.BucketLayout;
import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus;
import org.apache.ozone.test.NonHATests;
import org.junit.jupiter.api.AfterAll;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.TestInstance;
import org.junit.jupiter.api.Timeout;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.Arguments;
@@ -48,45 +50,32 @@
/**
* A simple test that asserts that list status output is sorted.
*/
@TestInstance(TestInstance.Lifecycle.PER_CLASS)
@Timeout(1200)
public class TestListStatus {
public abstract class TestListStatus implements NonHATests.TestCase {
private static final Logger LOG = LoggerFactory.getLogger(TestListStatus.class);

private static MiniOzoneCluster cluster = null;
private static OzoneBucket fsoOzoneBucket;
private static OzoneClient client;
private OzoneBucket fsoOzoneBucket;
private OzoneClient client;

/**
* Create a MiniDFSCluster for testing.
* <p>
*
* @throws IOException in case of I/O error
*/
@BeforeAll
public static void init() throws Exception {
OzoneConfiguration conf = new OzoneConfiguration();
conf.setBoolean(OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS,
true);
cluster = MiniOzoneCluster.newBuilder(conf).build();
cluster.waitForClusterToBeReady();
client = cluster.newClient();
void init() throws Exception {
OzoneConfiguration conf = new OzoneConfiguration(cluster().getConf());
// Set the number of keys to be processed during batch operated.
conf.setInt(OZONE_FS_ITERATE_BATCH_SIZE, 5);

client = OzoneClientFactory.getRpcClient(conf);

// create a volume and a LEGACY bucket
fsoOzoneBucket = TestDataUtil
.createVolumeAndBucket(client, BucketLayout.FILE_SYSTEM_OPTIMIZED);

// Set the number of keys to be processed during batch operated.
conf.setInt(OZONE_FS_ITERATE_BATCH_SIZE, 5);

buildNameSpaceTree(fsoOzoneBucket);
}

@AfterAll
public static void teardownClass() {
void cleanup() {
IOUtils.closeQuietly(client);
if (cluster != null) {
cluster.shutdown();
}
}

@MethodSource("sortedListStatusParametersSource")
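TestListStatus follows the same conversion; the detail worth noting is that its client-side setting (the FS iterate batch size) is applied to a copy of the running cluster's configuration, taken with the OzoneConfiguration copy constructor, so the shared cluster's own config object is not mutated and the RPC client still points at the shared cluster. A minimal sketch of that copy-then-tweak step, using a hypothetical ConfStub and a placeholder key name:

    // Sketch only: ConfStub and the key name are assumptions; the real code
    // copies OzoneConfiguration and sets OZONE_FS_ITERATE_BATCH_SIZE.
    import java.util.HashMap;
    import java.util.Map;

    class ConfStub {
      // Placeholder key, not the real OZONE_FS_ITERATE_BATCH_SIZE constant.
      static final String FS_ITERATE_BATCH_SIZE = "fs.iterate.batch.size.placeholder";

      private final Map<String, String> props = new HashMap<>();

      ConfStub() { }

      // Copy constructor: start from the shared cluster's settings.
      ConfStub(ConfStub base) {
        props.putAll(base.props);
      }

      void setInt(String key, int value) {
        props.put(key, Integer.toString(value));
      }
    }

    class ClientConfigExample {
      static ConfStub clientConf(ConfStub clusterConf) {
        ConfStub conf = new ConfStub(clusterConf);       // copy, don't mutate the original
        conf.setInt(ConfStub.FS_ITERATE_BATCH_SIZE, 5);  // client-only tweak for this test
        return conf;                                     // then build the RPC client from it
      }
    }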
Non-HA test suite class (registers the converted tests as @Nested cases)
@@ -147,15 +147,39 @@ public MiniOzoneCluster cluster() {
}

@Nested
class ObjectStore extends org.apache.hadoop.ozone.om.TestObjectStore {
class BucketLayoutWithOlderClient extends org.apache.hadoop.ozone.om.TestBucketLayoutWithOlderClient {
@Override
public MiniOzoneCluster cluster() {
return getCluster();
}
}

@Nested
class BucketLayoutWithOlderClient extends org.apache.hadoop.ozone.om.TestBucketLayoutWithOlderClient {
class ListKeys extends org.apache.hadoop.ozone.om.TestListKeys {
    [Review thread]
    Contributor: Can we use qualified imports instead?
    Contributor Author: Yes, I plan to change it eventually, but this reduces conflicts while working on multiple tasks in parallel.
    Contributor: Fair enough. I think it can be done as soon as we enable related checks in PMD.

@Override
public MiniOzoneCluster cluster() {
return getCluster();
}
}

@Nested
class ListKeysWithFSO extends org.apache.hadoop.ozone.om.TestListKeysWithFSO {
@Override
public MiniOzoneCluster cluster() {
return getCluster();
}
}

@Nested
class ListStatus extends org.apache.hadoop.ozone.om.TestListStatus {
@Override
public MiniOzoneCluster cluster() {
return getCluster();
}
}

@Nested
class ObjectStore extends org.apache.hadoop.ozone.om.TestObjectStore {
@Override
public MiniOzoneCluster cluster() {
return getCluster();
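On the review question above about qualified imports: the @Nested classes extend the converted tests by fully qualified name, which keeps the suite file free of additional import lines (and so of merge conflicts while several such conversions are in flight), at the cost of the more usual import style. A tiny illustration of the two forms, using java.util.ArrayList as a stand-in for the real superclass:

    // Sketch only: java.util.ArrayList stands in for the fully qualified
    // superclass (org.apache.hadoop.ozone.om.TestListKeys in the real suite).
    import java.util.ArrayList;

    class ExtendStyles {
      // Style used in this PR: fully qualified name, no import line to conflict on.
      static class ViaQualifiedName extends java.util.ArrayList<String> { }

      // Style raised in review: import once, extend by the simple name.
      static class ViaImport extends ArrayList<String> { }
    }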