Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -20,6 +20,8 @@
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_LISTING_PAGE_SIZE;
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_REPLICATION;
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_REPLICATION_TYPE;
import static org.apache.hadoop.ozone.OzoneConsts.OZONE_OFS_URI_SCHEME;
import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_SCHEME;
import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertEquals;

Expand All @@ -33,17 +35,30 @@
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.fs.contract.ContractTestUtils;
import org.apache.hadoop.hdds.conf.ConfigurationTarget;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.ratis.util.Preconditions;

/**
* Common test cases for Ozone file systems.
*/
final class OzoneFileSystemTests {
public final class OzoneFileSystemTests {
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Can we rename this class to something like OzoneFileSystemTestUtils? Right now it feels like it is a test case rather than collection of utils.

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

There is a lot of duplication between AbstractOzoneFileSystemTest and AbstractRootedOzoneFileSystemTest. This class is intended as a place where we would move those common test cases, not as a utility. So far only listStatusIteratorOnPageSize was moved in HDDS-9328.

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Thanks for clarification!

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Created HDDS-12355.

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Can you add a TODO with a Jira link?

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

We use Jira for tasks, not TODO items in the code. If someone reports a bug or an improvement idea, we don't go and edit code just to mention those.

Also, the item we discussed is not related to the changes being done in the PR, which touches this class only superficially. If I was working on a series of tasks, it would be OK to mention a follow-up task in a comment to help remember items or details about the follow-up.


private OzoneFileSystemTests() {
// no instances: this class only holds shared test cases / static helpers
}

/**
* Set file system listing page size. Also disable the file system cache to
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Suggested change
* Set file system listing page size. Also disable the file system cache to
* Set file system listing page size. Also disable the file system cache to

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Do not forget to remove this space :)

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I use two spaces intentionally.

* ensure new {@link FileSystem} instance reflects the updated page size.
*/
/**
 * Sets the Ozone file system listing page size on {@code conf} and disables
 * the {@link FileSystem} cache for both the o3fs and ofs schemes, so a
 * subsequently obtained {@link FileSystem} instance picks up the new page size.
 *
 * @param conf     configuration to mutate
 * @param pageSize listing page size; must be strictly positive
 */
public static void setPageSize(ConfigurationTarget conf, int pageSize) {
  Preconditions.assertTrue(pageSize > 0, () -> "pageSize=" + pageSize + " <= 0");
  conf.setInt(OZONE_FS_LISTING_PAGE_SIZE, pageSize);
  // A cached FileSystem would keep the old page size; disable caching for both schemes.
  for (String scheme : new String[] {OZONE_URI_SCHEME, OZONE_OFS_URI_SCHEME}) {
    conf.setBoolean(String.format("fs.%s.impl.disable.cache", scheme), true);
  }
}

/**
* Tests listStatusIterator operation on directory with different
* numbers of child directories.
Expand All @@ -60,10 +75,8 @@ public static void listStatusIteratorOnPageSize(OzoneConfiguration conf,
pageSize + pageSize
};
OzoneConfiguration config = new OzoneConfiguration(conf);
config.setInt(OZONE_FS_LISTING_PAGE_SIZE, pageSize);
setPageSize(config, pageSize);
URI uri = FileSystem.getDefaultUri(config);
config.setBoolean(
String.format("fs.%s.impl.disable.cache", uri.getScheme()), true);
try (FileSystem subject = FileSystem.get(uri, config)) {
Path dir = new Path(Objects.requireNonNull(rootPath),
"listStatusIterator");
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -17,148 +17,110 @@

package org.apache.hadoop.ozone.freon;

import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_SCHEME;
import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ADDRESS_KEY;
import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertEquals;

import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.net.URI;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import org.apache.commons.io.FileUtils;
import java.util.UUID;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.ozone.OzoneFileSystemTests;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.conf.StorageSize;
import org.apache.hadoop.hdds.conf.StorageUnit;
import org.apache.hadoop.hdds.utils.IOUtils;
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.client.BucketArgs;
import org.apache.hadoop.ozone.client.ObjectStore;
import org.apache.hadoop.ozone.client.OzoneClient;
import org.apache.hadoop.ozone.client.OzoneClientFactory;
import org.apache.hadoop.ozone.client.OzoneVolume;
import org.apache.hadoop.ozone.om.OMConfigKeys;
import org.apache.hadoop.ozone.om.helpers.BucketLayout;
import org.apache.ozone.test.GenericTestUtils;
import org.apache.ratis.server.RaftServer;
import org.apache.ratis.server.raftlog.RaftLog;
import org.apache.ozone.test.NonHATests;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.io.TempDir;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.EnumSource;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.slf4j.event.Level;

/**
* Test for HadoopDirTreeGenerator.
*/
public class TestHadoopDirTreeGenerator {
@TempDir
private java.nio.file.Path path;
private OzoneConfiguration conf = null;
private MiniOzoneCluster cluster = null;
public abstract class TestHadoopDirTreeGenerator implements NonHATests.TestCase {

private static final int PAGE_SIZE = 10;

private ObjectStore store = null;
private static final Logger LOG =
LoggerFactory.getLogger(TestHadoopDirTreeGenerator.class);
private OzoneClient client;

@BeforeEach
public void setup() {
GenericTestUtils.setLogLevel(RaftLog.LOG, Level.DEBUG);
GenericTestUtils.setLogLevel(RaftServer.LOG, Level.DEBUG);
}

/**
* Shutdown MiniDFSCluster.
*/
private void shutdown() throws IOException {
IOUtils.closeQuietly(client);
if (cluster != null) {
cluster.shutdown();
}
}

/**
* Create a MiniDFSCluster for testing.
*
* @throws IOException
*/
private void startCluster() throws Exception {
conf = getOzoneConfiguration();
conf.set(OMConfigKeys.OZONE_DEFAULT_BUCKET_LAYOUT,
BucketLayout.LEGACY.name());
cluster = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(5).build();
cluster.waitForClusterToBeReady();
cluster.waitTobeOutOfSafeMode();

client = OzoneClientFactory.getRpcClient(conf);
// Creates a fresh Ozone client against the shared cluster before each test;
// closed again in cleanup().
void setup() throws Exception {
client = cluster().newClient();
store = client.getObjectStore();
}

protected OzoneConfiguration getOzoneConfiguration() {
return new OzoneConfiguration();
@AfterEach
void cleanup() {
// Best-effort close of the per-test client created in setup().
IOUtils.closeQuietly(client);
}

@Test
public void testNestedDirTreeGeneration() throws Exception {
try {
startCluster();
FileOutputStream out = FileUtils.openOutputStream(new File(path.toString(),
"conf"));
cluster.getConf().writeXml(out);
out.getFD().sync();
out.close();

verifyDirTree("vol1", "bucket1", 1,
1, 1, "0");
verifyDirTree("vol2", "bucket1", 1,
5, 1, "5B");
verifyDirTree("vol3", "bucket1", 2,
5, 3, "1B");
verifyDirTree("vol4", "bucket1", 3,
2, 4, "2B");
verifyDirTree("vol5", "bucket1", 5,
4, 1, "0");
// default page size is Constants.LISTING_PAGE_SIZE = 1024
verifyDirTree("vol6", "bucket1", 2,
1, 1100, "0");
} finally {
shutdown();
}
/**
 * Generates directory trees of varying depth/span/file-count via Freon's
 * {@code dtsg} command and verifies the resulting structure, for both
 * FSO and LEGACY bucket layouts.
 */
@ParameterizedTest
@EnumSource(names = {"FILE_SYSTEM_OPTIMIZED", "LEGACY"})
public void testNestedDirTreeGeneration(BucketLayout layout) throws Exception {
  // Unique suffix so repeated runs against the same cluster do not collide.
  final String run = UUID.randomUUID().toString();
  // {depth, span, fileCount, perFileSize}; last case exercises listing
  // pagination by creating more children than a single page holds.
  final Object[][] cases = {
      {1, 1, 1, "0"},
      {1, 5, 1, "5B"},
      {2, 5, 3, "1B"},
      {3, 2, 4, "2B"},
      {5, 4, 1, "0"},
      {2, 1, PAGE_SIZE + PAGE_SIZE / 2, "0"},
  };
  for (int i = 0; i < cases.length; i++) {
    Object[] c = cases[i];
    verifyDirTree("vol" + (i + 1) + "-" + run, "bucket1",
        (int) c[0], (int) c[1], (int) c[2], (String) c[3], layout);
  }
}

private void verifyDirTree(String volumeName, String bucketName, int depth,
int span, int fileCount, String perFileSize)
int span, int fileCount, String perFileSize, BucketLayout layout)
throws IOException {

store.createVolume(volumeName);
OzoneVolume volume = store.getVolume(volumeName);
volume.createBucket(bucketName);
String rootPath = "o3fs://" + bucketName + "." + volumeName;
String confPath = new File(path.toString(), "conf").getAbsolutePath();
new Freon().execute(
new String[]{"-conf", confPath, "dtsg", "-d", depth + "", "-c",
fileCount + "", "-s", span + "", "-n", "1", "-r", rootPath,
"-g", perFileSize});
volume.createBucket(bucketName, BucketArgs.newBuilder().setBucketLayout(layout).build());
String rootPath = OZONE_URI_SCHEME + "://" + bucketName + "." + volumeName;
String om = cluster().getConf().get(OZONE_OM_ADDRESS_KEY);
new Freon().getCmd().execute(
"-D", OZONE_OM_ADDRESS_KEY + "=" + om,
"dtsg",
"-c", String.valueOf(fileCount),
"-d", String.valueOf(depth),
"-g", perFileSize,
"-n", "1",
"-r", rootPath,
"-s", String.valueOf(span)
);
// verify the directory structure
LOG.info("Started verifying the directory structure...");
FileSystem fileSystem = FileSystem.get(URI.create(rootPath),
conf);
Path rootDir = new Path(rootPath.concat("/"));
// verify root path details
FileStatus[] fileStatuses = fileSystem.listStatus(rootDir);
// verify the num of peer directories, expected span count is 1
// as it has only one dir at root.
verifyActualSpan(1, Arrays.asList(fileStatuses));
for (FileStatus fileStatus : fileStatuses) {
int actualDepth =
traverseToLeaf(fileSystem, fileStatus.getPath(), 1, depth, span,
fileCount, StorageSize.parse(perFileSize, StorageUnit.BYTES));
assertEquals(depth, actualDepth, "Mismatch depth in a path");
OzoneConfiguration conf = new OzoneConfiguration(cluster().getConf());
OzoneFileSystemTests.setPageSize(conf, PAGE_SIZE);
try (FileSystem fileSystem = FileSystem.get(URI.create(rootPath), conf)) {
Path rootDir = new Path(rootPath.concat("/"));
// verify root path details
FileStatus[] fileStatuses = fileSystem.listStatus(rootDir);
// verify the num of peer directories, expected span count is 1
// as it has only one dir at root.
verifyActualSpan(1, Arrays.asList(fileStatuses));
for (FileStatus fileStatus : fileStatuses) {
int actualDepth =
traverseToLeaf(fileSystem, fileStatus.getPath(), 1, depth, span,
fileCount, StorageSize.parse(perFileSize, StorageUnit.BYTES));
assertEquals(depth, actualDepth, "Mismatch depth in a path");
}
}
}

Expand Down
Loading