OmConfig.java
@@ -78,6 +78,17 @@ public void validate() {
     }
   }
 
+  public OmConfig copy() {
+    OmConfig copy = new OmConfig();
+    copy.setFrom(this);
+    return copy;
+  }
+
+  public void setFrom(OmConfig other) {
+    fileSystemPathEnabled = other.fileSystemPathEnabled;
+    maxListSize = other.maxListSize;
+  }
+
   /**
    * String keys for tests and grep.
    */
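The new copy()/setFrom() pair lets callers snapshot the mutable OmConfig and apply a whole group of fields in one step. A minimal usage sketch, assuming only the members visible in this diff (copy, setFrom, setMaxListSize, validate); the helper itself is illustrative and not part of the PR:

```java
// Hypothetical helper, not from the PR: stage changes on a copy, validate,
// then publish every copied field at once via setFrom().
OmConfig applyMaxListSize(OmConfig live, long newMaxListSize) {
  OmConfig candidate = live.copy();          // copy() delegates to setFrom(this)
  candidate.setMaxListSize(newMaxListSize);  // mutate the copy, not the live config
  candidate.validate();                      // existing check from the hunk context above
  live.setFrom(candidate);                   // applies fileSystemPathEnabled + maxListSize
  return live;
}
```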
OmConfig test (package org.apache.hadoop.ozone.om)
@@ -18,6 +18,7 @@
 package org.apache.hadoop.ozone.om;
 
 import static org.assertj.core.api.Assertions.assertThat;
+import static org.junit.jupiter.api.Assertions.assertEquals;
 
 import org.apache.hadoop.hdds.conf.MutableConfigurationSource;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
@@ -51,4 +52,32 @@ void overridesInvalidListSize(long invalidValue) {
         .isEqualTo(OmConfig.Defaults.SERVER_LIST_MAX_SIZE);
   }
 
+  @Test
+  void testCopy() {
+    MutableConfigurationSource conf = new OzoneConfiguration();
+    OmConfig original = conf.getObject(OmConfig.class);
+
+    OmConfig subject = original.copy();
+
+    assertConfigEquals(original, subject);
+  }
+
+  @Test
+  void testSetFrom() {
+    MutableConfigurationSource conf = new OzoneConfiguration();
+    OmConfig subject = conf.getObject(OmConfig.class);
+    OmConfig updated = conf.getObject(OmConfig.class);
+    updated.setFileSystemPathEnabled(!updated.isFileSystemPathEnabled());
+    updated.setMaxListSize(updated.getMaxListSize() + 1);
+
+    subject.setFrom(updated);
+
+    assertConfigEquals(updated, subject);
+  }
+
+  private static void assertConfigEquals(OmConfig expected, OmConfig actual) {
+    assertEquals(expected.getMaxListSize(), actual.getMaxListSize());
+    assertEquals(expected.isFileSystemPathEnabled(), actual.isFileSystemPathEnabled());
+  }
+
 }
TestOzoneFSBucketLayout.java
@@ -34,7 +34,6 @@
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.scm.OzoneClientConfig;
 import org.apache.hadoop.hdds.utils.IOUtils;
-import org.apache.hadoop.ozone.MiniOzoneCluster;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.TestDataUtil;
@@ -43,6 +42,7 @@
 import org.apache.hadoop.ozone.client.OzoneClient;
 import org.apache.hadoop.ozone.om.exceptions.OMException;
 import org.apache.hadoop.ozone.om.helpers.BucketLayout;
+import org.apache.ozone.test.NonHATests;
 import org.junit.jupiter.api.AfterAll;
 import org.junit.jupiter.api.BeforeAll;
 import org.junit.jupiter.api.TestInstance;
@@ -52,12 +52,10 @@
 /**
  * Ozone file system tests to validate default bucket layout configuration
  * and behaviour.
- * TODO: merge with some other test
  */
 @TestInstance(TestInstance.Lifecycle.PER_CLASS)
-class TestOzoneFSBucketLayout {
+public abstract class TestOzoneFSBucketLayout implements NonHATests.TestCase {
 
-  private MiniOzoneCluster cluster;
   private ObjectStore objectStore;
   private OzoneClient client;
   private String rootPath;
@@ -95,25 +93,17 @@ static Collection<String> invalidDefaultBucketLayouts() {
   }
 
   @BeforeAll
-  void initCluster() throws Exception {
-    OzoneConfiguration conf = new OzoneConfiguration();
-    cluster = MiniOzoneCluster.newBuilder(conf)
-        .setNumDatanodes(3)
-        .build();
-    cluster.waitForClusterToBeReady();
-    client = cluster.newClient();
+  void setUp() throws Exception {
+    client = cluster().newClient();
     objectStore = client.getObjectStore();
     rootPath = String.format("%s://%s/",
-        OzoneConsts.OZONE_OFS_URI_SCHEME, conf.get(OZONE_OM_ADDRESS_KEY));
+        OzoneConsts.OZONE_OFS_URI_SCHEME, cluster().getConf().get(OZONE_OM_ADDRESS_KEY));
     volumeName = TestDataUtil.createVolumeAndBucket(client).getVolumeName();
   }
 
   @AfterAll
-  void teardown() throws IOException {
+  void tearDown() {
     IOUtils.closeQuietly(client);
-    if (cluster != null) {
-      cluster.shutdown();
-    }
   }
 
   @ParameterizedTest
@@ -134,6 +124,7 @@ void fileSystemWithUnsupportedDefaultBucketLayout(String layout) {
     assertThat(e.getMessage())
         .contains(ERROR_MAP.get(layout));
   }
+
   @ParameterizedTest
   @MethodSource("validDefaultBucketLayouts")
   void fileSystemWithValidBucketLayout(String layout) throws IOException {
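Both refactored test classes now implement NonHATests.TestCase and reach the shared cluster through a cluster() accessor instead of building their own MiniOzoneCluster. A sketch of the contract they appear to rely on follows; the real NonHATests.TestCase interface may declare more than this:

```java
// Assumed shape of the contract, inferred from the cluster().newClient() and
// cluster().getConf() calls in this diff; anything beyond cluster() is a guess.
public interface TestCase {
  MiniOzoneCluster cluster();  // a shared, already-running cluster for the suite
}
```

Sharing one already-running MiniOzoneCluster appears to be the point of the refactor: each abstract test class stops paying the multi-datanode startup and shutdown cost, and a concrete subclass in the non-HA suite binds it to the common cluster.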
TestOzoneFSInputStream.java
@@ -18,8 +18,10 @@
 package org.apache.hadoop.fs.ozone;
 
 import static org.apache.hadoop.hdds.StringUtils.string2Bytes;
+import static org.apache.hadoop.hdds.utils.IOUtils.closeQuietly;
 import static org.junit.jupiter.api.Assertions.assertArrayEquals;
 import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
 import static org.junit.jupiter.api.Assertions.assertThrows;
 
 import java.io.BufferedInputStream;
@@ -28,86 +30,58 @@
 import java.io.FileInputStream;
 import java.io.IOException;
 import java.io.InputStream;
+import java.net.URI;
 import java.nio.ByteBuffer;
 import java.util.UUID;
 import org.apache.commons.lang3.RandomStringUtils;
-import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdds.client.DefaultReplicationConfig;
 import org.apache.hadoop.hdds.client.ECReplicationConfig;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.conf.StorageUnit;
 import org.apache.hadoop.hdds.protocol.StorageType;
-import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.SequenceFile;
-import org.apache.hadoop.ozone.ClientConfigForTesting;
-import org.apache.hadoop.ozone.MiniOzoneCluster;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.TestDataUtil;
 import org.apache.hadoop.ozone.client.BucketArgs;
 import org.apache.hadoop.ozone.client.OzoneBucket;
 import org.apache.hadoop.ozone.client.OzoneClient;
-import org.apache.hadoop.ozone.om.OMConfigKeys;
 import org.apache.hadoop.ozone.om.helpers.BucketLayout;
-import org.assertj.core.api.Assertions;
+import org.apache.ozone.test.NonHATests;
 import org.junit.jupiter.api.AfterAll;
 import org.junit.jupiter.api.BeforeAll;
 import org.junit.jupiter.api.Test;
 import org.junit.jupiter.api.TestInstance;
 import org.junit.jupiter.api.Timeout;
 import org.junit.jupiter.params.ParameterizedTest;
 import org.junit.jupiter.params.provider.ValueSource;
 
 /**
  * Test OzoneFSInputStream by reading through multiple interfaces.
  */
 @TestInstance(TestInstance.Lifecycle.PER_CLASS)
 @Timeout(300)
-public class TestOzoneFSInputStream {
-
-  private static MiniOzoneCluster cluster = null;
-  private static OzoneClient client;
-  private static FileSystem fs;
-  private static FileSystem ecFs;
-  private static Path filePath = null;
-  private static byte[] data = null;
-  private static OzoneConfiguration conf = null;
-
-  /**
-   * Create a MiniDFSCluster for testing.
-   * <p>
-   * Ozone is made active by setting OZONE_ENABLED = true
-   *
-   * @throws IOException
-   */
+public abstract class TestOzoneFSInputStream implements NonHATests.TestCase {
+
+  private OzoneClient client;
+  private FileSystem fs;
+  private FileSystem ecFs;
+  private Path filePath = null;
+  private byte[] data = null;
+
   @BeforeAll
-  public static void init() throws Exception {
-    conf = new OzoneConfiguration();
-    conf.set(OMConfigKeys.OZONE_DEFAULT_BUCKET_LAYOUT,
-        BucketLayout.LEGACY.name());
-
-    ClientConfigForTesting.newBuilder(StorageUnit.MB)
-        .setChunkSize(2)
-        .setBlockSize(8)
-        .setStreamBufferFlushSize(2)
-        .setStreamBufferMaxSize(4)
-        .applyTo(conf);
-
-    cluster = MiniOzoneCluster.newBuilder(conf)
-        .setNumDatanodes(5)
-        .build();
-    cluster.waitForClusterToBeReady();
-    client = cluster.newClient();
+  void init() throws Exception {
+    client = cluster().newClient();
 
     // create a volume and a bucket to be used by OzoneFileSystem
     OzoneBucket bucket = TestDataUtil.createVolumeAndBucket(client);
 
     // Set the fs.defaultFS and start the filesystem
     String uri = String.format("%s://%s.%s/",
         OzoneConsts.OZONE_URI_SCHEME, bucket.getName(), bucket.getVolumeName());
-    conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, uri);
-    fs = FileSystem.get(conf);
+    fs = FileSystem.get(URI.create(uri), cluster().getConf());
     int fileLen = 30 * 1024 * 1024;
     data = string2Bytes(RandomStringUtils.randomAlphanumeric(fileLen));
     filePath = new Path("/" + RandomStringUtils.randomAlphanumeric(5));
@@ -129,19 +103,12 @@ public static void init() throws Exception {
         ecBucket);
     String ecUri = String.format("%s://%s.%s/",
         OzoneConsts.OZONE_URI_SCHEME, ecBucket, bucket.getVolumeName());
-    conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, ecUri);
-    ecFs = FileSystem.get(conf);
+    ecFs = FileSystem.get(URI.create(ecUri), cluster().getConf());
   }
 
-  /**
-   * Shutdown MiniDFSCluster.
-   */
   @AfterAll
-  public static void shutdown() throws IOException {
-    IOUtils.cleanupWithLogger(null, client);
-    fs.close();
-    ecFs.close();
-    cluster.shutdown();
+  void shutdown() {
+    closeQuietly(client, fs, ecFs);
   }
 
   @Test
@@ -240,7 +207,7 @@ public void testByteBufferPositionedReadFully() throws IOException {
     // File position should not be changed after positional readFully
     assertEquals(currentPos, inputStream.getPos());
     // Make sure buffer is full after readFully
-    Assertions.assertThat((!buffer.hasRemaining()));
+    assertFalse(buffer.hasRemaining());
 
     byte[] value1 = new byte[bufferCapacity];
     System.arraycopy(buffer.array(), 0, value1, 0, bufferCapacity);
@@ -254,7 +221,7 @@ public void testByteBufferPositionedReadFully() throws IOException {
     position = 8;
     inputStream.readFully(position, buffer);
     assertEquals(currentPos, inputStream.getPos());
-    Assertions.assertThat((!buffer.hasRemaining()));
+    assertFalse(buffer.hasRemaining());
     byte[] value3 = new byte[bufferCapacity];
     System.arraycopy(buffer.array(), 0, value3, 0, bufferCapacity);
     byte[] value4 = new byte[bufferCapacity];
@@ -328,7 +295,7 @@ public void testSequenceFileReaderSync() throws IOException {
     input.close();
 
     // Start SequenceFile.Reader test
-    SequenceFile.Reader in = new SequenceFile.Reader(fs, path, conf);
+    SequenceFile.Reader in = new SequenceFile.Reader(fs, path, cluster().getConf());
     long blockStart = -1;
     // EOFException should not occur.
     in.sync(0);
@@ -350,7 +317,7 @@ public void testSequenceFileReaderSyncEC() throws IOException {
     input.close();
 
     // Start SequenceFile.Reader test
-    SequenceFile.Reader in = new SequenceFile.Reader(ecFs, path, conf);
+    SequenceFile.Reader in = new SequenceFile.Reader(ecFs, path, cluster().getConf());
     long blockStart = -1;
     // EOFException should not occur.
     in.sync(0);
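A pattern worth noting in the init() changes: rather than writing fs.defaultFS into a shared OzoneConfiguration and calling FileSystem.get(conf), the tests now pass the target URI explicitly, so the shared cluster configuration is only read, never mutated. A minimal sketch of the pattern (volume and bucket names are illustrative):

```java
// FileSystem.get(URI, Configuration) resolves scheme and authority from the
// URI itself, so no fs.defaultFS mutation is needed on the shared conf.
FileSystem o3fs = FileSystem.get(
    URI.create("o3fs://bucket.volume/"), cluster().getConf());
try {
  // ... exercise the filesystem ...
} finally {
  closeQuietly(o3fs);  // hdds IOUtils.closeQuietly, as statically imported above
}
```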