Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -50,6 +50,7 @@
import org.apache.log4j.Logger;
import org.junit.jupiter.api.Assertions;
import org.mockito.Mockito;
import org.slf4j.LoggerFactory;

/**
* Provides some very generic helpers which might be used across the tests.
Expand Down Expand Up @@ -179,6 +180,18 @@ public static void setLogLevel(org.slf4j.Logger logger,
setLogLevel(toLog4j(logger), Level.toLevel(level.toString()));
}

/**
 * Runs {@code task} with logging for {@code clazz} fully suppressed,
 * restoring the logger's previous level afterwards (even if the task throws).
 *
 * @param clazz class whose logger should be silenced for the duration of the task
 * @param task  action to execute while the logger is disabled
 */
public static void withLogDisabled(Class<?> clazz, Runnable task) {
  final Logger log4jLogger = toLog4j(LoggerFactory.getLogger(clazz));
  // Remember the current level so it can be reinstated in the finally block.
  final Level previousLevel = log4jLogger.getLevel();
  setLogLevel(log4jLogger, Level.OFF);
  try {
    task.run();
  } finally {
    // Always restore, regardless of whether the task completed normally.
    setLogLevel(log4jLogger, previousLevel);
  }
}

public static <T> T mockFieldReflection(Object object, String fieldName)
throws NoSuchFieldException, IllegalAccessException {
Field field = object.getClass().getDeclaredField(fieldName);
Expand Down
15 changes: 15 additions & 0 deletions hadoop-ozone/dev-support/checks/_mvn_unit_report.sh
Original file line number Diff line number Diff line change
Expand Up @@ -45,6 +45,15 @@ if [[ "${CHECK:-unit}" == "integration" ]]; then
cat ${leaks} >> "${tempfile}"
fi

# Collect the names of integration tests whose output shows a MiniOzoneCluster
# startup failure, one test name per line, and append them to the temp report.
cluster=${REPORT_DIR}/cluster-startup-errors.txt
if [[ "${CHECK:-unit}" == "integration" ]]; then
  # -print0 / -0 keep filenames with spaces intact; grep -l lists matching files,
  # awk strips the directory prefix and the "-output.txt" suffix to get the test name.
  find hadoop-ozone/integration-test -not -path '*/iteration*' -name '*-output.txt' -print0 \
    | xargs -n1 -0 grep -l -E "Unable to build MiniOzoneCluster" \
    | awk -F/ '{sub("-output.txt",""); print $NF}' \
    > "${cluster}"
  # Quote the expansion so an unusual REPORT_DIR path cannot be word-split.
  cat "${cluster}" >> "${tempfile}"
fi

#Copy heap dump and dump leftovers
find "." -not -path '*/iteration*' \
\( -name "*.hprof" \
Expand Down Expand Up @@ -114,6 +123,12 @@ if [[ -s "${leaks}" ]]; then
fi
rm -f "${leaks}"

# If any cluster startup errors were recorded, add a bulleted section to the
# summary file; then remove the working file either way.
if [[ -s "${cluster}" ]]; then
  printf "# Cluster Startup Errors\n\n" >> "$SUMMARY_FILE"
  # sed reads the file directly (avoids a needless cat) and prefixes each
  # test name with " * " to render as a Markdown bullet list.
  sed 's/^/ * /' "${cluster}" >> "$SUMMARY_FILE"
fi
rm -f "${cluster}"

if [[ -s "${crashes}" ]]; then
printf "# Crashed Tests\n\n" >> "$SUMMARY_FILE"
cat "${crashes}" | sed 's/^/ * /' >> "$SUMMARY_FILE"
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -347,7 +347,6 @@ public void test2WayCommitForTimeoutException(RaftProtos.ReplicationLevel watchT
assertEquals(2, ratisClient.getCommitInfoMap().size());
String output = logCapturer.getOutput();
assertThat(output).contains("ALL_COMMITTED way commit failed");
assertThat(output).contains("TimeoutException");
assertThat(output).contains("Committed by majority");
} else {
assertEquals(3, ratisClient.getCommitInfoMap().size());
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -662,6 +662,8 @@ public MiniOzoneCluster build() throws IOException {
stopSCM(scm);
removeConfiguration();

LOG.warn("Unable to build MiniOzoneCluster", ex);

if (ex instanceof IOException) {
throw (IOException) ex;
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -108,13 +108,15 @@
import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo;
import org.apache.ozone.test.GenericTestUtils;
import org.apache.ozone.test.tag.Flaky;
import org.apache.ozone.test.tag.Unhealthy;
import org.junit.jupiter.api.AfterAll;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.io.TempDir;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.EnumSource;

@Unhealthy("HDDS-11879")
class TestOzoneAtRestEncryption {

private static MiniOzoneCluster cluster = null;
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -66,7 +66,6 @@
import org.apache.hadoop.ozone.client.SecretKeyTestClient;
import org.apache.hadoop.ozone.client.io.OzoneInputStream;
import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
import org.apache.hadoop.ozone.om.OMConfigKeys;
import org.apache.hadoop.ozone.om.OMMetadataManager;
import org.apache.hadoop.ozone.om.OMMetrics;
import org.apache.hadoop.ozone.om.OzoneManager;
Expand Down Expand Up @@ -100,7 +99,7 @@ class TestSecureOzoneRpcClient extends OzoneRpcClientTests {

@TempDir
private static File testDir;
private static String keyProviderUri = "kms://http@kms:9600/kms";
private static String keyProviderUri = "kms://http@localhost:9600/kms";
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Let's make it final as well

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This patch is a bugfix, not code cleanup.


@BeforeAll
public static void init() throws Exception {
Expand All @@ -115,11 +114,6 @@ public static void init() throws Exception {
OzoneConfigKeys.OZONE_ACL_AUTHORIZER_CLASS_NATIVE);
CertificateClientTestImpl certificateClientTest =
new CertificateClientTestImpl(conf);
// These tests manually insert keys into RocksDB. This is easier to do
// with object store layout so keys with path IDs do not need to be
// constructed.
conf.set(OMConfigKeys.OZONE_DEFAULT_BUCKET_LAYOUT,
OMConfigKeys.OZONE_BUCKET_LAYOUT_OBJECT_STORE);
conf.setBoolean(OzoneConfigKeys.OZONE_HBASE_ENHANCEMENTS_ALLOWED, true);
conf.setBoolean("ozone.client.hbase.enhancements.allowed", true);
conf.setBoolean(OzoneConfigKeys.OZONE_FS_HSYNC_ENABLED, true);
Expand Down Expand Up @@ -236,26 +230,27 @@ public void testFileRecovery(boolean forceRecovery) throws Exception {
// force recovery file
System.setProperty(FORCE_LEASE_RECOVERY_ENV, String.valueOf(forceRecovery));
conf.setBoolean(String.format("fs.%s.impl.disable.cache", OZONE_OFS_URI_SCHEME), true);
RootedOzoneFileSystem fs = (RootedOzoneFileSystem) FileSystem.get(conf);
OzoneOutputStream out = null;
try {
out = bucket.createKey(keyName, value.getBytes(UTF_8).length, ReplicationType.RATIS,
ReplicationFactor.THREE, new HashMap<>());
out.write(value.getBytes(UTF_8));
out.hsync();

if (forceRecovery) {
fs.recoverLease(file);
} else {
assertThrows(OMException.class, () -> fs.recoverLease(file));
}
} finally {
if (out != null) {
try (RootedOzoneFileSystem fs = (RootedOzoneFileSystem) FileSystem.get(conf)) {
OzoneOutputStream out = null;
try {
out = bucket.createKey(keyName, value.getBytes(UTF_8).length, ReplicationType.RATIS,
ReplicationFactor.THREE, new HashMap<>());
out.write(value.getBytes(UTF_8));
out.hsync();

if (forceRecovery) {
// close failure because the key is already committed
assertThrows(OMException.class, out::close);
fs.recoverLease(file);
} else {
out.close();
assertThrows(OMException.class, () -> fs.recoverLease(file));
}
} finally {
if (out != null) {
if (forceRecovery) {
// close failure because the key is already committed
assertThrows(OMException.class, out::close);
} else {
out.close();
}
}
}
}
Expand Down Expand Up @@ -288,51 +283,52 @@ public void testPreallocateFileRecovery(long dataSize) throws Exception {
final String rootPath = String.format("%s://%s/",
OZONE_OFS_URI_SCHEME, conf.get(OZONE_OM_ADDRESS_KEY));
conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, rootPath);
RootedOzoneFileSystem fs = (RootedOzoneFileSystem) FileSystem.get(conf);
OzoneOutputStream out = null;
long totalBlock = 10;
long usedBlock = (dataSize - 1) / fs.getDefaultBlockSize() + 1;
long fileSize = fs.getDefaultBlockSize() * totalBlock;
OMMetrics metrics = getCluster().getOzoneManager().getMetrics();
long committedBytes = metrics.getDataCommittedBytes();
try {
out = bucket.createKey(keyName, fileSize, ReplicationType.RATIS,
ReplicationFactor.THREE, new HashMap<>());
// init used quota check
bucket = volume.getBucket(bucketName);
assertEquals(0, bucket.getUsedNamespace());
assertEquals(0, bucket.getUsedBytes());

out.write(data);
out.hsync();
fs.recoverLease(file);

// check file length
FileStatus fileStatus = fs.getFileStatus(file);
assertEquals(dataSize, fileStatus.getLen());
// check committed bytes
assertEquals(committedBytes + dataSize,
getCluster().getOzoneManager().getMetrics().getDataCommittedBytes());
// check used quota
bucket = volume.getBucket(bucketName);
assertEquals(1, bucket.getUsedNamespace());
assertEquals(dataSize * ReplicationFactor.THREE.getValue(), bucket.getUsedBytes());

// check unused pre-allocated blocks are reclaimed
Table<String, RepeatedOmKeyInfo> deletedTable =
getCluster().getOzoneManager().getMetadataManager().getDeletedTable();
try (TableIterator<String, ? extends Table.KeyValue<String, RepeatedOmKeyInfo>>
keyIter = deletedTable.iterator()) {
while (keyIter.hasNext()) {
Table.KeyValue<String, RepeatedOmKeyInfo> kv = keyIter.next();
OmKeyInfo key = kv.getValue().getOmKeyInfoList().get(0);
assertEquals(totalBlock - usedBlock, key.getKeyLocationVersions().get(0).getLocationListCount());
try (RootedOzoneFileSystem fs = (RootedOzoneFileSystem) FileSystem.get(conf)) {
OzoneOutputStream out = null;
long totalBlock = 10;
long usedBlock = (dataSize - 1) / fs.getDefaultBlockSize() + 1;
long fileSize = fs.getDefaultBlockSize() * totalBlock;
OMMetrics metrics = getCluster().getOzoneManager().getMetrics();
long committedBytes = metrics.getDataCommittedBytes();
try {
out = bucket.createKey(keyName, fileSize, ReplicationType.RATIS,
ReplicationFactor.THREE, new HashMap<>());
// init used quota check
bucket = volume.getBucket(bucketName);
assertEquals(0, bucket.getUsedNamespace());
assertEquals(0, bucket.getUsedBytes());

out.write(data);
out.hsync();
fs.recoverLease(file);

// check file length
FileStatus fileStatus = fs.getFileStatus(file);
assertEquals(dataSize, fileStatus.getLen());
// check committed bytes
assertEquals(committedBytes + dataSize,
getCluster().getOzoneManager().getMetrics().getDataCommittedBytes());
// check used quota
bucket = volume.getBucket(bucketName);
assertEquals(1, bucket.getUsedNamespace());
assertEquals(dataSize * ReplicationFactor.THREE.getValue(), bucket.getUsedBytes());

// check unused pre-allocated blocks are reclaimed
Table<String, RepeatedOmKeyInfo> deletedTable =
getCluster().getOzoneManager().getMetadataManager().getDeletedTable();
try (TableIterator<String, ? extends Table.KeyValue<String, RepeatedOmKeyInfo>>
keyIter = deletedTable.iterator()) {
while (keyIter.hasNext()) {
Table.KeyValue<String, RepeatedOmKeyInfo> kv = keyIter.next();
OmKeyInfo key = kv.getValue().getOmKeyInfoList().get(0);
assertEquals(totalBlock - usedBlock, key.getKeyLocationVersions().get(0).getLocationListCount());
}
}
} finally {
if (out != null) {
// close failure because the key is already committed
assertThrows(OMException.class, out::close);
}
}
} finally {
if (out != null) {
// close failure because the key is already committed
assertThrows(OMException.class, out::close);
}
}
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -27,10 +27,12 @@
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.MiniOzoneClusterImpl;
import org.apache.hadoop.ozone.om.exceptions.OMException;
import org.apache.hadoop.ozone.om.upgrade.OMLayoutFeature;
import org.apache.hadoop.ozone.upgrade.LayoutFeature;
import org.apache.hadoop.ozone.upgrade.UpgradeTestUtils;
import org.apache.ozone.test.GenericTestUtils;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.io.TempDir;

Expand Down Expand Up @@ -66,11 +68,13 @@ public void testStartupSlvLessThanMlv() throws Exception {

MiniOzoneCluster.Builder clusterBuilder = MiniOzoneCluster.newBuilder(conf);

OMException omException = assertThrows(OMException.class,
clusterBuilder::build);
String expectedMessage = String.format("Cannot initialize " +
"VersionManager. Metadata layout version (%s) > software layout" +
" version (%s)", mlv, largestSlv);
assertEquals(expectedMessage, omException.getMessage());
GenericTestUtils.withLogDisabled(MiniOzoneClusterImpl.class, () -> {
OMException omException = assertThrows(OMException.class,
clusterBuilder::build);
String expectedMessage = String.format("Cannot initialize " +
"VersionManager. Metadata layout version (%s) > software layout" +
" version (%s)", mlv, mlv - 1);
assertEquals(expectedMessage, omException.getMessage());
});
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -35,11 +35,13 @@
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.MiniOzoneClusterImpl;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.ozone.OzoneIllegalArgumentException;
import org.apache.hadoop.ozone.ha.ConfUtils;
import org.apache.hadoop.ozone.om.helpers.OMNodeDetails;
import org.apache.hadoop.ozone.om.ratis.OzoneManagerRatisServer;
import org.apache.ozone.test.GenericTestUtils;
import org.apache.ratis.protocol.RaftPeer;
import org.apache.ratis.util.LifeCycle;
import org.junit.jupiter.api.AfterEach;
Expand Down Expand Up @@ -342,34 +344,35 @@ public void testWrongConfiguration() {
conf.set(omNode2RpcAddrKey, "125.0.0.2:9862");
conf.set(omNode3RpcAddrKey, "124.0.0.124:9862");

OzoneIllegalArgumentException exception = assertThrows(OzoneIllegalArgumentException.class, this::startCluster);
assertThat(exception).hasMessage(
"Configuration has no " + OZONE_OM_ADDRESS_KEY + " address that matches local node's address.");
GenericTestUtils.withLogDisabled(MiniOzoneClusterImpl.class, () -> {
Exception exception = assertThrows(OzoneIllegalArgumentException.class, this::startCluster);
assertThat(exception).hasMessage(
"Configuration has no " + OZONE_OM_ADDRESS_KEY + " address that matches local node's address.");
});
}

/**
* A configuration with an empty node list while service ID is configured.
* Cluster should fail to start during config check.
* @throws Exception
*/
@Test
public void testNoOMNodes() throws Exception {
public void testNoOMNodes() {
String omServiceId = "service1";
conf.set(OMConfigKeys.OZONE_OM_SERVICE_IDS_KEY, omServiceId);
// Deliberately skip OZONE_OM_NODES_KEY and OZONE_OM_ADDRESS_KEY config
OzoneIllegalArgumentException e =
assertThrows(OzoneIllegalArgumentException.class, () -> startCluster());
// Expect error message
assertTrue(e.getMessage().contains("List of OM Node ID's should be specified"));
GenericTestUtils.withLogDisabled(MiniOzoneClusterImpl.class, () -> {
Exception e = assertThrows(OzoneIllegalArgumentException.class, this::startCluster);
// Expect error message
assertThat(e).hasMessageContaining("List of OM Node ID's should be specified");
});
}

/**
* A configuration with no OM addresses while service ID is configured.
* Cluster should fail to start during config check.
* @throws Exception
*/
@Test
public void testNoOMAddrs() throws Exception {
public void testNoOMAddrs() {
String omServiceId = "service1";

String omNode1Id = "omNode1";
Expand All @@ -382,9 +385,11 @@ public void testNoOMAddrs() throws Exception {
conf.set(OMConfigKeys.OZONE_OM_SERVICE_IDS_KEY, omServiceId);
conf.set(omNodesKey, omNodesKeyValue);
// Deliberately skip OZONE_OM_ADDRESS_KEY config
OzoneIllegalArgumentException e = assertThrows(OzoneIllegalArgumentException.class, () -> startCluster());
// Expect error message
assertTrue(e.getMessage().contains("OM RPC Address should be set for all node"));
GenericTestUtils.withLogDisabled(MiniOzoneClusterImpl.class, () -> {
Exception e = assertThrows(OzoneIllegalArgumentException.class, this::startCluster);
// Expect error message
assertThat(e).hasMessageContaining("OM RPC Address should be set for all node");
});
}

/**
Expand Down
Loading