5 changes: 5 additions & 0 deletions hadoop-hdds/common/pom.xml
@@ -183,6 +183,11 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-hdds-interface-client</artifactId>
</dependency>
<dependency>
<groupId>org.hamcrest</groupId>
<artifactId>hamcrest-all</artifactId>
<scope>test</scope>
</dependency>
</dependencies>

<build>
@@ -25,13 +25,20 @@

import java.io.File;
import java.io.IOException;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;

import static org.apache.hadoop.ozone.audit.AuditEventStatus.FAILURE;
import static org.apache.hadoop.ozone.audit.AuditEventStatus.SUCCESS;
import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.core.StringContains.containsString;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import org.hamcrest.Matcher;
import org.hamcrest.collection.IsIterableContainingInOrder;


/**
* Test Ozone Audit Logger.
@@ -143,7 +150,32 @@ public void notLogReadEvents() throws IOException {
verifyNoLog();
}

private void verifyLog(String expected) throws IOException {
/**
 * Verify that multi-line audit log entries (such as exception stack traces)
 * can be checked.
 */
@Test
public void messageIncludesMultilineException() throws IOException {
String exceptionMessage = "Dummy exception message";
TestException testException = new TestException(exceptionMessage);
AuditMessage exceptionAuditMessage =
new AuditMessage.Builder()
.setUser(USER)
.atIp(IP_ADDRESS)
.forOperation(DummyAction.CREATE_VOLUME)
.withParams(PARAMS)
.withResult(FAILURE)
.withException(testException).build();
AUDIT.logWriteFailure(exceptionAuditMessage);
verifyLog(
"ERROR | OMAudit | user=john | ip=192.168.0.1 | op=CREATE_VOLUME {key1=value1, key2=value2} | ret=FAILURE",
"org.apache.hadoop.ozone.audit.TestOzoneAuditLogger$TestException: Dummy exception message",
"at org.apache.hadoop.ozone.audit.TestOzoneAuditLogger.messageIncludesMultilineException(TestOzoneAuditLogger.java:160) [test-classes/:?]");


}

private void verifyLog(String... expectedStrings) throws IOException {
File file = new File("audit.log");
List<String> lines = FileUtils.readLines(file, (String)null);
final int retry = 5;
@@ -158,20 +190,44 @@ private void verifyLog(String expected) throws IOException {
}
i++;
}

// When log entry is expected, the log file will contain one line and
// that must be equal to the expected string
assertTrue(lines.size() != 0);
assertTrue(expected.equalsIgnoreCase(lines.get(0)));
// check that each expected string is contained in the corresponding log line, in order
assertThat(
lines.subList(0, expectedStrings.length),
containsInOrder(expectedStrings)
);
//empty the file
lines.clear();
FileUtils.writeLines(file, lines, false);
}

private boolean contains(List<String> lines, String searched) {
for (String line : lines) {
if (line.toLowerCase().contains(searched.toLowerCase())) {
return true;
}
}
return false;
}

private void verifyNoLog() throws IOException {
File file = new File("audit.log");
List<String> lines = FileUtils.readLines(file, (String)null);
// When no log entry is expected, the log file must be empty
assertEquals(0, lines.size());
}

private class TestException extends Exception {
TestException(String message) {
super(message);
}
}

private Matcher<Iterable<? extends String>> containsInOrder(
String[] expectedStrings) {
return IsIterableContainingInOrder.contains(
Arrays.stream(expectedStrings)
.map(str -> containsString(str))
.collect(Collectors.toList())
);
}
}
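
For reference, here is a minimal, self-contained sketch (an editor's illustration, not part of the patch) of the matcher composition the new verifyLog relies on: hamcrest's IsIterableContainingInOrder.contains takes one matcher per element and requires the elements to satisfy them in order, while containsString only checks for a substring. The class name and log lines below are made up; only hamcrest (the hamcrest-all test dependency added in the pom.xml change above) is needed on the classpath.

import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.core.StringContains.containsString;

import java.util.Arrays;
import java.util.List;
import org.hamcrest.collection.IsIterableContainingInOrder;

public class InOrderSubstringDemo {
  public static void main(String[] args) {
    // Pretend these are the first lines read back from audit.log.
    List<String> lines = Arrays.asList(
        "ERROR | OMAudit | user=john | ip=192.168.0.1 | op=CREATE_VOLUME | ret=FAILURE",
        "org.example.DummyException: Dummy exception message",
        "at org.example.SomeTest.someMethod(SomeTest.java:42)");

    // One matcher per expected line; each only has to match a substring,
    // and the lines must appear in the same order as the matchers.
    assertThat(lines, IsIterableContainingInOrder.contains(
        containsString("ret=FAILURE"),
        containsString("Dummy exception message"),
        containsString("at org.example.SomeTest")));
  }
}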
2 changes: 1 addition & 1 deletion hadoop-hdds/docs/content/interface/S3.md
@@ -24,7 +24,7 @@ summary: Ozone supports Amazon's Simple Storage Service (S3) protocol. In fact,

Ozone provides an S3-compatible REST interface, so the object store can be used with any S3-compatible tools.

S3 buckets are stored under the `/s3v` volume. The default name `s3v` can be changed by setting the `ozone.s3g.volume.name` config property in `ozone-site.xml`.
S3 buckets are stored under the `/s3v` volume.
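
As a quick illustration (not from this page), a bucket created through the S3 gateway is backed by a bucket under the `/s3v` volume. The sketch below uses the AWS SDK for Java v1; the endpoint, region and credentials are placeholder assumptions for a local test setup.

import com.amazonaws.auth.AWSStaticCredentialsProvider;
import com.amazonaws.auth.BasicAWSCredentials;
import com.amazonaws.client.builder.AwsClientBuilder;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;

public class OzoneS3Sketch {
  public static void main(String[] args) {
    // Placeholder endpoint and credentials for a local Ozone S3 gateway.
    AmazonS3 s3 = AmazonS3ClientBuilder.standard()
        .withEndpointConfiguration(
            new AwsClientBuilder.EndpointConfiguration("http://localhost:9878", "us-east-1"))
        .withCredentials(new AWSStaticCredentialsProvider(
            new BasicAWSCredentials("accessKey", "secretKey")))
        .enablePathStyleAccess()
        .build();

    // The bucket created here shows up as a bucket in the /s3v volume.
    s3.createBucket("bucket1");
    s3.listBuckets().forEach(b -> System.out.println(b.getName()));
  }
}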

## Getting started

@@ -407,6 +407,7 @@ private RaftProperties newRaftProperties(ConfigurationSource conf) {
StorageUnit.BYTES);
RaftServerConfigKeys.Log.setSegmentSizeMax(properties,
SizeInBytes.valueOf(raftSegmentSize));
RaftServerConfigKeys.Log.setPurgeUptoSnapshotIndex(properties, true);

// Set RAFT segment pre-allocated size
final int raftSegmentPreallocatedSize = (int) conf.getStorageSize(
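
For context, a standalone sketch (an editor's illustration) of the two Ratis log settings touched in this hunk: setSegmentSizeMax caps the size of each Raft log segment, and the newly added setPurgeUptoSnapshotIndex(true) lets Ratis purge log entries up to the latest snapshot index so old segments can be reclaimed. The 16MB value is only a placeholder, and Ratis must be on the classpath.

import org.apache.ratis.conf.RaftProperties;
import org.apache.ratis.server.RaftServerConfigKeys;
import org.apache.ratis.util.SizeInBytes;

public class RaftLogPropertiesSketch {
  public static void main(String[] args) {
    RaftProperties properties = new RaftProperties();

    // Cap the size of each Raft log segment (placeholder value).
    RaftServerConfigKeys.Log.setSegmentSizeMax(properties, SizeInBytes.valueOf("16MB"));

    // Allow purging of log entries up to the latest snapshot index.
    RaftServerConfigKeys.Log.setPurgeUptoSnapshotIndex(properties, true);
  }
}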
@@ -363,21 +363,11 @@ public long takeSnapshot() throws IOException {
public CompletableFuture<TermIndex> notifyInstallSnapshotFromLeader(
RaftProtos.RoleInfoProto roleInfoProto, TermIndex firstTermIndexInLog) {

String leaderNodeId = RaftPeerId.valueOf(roleInfoProto.getSelf().getId())
.toString();

LOG.info("Received install snapshot notificaiton form OM leader: {} with " +
String leaderNodeId = RaftPeerId.valueOf(roleInfoProto.getFollowerInfo()
.getLeaderInfo().getId().getId()).toString();
LOG.info("Received install snapshot notification from OM leader: {} with " +
"term index: {}", leaderNodeId, firstTermIndexInLog);

if (!roleInfoProto.getRole().equals(RaftProtos.RaftPeerRole.LEADER)) {
// A non-leader Ratis server should not send this notification.
LOG.error("Received Install Snapshot notification from non-leader OM " +
"node: {}. Ignoring the notification.", leaderNodeId);
return completeExceptionally(new OMException("Received notification to " +
"install snaphost from non-leader OM node",
OMException.ResultCodes.RATIS_ERROR));
}

CompletableFuture<TermIndex> future = CompletableFuture.supplyAsync(
() -> ozoneManager.installSnapshotFromLeader(leaderNodeId),
installSnapshotExecutor);
@@ -301,6 +301,16 @@ public void printReport() {
messages.forEach(print);
}

/**
* Print out the given message, either to the console (interactive mode) or to the log.
*/
public void print(String msg){
Consumer<String> print = freonCommand.isInteractive()
? System.out::println
: LOG::info;
print.accept(msg);
}
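
To spell out the dispatch this helper uses, here is a tiny standalone sketch (an editor's illustration, with slf4j assumed and a boolean standing in for freonCommand.isInteractive()): a Consumer<String> is bound to either System.out::println or the logger, then invoked with the message.

import java.util.function.Consumer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class PrintDispatchSketch {
  private static final Logger LOG = LoggerFactory.getLogger(PrintDispatchSketch.class);

  // 'interactive' stands in for freonCommand.isInteractive() in the real class.
  static void print(boolean interactive, String msg) {
    Consumer<String> print = interactive ? System.out::println : LOG::info;
    print.accept(msg);
  }

  public static void main(String[] args) {
    print(true, "shown on the console");
    print(false, "written to the log");
  }
}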

/**
* Create the OM RPC client to use it for testing.
*/
@@ -97,17 +97,24 @@ public class HadoopDirTreeGenerator extends BaseFreonGenerator

@Override
public Void call() throws Exception {

init();
OzoneConfiguration configuration = createOzoneConfiguration();
fileSystem = FileSystem.get(URI.create(rootPath), configuration);

contentGenerator = new ContentGenerator(fileSizeInBytes, bufferSize);
timer = getMetrics().timer("file-create");

runTests(this::createDir);
String s;
if (depth <= 0) {
s = "Invalid depth value, depth value should be greater than zero!";
print(s);
} else if (span <= 0) {
s = "Invalid span value, span value should be greater than zero!";
print(s);
} else {
init();
OzoneConfiguration configuration = createOzoneConfiguration();
fileSystem = FileSystem.get(URI.create(rootPath), configuration);

contentGenerator = new ContentGenerator(fileSizeInBytes, bufferSize);
timer = getMetrics().timer("file-create");

runTests(this::createDir);
}
return null;

}

/*
@@ -139,21 +146,14 @@ public Void call() throws Exception {
created.
*/
private void createDir(long counter) throws Exception {
if (depth <= 0) {
LOG.info("Invalid depth value, at least one depth should be passed!");
return;
}
if (span <= 0) {
LOG.info("Invalid span value, at least one span should be passed!");
return;
}
String dir = makeDirWithGivenNumberOfFiles(rootPath);
if (depth > 1) {
createSubDirRecursively(dir, 1, 1);
}
System.out.println("Successfully created directories & files. Total Dirs " +
String message = "Successfully created directories & files. Total Dirs " +
"Count=" + totalDirsCnt.get() + ", Total Files Count=" +
timer.getCount());
timer.getCount();
print(message);
}

private void createSubDirRecursively(String parent, int depthIndex,
@@ -72,13 +72,20 @@ public class HadoopNestedDirGenerator extends BaseFreonGenerator

@Override
public Void call() throws Exception {

init();
OzoneConfiguration configuration = createOzoneConfiguration();
fileSystem = FileSystem.get(URI.create(rootPath), configuration);
runTests(this::createDir);
String s;
if (depth <= 0) {
s = "Invalid depth value, depth value should be greater than zero!";
print(s);
} else if (span < 0) {
s = "Invalid span value, span value should be greater or equal to zero!";
print(s);
} else {
init();
OzoneConfiguration configuration = createOzoneConfiguration();
fileSystem = FileSystem.get(URI.create(rootPath), configuration);
runTests(this::createDir);
}
return null;

}

/*
@@ -109,5 +116,8 @@ private void createDir(long counter) throws Exception {
Path dir = new Path(rootPath.concat("/").concat(childDir));
fileSystem.mkdirs(dir.getParent());
}
String message = "\nSuccessfully created directories. " +
"Total Directories with level = " + depth + " and span = " + span;
print(message);
}
}