Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -21,6 +21,8 @@
import org.slf4j.Logger;

import java.io.Closeable;
import java.util.Arrays;
import java.util.Collection;

/**
* Static helper utilities for IO / Closeable classes.
Expand Down Expand Up @@ -59,6 +61,14 @@ public static void cleanupWithLogger(Logger logger, Closeable... closeables) {
* Close each argument, catching exceptions and logging them as error.
*/
public static void close(Logger logger, AutoCloseable... closeables) {
  // A null varargs array (e.g. close(logger, (AutoCloseable[]) null)) must be
  // a no-op, matching the Collection overload's null handling;
  // Arrays.asList(null) would otherwise throw NullPointerException.
  if (closeables != null) {
    close(logger, Arrays.asList(closeables));
  }
}

/**
* Close each argument, catching exceptions and logging them as error.
*/
public static void close(Logger logger,
Collection<AutoCloseable> closeables) {
if (closeables == null) {
return;
}
Expand All @@ -82,4 +92,10 @@ public static void closeQuietly(AutoCloseable... closeables) {
close(null, closeables);
}

/**
 * Close each argument, swallowing exceptions.
 * Delegates to {@code close(Logger, Collection)} with a null logger, so
 * failures during close are ignored rather than logged. A null collection
 * is a no-op.
 */
public static void closeQuietly(Collection<AutoCloseable> closeables) {
close(null, closeables);
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -51,7 +51,7 @@ SCMRatisResponse submitRequest(SCMRatisRequest request)
/**
* Returns roles of ratis peers.
*/
List<String> getRatisRoles() throws IOException;
List<String> getRatisRoles();

/**
* Returns NotLeaderException with useful info.
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -248,7 +248,7 @@ public boolean isStopped() {


@Override
public List<String> getRatisRoles() throws IOException {
public List<String> getRatisRoles() {
Collection<RaftPeer> peers = division.getGroup().getPeers();
RaftPeer leader = getLeader();
List<String> ratisRoles = new ArrayList<>();
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -778,7 +778,7 @@ public void closePipeline(HddsProtos.PipelineID pipelineID)
}

@Override
public ScmInfo getScmInfo() throws IOException {
public ScmInfo getScmInfo() {
boolean auditSuccess = true;
try {
ScmInfo.Builder builder =
Expand Down
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
/**
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
Expand All @@ -18,7 +18,6 @@

package org.apache.hadoop.hdds.scm.server;

import java.io.IOException;
import java.util.Map;

import org.apache.hadoop.hdds.annotation.InterfaceAudience;
Expand Down Expand Up @@ -66,7 +65,7 @@ public interface SCMMXBean extends ServiceRuntimeInfo {

String getClusterId();

String getScmRatisRoles() throws IOException;
String getScmRatisRoles();

/**
* Primordial node is the node on which scm init operation is performed.
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -2057,7 +2057,7 @@ public ContainerTokenGenerator getContainerTokenGenerator() {
}

@Override
public String getScmRatisRoles() throws IOException {
public String getScmRatisRoles() {
final SCMRatisServer server = getScmHAManager().getRatisServer();
return server != null ?
HddsUtils.format(server.getRatisRoles()) : "STANDALONE";
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -279,6 +279,16 @@ void restartHddsDatanode(DatanodeDetails dn, boolean waitForDatanode)
*/
void shutdownHddsDatanodes();

/** Returns the unique identifier of this cluster. */
String getClusterId();

/**
 * Human-readable name for this cluster: the implementing class's simple
 * name joined with the cluster id.
 */
default String getName() {
return getClass().getSimpleName() + "-" + getClusterId();
}

/**
 * Base directory for this cluster's on-disk state, derived from
 * {@link #getName()} under the test temp path.
 */
default String getBaseDir() {
return GenericTestUtils.getTempPath(getName());
}

/**
* Builder class for MiniOzoneCluster.
*/
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -431,18 +431,16 @@ public void shutdownHddsDatanode(DatanodeDetails dn) throws IOException {
shutdownHddsDatanode(getHddsDatanodeIndex(dn));
}

public String getClusterId() throws IOException {
public String getClusterId() {
return scm.getClientProtocolServer().getScmInfo().getClusterId();
}

@Override
public void shutdown() {
try {
LOG.info("Shutting down the Mini Ozone Cluster");
IOUtils.closeQuietly(clients.toArray(new AutoCloseable[0]));
File baseDir = new File(GenericTestUtils.getTempPath(
MiniOzoneClusterImpl.class.getSimpleName() + "-" +
getClusterId()));
IOUtils.closeQuietly(clients);
final File baseDir = new File(getBaseDir());
stop();
FileUtils.deleteDirectory(baseDir);
ContainerCache.getInstance(conf).shutdownCache();
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -267,7 +267,8 @@ public StorageContainerManager restartStorageContainerManager(
return scm;
}

public String getClusterId() throws IOException {
/**
 * Returns the cluster id as reported by the first SCM instance's client
 * protocol server.
 */
@Override
public String getClusterId() {
return scmhaService.getServices().get(0)
.getClientProtocolServer().getScmInfo().getClusterId();
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -18,12 +18,6 @@

package org.apache.hadoop.ozone;

import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;

import org.apache.hadoop.hdds.DFSConfigKeysLegacy;
import org.apache.hadoop.hdds.HddsConfigKeys;
import org.apache.hadoop.hdds.client.StandaloneReplicationConfig;
Expand All @@ -43,22 +37,27 @@
import org.apache.hadoop.ozone.container.ozoneimpl.TestOzoneContainer;
import org.apache.hadoop.test.PathUtils;
import org.apache.hadoop.test.TestGenericTestUtils;
import org.apache.ozone.test.GenericTestUtils;
import org.apache.ozone.test.tag.Flaky;

import static org.apache.hadoop.hdds.protocol.DatanodeDetails.Port;
import static org.apache.hadoop.hdds.protocol.MockDatanodeDetails.randomDatanodeDetails;
import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_RANDOM_PORT;

import org.junit.Assert;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotEquals;
import static org.junit.Assert.assertTrue;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.Timeout;

import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;

import static org.apache.hadoop.hdds.protocol.DatanodeDetails.Port;
import static org.apache.hadoop.hdds.protocol.MockDatanodeDetails.randomDatanodeDetails;
import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_RANDOM_PORT;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertNotEquals;
import static org.junit.jupiter.api.Assertions.assertTrue;

/**
* Test cases for mini ozone cluster.
*/
Expand Down Expand Up @@ -160,18 +159,18 @@ public void testContainerRandomPort() throws IOException {
for (DatanodeStateMachine dsm : stateMachines) {
int readPort = dsm.getContainer().getReadChannel().getIPCPort();

assertNotEquals("Port number of the service is not updated", 0,
readPort);
assertNotEquals(0, readPort,
"Port number of the service is not updated");

assertTrue("Port of datanode service is conflicted with other server.",
ports.add(readPort));
assertTrue(ports.add(readPort),
"Port of datanode service is conflicted with other server.");

int writePort = dsm.getContainer().getWriteChannel().getIPCPort();

assertNotEquals("Port number of the service is not updated", 0,
writePort);
assertTrue("Port of datanode service is conflicted with other server.",
ports.add(writePort));
assertNotEquals(0, writePort,
"Port number of the service is not updated");
assertTrue(ports.add(writePort),
"Port of datanode service is conflicted with other server.");
}

} finally {
Expand Down Expand Up @@ -243,7 +242,7 @@ public void testDNstartAfterSCM() throws Exception {
for (int i = 0; i < 20; i++) {
for (EndpointStateMachine endpoint :
dnStateMachine.getConnectionManager().getValues()) {
Assert.assertEquals(
assertEquals(
EndpointStateMachine.EndPointStates.GETVERSION,
endpoint.getState());
}
Expand All @@ -258,7 +257,7 @@ public void testDNstartAfterSCM() throws Exception {
// DN should be in HEARTBEAT state after registering with the SCM
for (EndpointStateMachine endpoint :
dnStateMachine.getConnectionManager().getValues()) {
Assert.assertEquals(EndpointStateMachine.EndPointStates.HEARTBEAT,
assertEquals(EndpointStateMachine.EndPointStates.HEARTBEAT,
endpoint.getState());
}
}
Expand All @@ -278,13 +277,21 @@ public void testMultipleDataDirs() throws Exception {
.build();
cluster.waitForClusterToBeReady();

final String name = MiniOzoneClusterImpl.class.getSimpleName()
+ "-" + cluster.getClusterId();
assertEquals(name, cluster.getName());

final String baseDir = GenericTestUtils.getTempPath(name);
assertEquals(baseDir, cluster.getBaseDir());


List<StorageVolume> volumeList = cluster.getHddsDatanodes().get(0)
.getDatanodeStateMachine().getContainer().getVolumeSet()
.getVolumesList();

Assert.assertEquals(3, volumeList.size());
assertEquals(3, volumeList.size());

volumeList.forEach(storageVolume -> Assert.assertEquals(
volumeList.forEach(storageVolume -> assertEquals(
(long) StorageSize.parse(reservedSpace).getValue(),
storageVolume.getVolumeInfo().get().getReservedInBytes()));
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -298,7 +298,7 @@ public void testBootStrapSCM() throws Exception {
}

@Test
public void testGetRatisRolesDetail() throws IOException {
public void testGetRatisRolesDetail() {
Set<String> resultSet = new HashSet<>();
for (StorageContainerManager scm: cluster.getStorageContainerManagers()) {
resultSet.addAll(scm.getScmHAManager().getRatisServer().getRatisRoles());
Expand Down