diff --git a/hadoop-hdds/container-service/dev-support/findbugsExcludeFile.xml b/hadoop-hdds/container-service/dev-support/findbugsExcludeFile.xml
index f68fa91db864..40d78d0cd6ce 100644
--- a/hadoop-hdds/container-service/dev-support/findbugsExcludeFile.xml
+++ b/hadoop-hdds/container-service/dev-support/findbugsExcludeFile.xml
@@ -15,70 +15,4 @@
limitations under the License.
-->
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/AbstractBackgroundContainerScanner.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/AbstractBackgroundContainerScanner.java
index dd7e30a5d9d2..802a86af44d3 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/AbstractBackgroundContainerScanner.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/AbstractBackgroundContainerScanner.java
@@ -29,12 +29,12 @@
/**
* Base class for scheduled scanners on a Datanode.
*/
-public abstract class AbstractBackgroundContainerScanner extends Thread {
+public abstract class AbstractBackgroundContainerScanner implements Runnable {
public static final Logger LOG =
LoggerFactory.getLogger(AbstractBackgroundContainerScanner.class);
private final long dataScanInterval;
-
+ private final Thread scannerThread;
private final AtomicBoolean stopping;
private final AtomicBoolean pausing = new AtomicBoolean();
@@ -42,8 +42,13 @@ public AbstractBackgroundContainerScanner(String name,
long dataScanInterval) {
this.dataScanInterval = dataScanInterval;
this.stopping = new AtomicBoolean(false);
- setName(name);
- setDaemon(true);
+
+ this.scannerThread = new Thread(this, name);
+ this.scannerThread.setDaemon(true);
+ }
+
+ public void start() {
+ scannerThread.start();
}
@Override
@@ -141,9 +146,9 @@ public final void handleRemainingSleep(long remainingSleep) {
*/
public synchronized void shutdown() {
if (stopping.compareAndSet(false, true)) {
- this.interrupt();
+ scannerThread.interrupt();
try {
- this.join();
+ scannerThread.join();
} catch (InterruptedException ex) {
LOG.warn("Unexpected exception while stopping data scanner.", ex);
Thread.currentThread().interrupt();
@@ -151,6 +156,10 @@ public synchronized void shutdown() {
}
}
+ public boolean isAlive() {
+ return scannerThread.isAlive();
+ }
+
public void pause() {
pausing.getAndSet(true);
}
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/SCMTestUtils.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/SCMTestUtils.java
index 472fc6c640ec..8a520da3cdbb 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/SCMTestUtils.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/SCMTestUtils.java
@@ -17,6 +17,7 @@
package org.apache.hadoop.ozone.container.common;
+import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.mockito.Mockito.mock;
import com.google.protobuf.BlockingService;
@@ -122,12 +123,18 @@ public static InetSocketAddress getReuseableAddress() throws IOException {
public static OzoneConfiguration getConf(File testDir) {
OzoneConfiguration conf = new OzoneConfiguration();
+ File datanodeDir = new File(testDir, "datanode");
+ File metadataDir = new File(testDir, "metadata");
+ File datanodeIdDir = new File(testDir, "datanodeID");
+ assertTrue(datanodeDir.mkdirs());
+ assertTrue(metadataDir.mkdirs());
+ assertTrue(datanodeIdDir.mkdirs());
conf.set(ScmConfigKeys.HDDS_DATANODE_DIR_KEY,
- new File(testDir, "datanode").getAbsolutePath());
+ datanodeDir.getAbsolutePath());
conf.set(HddsConfigKeys.OZONE_METADATA_DIRS,
- new File(testDir, "metadata").getAbsolutePath());
+ metadataDir.getAbsolutePath());
conf.set(ScmConfigKeys.OZONE_SCM_DATANODE_ID_DIR,
- new File(testDir, "datanodeID").getAbsolutePath());
+ datanodeIdDir.getAbsolutePath());
conf.setClass(SpaceUsageCheckFactory.Conf.configKeyForClassName(),
MockSpaceUsageCheckFactory.None.class,
SpaceUsageCheckFactory.class);
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestContainerCache.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestContainerCache.java
index 3beb734f1747..f8ae2f3bdcd2 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestContainerCache.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestContainerCache.java
@@ -22,6 +22,7 @@
import static org.junit.jupiter.api.Assertions.assertNotNull;
import static org.junit.jupiter.api.Assertions.assertNull;
import static org.junit.jupiter.api.Assertions.assertThrows;
+import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.junit.jupiter.api.Assertions.fail;
import java.io.File;
@@ -31,6 +32,7 @@
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
+import org.apache.commons.io.FileUtils;
import org.apache.hadoop.fs.FileSystemTestHelper;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.OzoneConfigKeys;
@@ -63,7 +65,7 @@ private void createContainerDB(OzoneConfiguration conf, File dbFile)
@Test
public void testContainerCacheEviction() throws Exception {
File root = new File(testRoot);
- root.mkdirs();
+ assertTrue(root.mkdirs());
OzoneConfiguration conf = new OzoneConfiguration();
conf.setInt(OzoneConfigKeys.OZONE_CONTAINER_CACHE_SIZE, 2);
@@ -139,13 +141,14 @@ public void testContainerCacheEviction() throws Exception {
db4.close();
db5.close();
});
+
+ FileUtils.deleteDirectory(root);
}
@Test
void testConcurrentDBGet() throws Exception {
File root = new File(testRoot);
- root.mkdirs();
- root.deleteOnExit();
+ assertTrue(root.mkdirs());
OzoneConfiguration conf = new OzoneConfiguration();
conf.setInt(OzoneConfigKeys.OZONE_CONTAINER_CACHE_SIZE, 2);
@@ -180,12 +183,13 @@ void testConcurrentDBGet() throws Exception {
db.close();
assertEquals(1, cache.size());
db.cleanup();
+ FileUtils.deleteDirectory(root);
}
@Test
public void testUnderlyingDBzIsClosed() throws Exception {
File root = new File(testRoot);
- root.mkdirs();
+ assertTrue(root.mkdirs());
OzoneConfiguration conf = new OzoneConfiguration();
conf.setInt(OzoneConfigKeys.OZONE_CONTAINER_CACHE_SIZE, 2);
@@ -217,5 +221,6 @@ public void testUnderlyingDBzIsClosed() throws Exception {
db3.close();
db4.close();
cache.clear();
+ FileUtils.deleteDirectory(root);
}
}
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java
index a959915a8aae..0ee12be72363 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java
@@ -195,7 +195,8 @@ public void testDatanodeStateContext() throws IOException,
File idPath = new File(
conf.get(ScmConfigKeys.OZONE_SCM_DATANODE_ID_DIR),
OzoneConsts.OZONE_SCM_DATANODE_ID_FILE_DEFAULT);
- idPath.delete();
+ assertTrue(idPath.createNewFile());
+ assertTrue(idPath.delete());
DatanodeDetails datanodeDetails = getNewDatanodeDetails();
DatanodeDetails.Port port = DatanodeDetails.newStandalonePort(
OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT_DEFAULT);
@@ -317,11 +318,11 @@ public void testDatanodeStateContext() throws IOException,
@Test
public void testDatanodeStateMachineWithIdWriteFail() throws Exception {
-
File idPath = new File(
conf.get(ScmConfigKeys.OZONE_SCM_DATANODE_ID_DIR),
OzoneConsts.OZONE_SCM_DATANODE_ID_FILE_DEFAULT);
- idPath.delete();
+ assertTrue(idPath.createNewFile());
+ assertTrue(idPath.delete());
DatanodeDetails datanodeDetails = getNewDatanodeDetails();
DatanodeDetails.Port port = DatanodeDetails.newStandalonePort(
OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT_DEFAULT);
@@ -340,8 +341,7 @@ public void testDatanodeStateMachineWithIdWriteFail() throws Exception {
//Set the idPath to read only, state machine will fail to write
// datanodeId file and set the state to shutdown.
- idPath.getParentFile().mkdirs();
- idPath.getParentFile().setReadOnly();
+ assertTrue(idPath.getParentFile().setReadOnly());
task.execute(executorService);
DatanodeStateMachine.DatanodeStates newState =
@@ -398,7 +398,7 @@ public void testDatanodeStateMachineWithInvalidConfiguration()
task.await(2, TimeUnit.SECONDS);
assertEquals(DatanodeStateMachine.DatanodeStates.SHUTDOWN,
newState);
- } catch (Exception e) {
+ } catch (IOException | InterruptedException | TimeoutException | ExecutionException e) {
fail("Unexpected exception found");
}
});
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDataYaml.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDataYaml.java
index 4b2ad6db7ccf..b2612c9e0fe0 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDataYaml.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDataYaml.java
@@ -70,7 +70,7 @@ private void setLayoutVersion(ContainerLayoutVersion layoutVersion) {
*/
private File createContainerFile(long containerID, int replicaIndex)
throws IOException {
- new File(testRoot).mkdirs();
+ assertTrue(new File(testRoot).mkdirs());
String containerPath = containerID + ".container";
@@ -167,6 +167,8 @@ public void testCreateContainerFile(ContainerLayoutVersion layout)
kvData.lastDataScanTime().get().toEpochMilli());
assertEquals(SCAN_TIME.toEpochMilli(),
kvData.getDataScanTimestamp().longValue());
+
+ cleanup();
}
@ContainerLayoutTestInfo.ContainerTest
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDeletionChoosingPolicy.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDeletionChoosingPolicy.java
index ca245e276c1d..a7d6a80c4683 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDeletionChoosingPolicy.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDeletionChoosingPolicy.java
@@ -29,7 +29,6 @@
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
-import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Random;
@@ -39,7 +38,6 @@
import org.apache.commons.lang3.RandomUtils;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.scm.ScmConfigKeys;
-import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
import org.apache.hadoop.ozone.container.ContainerTestHelper;
import org.apache.hadoop.ozone.container.common.impl.BlockDeletingService.ContainerBlockInfo;
import org.apache.hadoop.ozone.container.common.interfaces.ContainerDeletionChoosingPolicy;
@@ -83,8 +81,6 @@ public void testRandomChoosingPolicy(ContainerLayoutVersion layout)
conf.set(
ScmConfigKeys.OZONE_SCM_KEY_VALUE_CONTAINER_DELETION_CHOOSING_POLICY,
RandomContainerDeletionChoosingPolicy.class.getName());
- List pathLists = new LinkedList<>();
- pathLists.add(StorageLocation.parse(containerDir.getAbsolutePath()));
containerSet = new ContainerSet(1000);
int numContainers = 10;
@@ -146,8 +142,6 @@ public void testTopNOrderedChoosingPolicy(ContainerLayoutVersion layout)
conf.set(
ScmConfigKeys.OZONE_SCM_KEY_VALUE_CONTAINER_DELETION_CHOOSING_POLICY,
TopNOrderedContainerDeletionChoosingPolicy.class.getName());
- List pathLists = new LinkedList<>();
- pathLists.add(StorageLocation.parse(containerDir.getAbsolutePath()));
containerSet = new ContainerSet(1000);
int numContainers = 10;
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java
index db6ca37fa652..cdb477aada82 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java
@@ -38,7 +38,6 @@
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
-import java.nio.file.Paths;
import java.security.NoSuchAlgorithmException;
import java.util.ArrayList;
import java.util.Arrays;
@@ -660,7 +659,6 @@ public void testWritReadManyChunks(ContainerTestVersionInfo versionInfo)
KeyValueContainerData cNewData =
(KeyValueContainerData) container.getContainerData();
assertNotNull(cNewData);
- Path dataDir = Paths.get(cNewData.getChunksPath());
// Read chunk via file system and verify.
Checksum checksum = new Checksum(ChecksumType.CRC32, 1024 * 1024);
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/TestStateContext.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/TestStateContext.java
index 0174d800c4dd..15c8386a8931 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/TestStateContext.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/TestStateContext.java
@@ -44,6 +44,7 @@
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicBoolean;
@@ -572,9 +573,11 @@ public void testIsThreadPoolAvailable() throws Exception {
// task num greater than pool size
for (int i = 0; i < threadPoolSize; i++) {
- executorService.submit((Callable) futureOne::get);
+ Future future = executorService.submit((Callable) futureOne::get);
+ assertFalse(future.isDone());
}
- executorService.submit((Callable) futureTwo::get);
+ Future future = executorService.submit((Callable) futureTwo::get);
+ assertFalse(future.isDone());
assertFalse(stateContext.isThreadPoolAvailable(executorService));
@@ -592,9 +595,11 @@ public void doesNotAwaitWithoutExecute() throws Exception {
final AtomicInteger awaited = new AtomicInteger();
ExecutorService executorService = Executors.newFixedThreadPool(1);
- CompletableFuture future = new CompletableFuture<>();
- executorService.submit((Callable) future::get);
- executorService.submit((Callable) future::get);
+ CompletableFuture task = new CompletableFuture<>();
+ Future future = executorService.submit((Callable) task::get);
+ assertFalse(future.isDone());
+ future = executorService.submit((Callable) task::get);
+ assertFalse(future.isDone());
StateContext subject = new StateContext(new OzoneConfiguration(),
DatanodeStates.INIT, mock(DatanodeStateMachine.class), "") {
@@ -631,7 +636,7 @@ public DatanodeStates await(long time, TimeUnit timeUnit) {
assertEquals(0, awaited.get());
assertEquals(0, executed.get());
- future.complete("any");
+ task.complete("any");
LambdaTestUtils.await(1000, 100, () ->
subject.isThreadPoolAvailable(executorService));
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java
index ef2cc4489d0d..4614ee952bba 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java
@@ -185,10 +185,10 @@ public void testVolumeInInconsistentState() throws Exception {
// Create the root volume dir and create a sub-directory within it.
File newVolume = new File(volume3, HDDS_VOLUME_DIR);
System.out.println("new volume root: " + newVolume);
- newVolume.mkdirs();
+ assertTrue(newVolume.mkdirs());
assertTrue(newVolume.exists(), "Failed to create new volume root");
File dataDir = new File(newVolume, "chunks");
- dataDir.mkdirs();
+ assertTrue(dataDir.mkdirs());
assertTrue(dataDir.exists());
// The new volume is in an inconsistent state as the root dir is
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestTarContainerPacker.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestTarContainerPacker.java
index 62d479175553..cae19d672198 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestTarContainerPacker.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestTarContainerPacker.java
@@ -24,6 +24,7 @@
import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;
@@ -380,7 +381,9 @@ private File writeDbFile(
private File writeSingleFile(Path parentPath, String fileName,
String content) throws IOException {
Path path = parentPath.resolve(fileName).normalize();
- Files.createDirectories(path.getParent());
+ Path parent = path.getParent();
+ assertNotNull(parent);
+ Files.createDirectories(parent);
File file = path.toFile();
FileOutputStream fileStream = new FileOutputStream(file);
try (OutputStreamWriter writer = new OutputStreamWriter(fileStream,
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestBackgroundContainerDataScanner.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestBackgroundContainerDataScanner.java
index eb689c41d271..22fa978eb9f7 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestBackgroundContainerDataScanner.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestBackgroundContainerDataScanner.java
@@ -105,12 +105,11 @@ public void testScannerMetrics() {
@Override
public void testScannerMetricsUnregisters() {
String name = scanner.getMetrics().getName();
-
assertNotNull(DefaultMetricsSystem.instance().getSource(name));
scanner.shutdown();
scanner.run();
-
+
assertNull(DefaultMetricsSystem.instance().getSource(name));
}
@@ -203,7 +202,8 @@ public void testWithVolumeFailure() throws Exception {
GenericTestUtils.waitFor(() -> !scanner.isAlive(), 1000, 5000);
// Volume health should have been checked.
- verify(vol, atLeastOnce()).isFailed();
+    // TODO: remove the mock return value assertion after we upgrade to spotbugs 4.8 or later
+ assertFalse(verify(vol, atLeastOnce()).isFailed());
// No iterations should have been run.
assertEquals(0, metrics.getNumScanIterations());
assertEquals(0, metrics.getNumContainersScanned());
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestBackgroundContainerMetadataScanner.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestBackgroundContainerMetadataScanner.java
index 15640a9c8313..88f973355f10 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestBackgroundContainerMetadataScanner.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestBackgroundContainerMetadataScanner.java
@@ -181,7 +181,8 @@ public void testWithVolumeFailure() throws Exception {
GenericTestUtils.waitFor(() -> metrics.getNumScanIterations() >= 1, 1000,
5000);
// Volume health should have been checked.
- verify(vol, atLeastOnce()).isFailed();
+    // TODO: remove the mock return value assertion after we upgrade to spotbugs 4.8 or later
+ assertFalse(verify(vol, atLeastOnce()).isFailed());
// Scanner should not have shutdown when it encountered the failed volume.
assertTrue(scanner.isAlive());
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java
index b6d23777a908..05a091399177 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java
@@ -515,10 +515,10 @@ public void testHeartbeat() throws Exception {
}
@Test
- public void testHeartbeatWithCommandStatusReport() throws Exception {
+ public void testHeartbeatWithCommandStatusReport(@TempDir File endPointTempDir) throws Exception {
DatanodeDetails dataNode = randomDatanodeDetails();
try (EndpointStateMachine rpcEndPoint =
- createEndpoint(SCMTestUtils.getConf(tempDir),
+ createEndpoint(SCMTestUtils.getConf(endPointTempDir),
serverAddress, 1000)) {
// Add some scmCommands for heartbeat response
addScmCommands();