diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/BlockReportTestBase.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/BlockReportTestBase.java index 1a65d2f024e7e..0dfe3ae509752 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/BlockReportTestBase.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/BlockReportTestBase.java @@ -17,10 +17,11 @@ */ package org.apache.hadoop.hdfs.server.datanode; -import static org.hamcrest.core.Is.is; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertThat; -import static org.junit.Assert.assertTrue; +import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.jupiter.api.Assertions.assertArrayEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.anyString; @@ -69,10 +70,10 @@ import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.test.GenericTestUtils.DelayAnswer; import org.apache.hadoop.util.Time; -import org.junit.After; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.Timeout; import org.mockito.Mockito; import org.mockito.invocation.InvocationOnMock; import org.slf4j.event.Level; @@ -112,14 +113,14 @@ public abstract class BlockReportTestBase { resetConfiguration(); } - @Before + @BeforeEach public void startUpCluster() throws IOException { REPL_FACTOR = 1; //Reset if case a test has modified the value cluster = new MiniDFSCluster.Builder(conf).numDataNodes(REPL_FACTOR).build(); fs = cluster.getFileSystem(); } - @After + @AfterEach public void shutDownCluster() throws IOException { if (fs != null) { fs.close(); @@ -209,7 +210,8 @@ protected abstract void sendBlockReports(DatanodeRegistration dnR, String poolId * * @throws java.io.IOException on an error */ - @Test(timeout=300000) + @Test + @Timeout(value = 300) public void blockReport_01() throws IOException { final String METHOD_NAME = GenericTestUtils.getMethodName(); Path filePath = new Path("/" + METHOD_NAME + ".dat"); @@ -255,8 +257,7 @@ public void blockReport_01() throws IOException { for (int i = 0; i < blocksAfterReport.size(); i++) { ExtendedBlock b = blocksAfterReport.get(i).getBlock(); - assertEquals("Length of " + i + "th block is incorrect", - oldLengths[i], b.getNumBytes()); + assertEquals(oldLengths[i], b.getNumBytes(), "Length of " + i + "th block is incorrect"); } } @@ -269,7 +270,8 @@ public void blockReport_01() throws IOException { * * @throws IOException in case of errors */ - @Test(timeout=300000) + @Test + @Timeout(value = 300) public void blockReport_02() throws IOException { final String METHOD_NAME = GenericTestUtils.getMethodName(); LOG.info("Running test " + METHOD_NAME); @@ -332,10 +334,10 @@ public void blockReport_02() throws IOException { printStats(); - assertEquals("Wrong number of MissingBlocks is found", - blocks2Remove.size(), cluster.getNamesystem().getMissingBlocksCount()); - assertEquals("Wrong number of UnderReplicatedBlocks is found", - blocks2Remove.size(), cluster.getNamesystem().getUnderReplicatedBlocks()); + 
assertEquals(blocks2Remove.size(), cluster.getNamesystem().getMissingBlocksCount(), + "Wrong number of MissingBlocks is found"); + assertEquals(blocks2Remove.size(), cluster.getNamesystem().getUnderReplicatedBlocks(), + "Wrong number of UnderReplicatedBlocks is found"); } @@ -346,7 +348,8 @@ public void blockReport_02() throws IOException { * * @throws IOException in case of an error */ - @Test(timeout=300000) + @Test + @Timeout(value = 300) public void blockReport_03() throws IOException { final String METHOD_NAME = GenericTestUtils.getMethodName(); Path filePath = new Path("/" + METHOD_NAME + ".dat"); @@ -360,10 +363,12 @@ public void blockReport_03() throws IOException { sendBlockReports(dnR, poolId, reports); printStats(); - assertThat("Wrong number of corrupt blocks", - cluster.getNamesystem().getCorruptReplicaBlocks(), is(1L)); - assertThat("Wrong number of PendingDeletion blocks", - cluster.getNamesystem().getPendingDeletionBlocks(), is(0L)); + assertThat(cluster.getNamesystem().getCorruptReplicaBlocks()) + .as("Wrong number of corrupt blocks") + .isEqualTo((1L)); + assertThat(cluster.getNamesystem().getPendingDeletionBlocks()) + .as("Wrong number of PendingDeletion blocks") + .isEqualTo(0L); } /** @@ -374,7 +379,8 @@ public void blockReport_03() throws IOException { * * @throws IOException in case of an error */ - @Test(timeout=300000) + @Test + @Timeout(value = 300) public void blockReport_04() throws IOException { final String METHOD_NAME = GenericTestUtils.getMethodName(); Path filePath = new Path("/" + METHOD_NAME + ".dat"); @@ -396,10 +402,12 @@ public void blockReport_04() throws IOException { sendBlockReports(dnR, poolId, reports); printStats(); - assertThat("Wrong number of corrupt blocks", - cluster.getNamesystem().getCorruptReplicaBlocks(), is(0L)); - assertThat("Wrong number of PendingDeletion blocks", - cluster.getNamesystem().getPendingDeletionBlocks(), is(1L)); + assertThat(cluster.getNamesystem().getCorruptReplicaBlocks()) + .as("Wrong number of corrupt blocks") + .isEqualTo(0L); + assertThat(cluster.getNamesystem().getPendingDeletionBlocks()) + .as("Wrong number of PendingDeletion blocks") + .isEqualTo(1L); } /** @@ -410,7 +418,8 @@ public void blockReport_04() throws IOException { * * @throws IOException in case of an error */ - @Test(timeout=300000) + @Test + @Timeout(value = 300) public void blockReport_06() throws Exception { final String METHOD_NAME = GenericTestUtils.getMethodName(); Path filePath = new Path("/" + METHOD_NAME + ".dat"); @@ -426,8 +435,8 @@ public void blockReport_06() throws Exception { StorageBlockReport[] reports = getBlockReports(dn, poolId, false, false); sendBlockReports(dnR, poolId, reports); printStats(); - assertEquals("Wrong number of PendingReplication Blocks", - 0, cluster.getNamesystem().getUnderReplicatedBlocks()); + assertEquals(0, cluster.getNamesystem().getUnderReplicatedBlocks(), + "Wrong number of PendingReplication Blocks"); } /** @@ -444,7 +453,8 @@ public void blockReport_06() throws Exception { * * @throws IOException in case of an error */ - @Test(timeout=300000) + @Test + @Timeout(value = 300) public void blockReport_07() throws Exception { final String METHOD_NAME = GenericTestUtils.getMethodName(); Path filePath = new Path("/" + METHOD_NAME + ".dat"); @@ -462,23 +472,29 @@ public void blockReport_07() throws Exception { sendBlockReports(dnR, poolId, reports); printStats(); - assertThat("Wrong number of corrupt blocks", - cluster.getNamesystem().getCorruptReplicaBlocks(), is(0L)); - assertThat("Wrong number of 
PendingDeletion blocks", - cluster.getNamesystem().getPendingDeletionBlocks(), is(1L)); - assertThat("Wrong number of PendingReplication blocks", - cluster.getNamesystem().getPendingReplicationBlocks(), is(0L)); + assertThat(cluster.getNamesystem().getCorruptReplicaBlocks()) + .as("Wrong number of corrupt blocks") + .isEqualTo(0L); + assertThat(cluster.getNamesystem().getPendingDeletionBlocks()) + .as("Wrong number of PendingDeletion blocks") + .isEqualTo(1L); + assertThat(cluster.getNamesystem().getPendingReplicationBlocks()) + .as("Wrong number of PendingReplication blocks") + .isEqualTo(0L); reports = getBlockReports(dn, poolId, false, true); sendBlockReports(dnR, poolId, reports); printStats(); - assertThat("Wrong number of corrupt blocks", - cluster.getNamesystem().getCorruptReplicaBlocks(), is(1L)); - assertThat("Wrong number of PendingDeletion blocks", - cluster.getNamesystem().getPendingDeletionBlocks(), is(1L)); - assertThat("Wrong number of PendingReplication blocks", - cluster.getNamesystem().getPendingReplicationBlocks(), is(0L)); + assertThat(cluster.getNamesystem().getCorruptReplicaBlocks()) + .as("Wrong number of corrupt blocks") + .isEqualTo(1L); + assertThat(cluster.getNamesystem().getPendingDeletionBlocks()) + .as("Wrong number of PendingDeletion blocks") + .isEqualTo(1L); + assertThat(cluster.getNamesystem().getPendingReplicationBlocks()) + .as("Wrong number of PendingReplication blocks") + .isEqualTo(0L); printStats(); @@ -496,7 +512,8 @@ public void blockReport_07() throws Exception { * * @throws IOException in case of an error */ - @Test(timeout=300000) + @Test + @Timeout(value = 300) public void blockReport_08() throws IOException { final String METHOD_NAME = GenericTestUtils.getMethodName(); Path filePath = new Path("/" + METHOD_NAME + ".dat"); @@ -524,8 +541,8 @@ public void blockReport_08() throws IOException { StorageBlockReport[] reports = getBlockReports(dn, poolId, false, false); sendBlockReports(dnR, poolId, reports); printStats(); - assertEquals("Wrong number of PendingReplication blocks", - blocks.size(), cluster.getNamesystem().getPendingReplicationBlocks()); + assertEquals(blocks.size(), cluster.getNamesystem().getPendingReplicationBlocks(), + "Wrong number of PendingReplication blocks"); try { bc.join(); @@ -538,7 +555,8 @@ public void blockReport_08() throws IOException { // Similar to BlockReport_08 but corrupts GS and len of the TEMPORARY's // replica block. Expect the same behaviour: NN should simply ignore this // block - @Test(timeout=300000) + @Test + @Timeout(value = 300) public void blockReport_09() throws IOException { final String METHOD_NAME = GenericTestUtils.getMethodName(); Path filePath = new Path("/" + METHOD_NAME + ".dat"); @@ -567,8 +585,8 @@ public void blockReport_09() throws IOException { StorageBlockReport[] reports = getBlockReports(dn, poolId, true, true); sendBlockReports(dnR, poolId, reports); printStats(); - assertEquals("Wrong number of PendingReplication blocks", - 2, cluster.getNamesystem().getPendingReplicationBlocks()); + assertEquals(2, cluster.getNamesystem().getPendingReplicationBlocks(), + "Wrong number of PendingReplication blocks"); try { bc.join(); @@ -587,7 +605,8 @@ public void blockReport_09() throws IOException { * corrupt. * This is a regression test for HDFS-2791. 
*/ - @Test(timeout=300000) + @Test + @Timeout(value = 300) public void testOneReplicaRbwReportArrivesAfterBlockCompleted() throws Exception { final CountDownLatch brFinished = new CountDownLatch(1); DelayAnswer delayer = new GenericTestUtils.DelayAnswer(LOG) { @@ -658,7 +677,8 @@ protected Object passThrough(InvocationOnMock invocation) } // See HDFS-10301 - @Test(timeout = 300000) + @Test + @Timeout(value = 300) public void testInterleavedBlockReports() throws IOException, ExecutionException, InterruptedException { int numConcurrentBlockReports = 3; @@ -696,7 +716,7 @@ public Void call() throws IOException { executorService.shutdown(); // Verify that the storages match before and after the test - Assert.assertArrayEquals(storageInfos, dnDescriptor.getStorageInfos()); + assertArrayEquals(storageInfos, dnDescriptor.getStorageInfos()); } private void waitForTempReplica(Block bl, int DN_N1) throws IOException { @@ -729,8 +749,7 @@ private void waitForTempReplica(Block bl, int DN_N1) throws IOException { LOG.debug("Has been waiting for " + waiting_period + " ms."); } if (waiting_period > TIMEOUT) - assertTrue("Was waiting too long to get ReplicaInfo from a datanode", - tooLongWait); + assertTrue(tooLongWait, "Was waiting too long to get ReplicaInfo from a datanode"); } HdfsServerConstants.ReplicaState state = r.getState(); @@ -746,8 +765,7 @@ private void waitForTempReplica(Block bl, int DN_N1) throws IOException { " is in state " + state.getValue()); } if (Time.monotonicNow() - start > TIMEOUT) - assertTrue("Was waiting too long for a replica to become TEMPORARY", - tooLongWait); + assertTrue(tooLongWait, "Was waiting too long for a replica to become TEMPORARY"); } if(LOG.isDebugEnabled()) { LOG.debug("Replica state after the loop " + state.getValue()); @@ -910,7 +928,7 @@ public void run() { startDNandWait(filePath, true); } catch (Exception e) { e.printStackTrace(); - Assert.fail("Failed to start BlockChecker: " + e); + fail("Failed to start BlockChecker: " + e); } } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java index dba5a146f0c49..31474fa0990ab 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java @@ -39,8 +39,7 @@ import java.util.function.Supplier; -import static org.hamcrest.core.Is.is; -import static org.junit.Assert.assertThat; +import static org.junit.jupiter.api.Assertions.assertEquals; /** * DO NOT ADD MOCKITO IMPORTS HERE Or Downstream projects may not @@ -220,11 +219,12 @@ public static void reconfigureDataNode(DataNode dn, File... newVols) dnNewDataDirs.append(newVol.getAbsolutePath()); } try { - assertThat( + assertEquals( dn.reconfigurePropertyImpl( DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, dnNewDataDirs.toString()), - is(dn.getConf().get(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY))); + dn.getConf().get(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY) + ); } catch (ReconfigurationException e) { // This can be thrown if reconfiguration tries to use a failed volume. 
// We need to swallow the exception, because some of our tests want to diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/InternalDataNodeTestUtils.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/InternalDataNodeTestUtils.java index 2b04e2707a034..0a0313fb77a01 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/InternalDataNodeTestUtils.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/InternalDataNodeTestUtils.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hdfs.server.datanode; +import static org.junit.jupiter.api.Assertions.assertEquals; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -41,7 +42,6 @@ import org.apache.hadoop.hdfs.server.protocol.HeartbeatResponse; import org.apache.hadoop.hdfs.server.protocol.NNHAStatusHeartbeat; import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo; -import org.junit.Assert; import org.mockito.Mockito; import org.mockito.invocation.InvocationOnMock; import org.mockito.stubbing.Answer; @@ -172,8 +172,8 @@ public DatanodeRegistration answer(InvocationOnMock invocation) @Override DatanodeProtocolClientSideTranslatorPB connectToNN( InetSocketAddress nnAddr) throws IOException { - Assert.assertEquals(nnSocketAddr, nnAddr); - return namenode; + assertEquals(nnSocketAddr, nnAddr); + return namenode; } }; // Trigger a heartbeat so that it acknowledges the NN as active. diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBatchIbr.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBatchIbr.java index 3365f934f9a20..2ec05a7682031 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBatchIbr.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBatchIbr.java @@ -19,6 +19,8 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCKREPORT_INCREMENTAL_INTERVAL_MSEC_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_KEY; +import static org.junit.jupiter.api.Assertions.assertArrayEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; import java.io.IOException; import java.util.List; @@ -45,7 +47,6 @@ import org.apache.hadoop.test.MetricsAsserts; import org.apache.hadoop.util.Time; import org.slf4j.event.Level; -import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.Test; /** @@ -161,7 +162,7 @@ public Boolean call() throws Exception { }); } for(int i = 0; i < NUM_FILES; i++) { - Assertions.assertTrue(verifyService.take().get()); + assertTrue(verifyService.take().get()); } final long testEndTime = Time.monotonicNow(); @@ -247,7 +248,7 @@ static boolean verifyFile(Path f, DistributedFileSystem dfs) { for(int i = 0; i < numBlocks; i++) { in.read(computed); nextBytes(i, seed, expected); - Assertions.assertArrayEquals(expected, computed); + assertArrayEquals(expected, computed); } return true; } catch(Exception e) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockPoolManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockPoolManager.java index a406adf36ad31..0295470400ee0 100644 --- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockPoolManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockPoolManager.java @@ -18,6 +18,8 @@ package org.apache.hadoop.hdfs.server.datanode; import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertTrue; import java.io.IOException; import java.net.InetSocketAddress; @@ -31,7 +33,6 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSUtil; -import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.mockito.Mockito; @@ -159,9 +160,9 @@ public void testInternalNameService() throws Exception { bpm.refreshNamenodes(conf); assertEquals("create #1\n", log.toString()); Map map = bpm.getBpByNameserviceId(); - Assertions.assertFalse(map.containsKey("ns2")); - Assertions.assertFalse(map.containsKey("ns3")); - Assertions.assertTrue(map.containsKey("ns1")); + assertFalse(map.containsKey("ns2")); + assertFalse(map.containsKey("ns3")); + assertTrue(map.containsKey("ns1")); log.setLength(0); } @@ -179,18 +180,18 @@ public void testNameServiceNeedToBeResolved() throws Exception { "create #2\n" + "create #3\n", log.toString()); Map map = bpm.getBpByNameserviceId(); - Assertions.assertTrue(map.containsKey("ns1")); - Assertions.assertTrue(map.containsKey("ns2")); - Assertions.assertTrue(map.containsKey("ns3")); - Assertions.assertEquals(2, map.get("ns3").getBPServiceActors().size()); - Assertions.assertEquals("ns3-" + MockDomainNameResolver.FQDN_1 + "-8020", + assertTrue(map.containsKey("ns1")); + assertTrue(map.containsKey("ns2")); + assertTrue(map.containsKey("ns3")); + assertEquals(2, map.get("ns3").getBPServiceActors().size()); + assertEquals("ns3-" + MockDomainNameResolver.FQDN_1 + "-8020", map.get("ns3").getBPServiceActors().get(0).getNnId()); - Assertions.assertEquals("ns3-" + MockDomainNameResolver.FQDN_2 + "-8020", + assertEquals("ns3-" + MockDomainNameResolver.FQDN_2 + "-8020", map.get("ns3").getBPServiceActors().get(1).getNnId()); - Assertions.assertEquals( + assertEquals( new InetSocketAddress(MockDomainNameResolver.FQDN_1, 8020), map.get("ns3").getBPServiceActors().get(0).getNNSocketAddress()); - Assertions.assertEquals( + assertEquals( new InetSocketAddress(MockDomainNameResolver.FQDN_2, 8020), map.get("ns3").getBPServiceActors().get(1).getNNSocketAddress()); log.setLength(0); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java index 995a135c4e30f..570d41a69dba4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java @@ -21,8 +21,10 @@ import org.apache.hadoop.hdfs.AppendTestUtil; import org.apache.hadoop.hdfs.server.namenode.NameNode; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static 
org.junit.jupiter.api.Assertions.fail; import static org.mockito.ArgumentMatchers.anyList; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.anyBoolean; @@ -51,7 +53,9 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicReference; +import org.apache.hadoop.test.TestName; import org.apache.hadoop.thirdparty.com.google.common.collect.Iterators; +import org.junit.jupiter.api.extension.RegisterExtension; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; @@ -93,12 +97,10 @@ import org.apache.hadoop.util.DataChecksum; import org.apache.hadoop.util.Time; import org.slf4j.event.Level; -import org.junit.After; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.TestName; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.Timeout; import org.mockito.Mockito; import org.mockito.invocation.InvocationOnMock; import org.mockito.stubbing.Answer; @@ -131,7 +133,8 @@ public class TestBlockRecovery { private final static ExtendedBlock block = new ExtendedBlock(POOL_ID, BLOCK_ID, BLOCK_LEN, GEN_STAMP); - @Rule + @SuppressWarnings("checkstyle:VisibilityModifier") + @RegisterExtension public TestName currentTestName = new TestName(); private final int cellSize = @@ -170,7 +173,7 @@ public class TestBlockRecovery { * Starts an instance of DataNode * @throws IOException */ - @Before + @BeforeEach public void startUp() throws IOException, URISyntaxException { tearDownDone = false; conf = new HdfsConfiguration(); @@ -228,8 +231,8 @@ public DatanodeRegistration answer(InvocationOnMock invocation) @Override DatanodeProtocolClientSideTranslatorPB connectToNN( InetSocketAddress nnAddr) throws IOException { - Assert.assertEquals(NN_ADDR, nnAddr); - return namenode; + assertEquals(NN_ADDR, nnAddr); + return namenode; } }; // Trigger a heartbeat so that it acknowledges the NN as active. 
@@ -259,15 +262,14 @@ public Boolean get() { } catch (InterruptedException e) { LOG.warn("InterruptedException while waiting to see active NN", e); } - Assert.assertNotNull("Failed to get ActiveNN", - dn.getAllBpOs().get(0).getActiveNN()); + assertNotNull(dn.getAllBpOs().get(0).getActiveNN(), "Failed to get ActiveNN"); } /** * Cleans the resources and closes the instance of datanode * @throws IOException if an error occurred */ - @After + @AfterEach public void tearDown() throws IOException { if (!tearDownDone && dn != null) { try { @@ -277,8 +279,7 @@ public void tearDown() throws IOException { } finally { File dir = new File(DATA_DIR); if (dir.exists()) - Assert.assertTrue( - "Cannot delete data-node dirs", FileUtil.fullyDelete(dir)); + assertTrue(FileUtil.fullyDelete(dir), "Cannot delete data-node dirs"); } tearDownDone = true; } @@ -317,7 +318,8 @@ private void testSyncReplicas(ReplicaRecoveryInfo replica1, * Two replicas are in Finalized state * @throws IOException in case of an error */ - @Test(timeout=60000) + @Test + @Timeout(value = 60) public void testFinalizedReplicas () throws IOException { if(LOG.isDebugEnabled()) { LOG.debug("Running " + GenericTestUtils.getMethodName()); @@ -344,9 +346,9 @@ public void testFinalizedReplicas () throws IOException { try { testSyncReplicas(replica1, replica2, dn1, dn2); - Assert.fail("Two finalized replicas should not have different lengthes!"); + fail("Two finalized replicas should not have different lengthes!"); } catch (IOException e) { - Assert.assertTrue(e.getMessage().startsWith( + assertTrue(e.getMessage().startsWith( "Inconsistent size of finalized replicas. ")); } } @@ -356,7 +358,8 @@ public void testFinalizedReplicas () throws IOException { * One replica is Finalized and another is RBW. * @throws IOException in case of an error */ - @Test(timeout=60000) + @Test + @Timeout(value = 60) public void testFinalizedRbwReplicas() throws IOException { if(LOG.isDebugEnabled()) { LOG.debug("Running " + GenericTestUtils.getMethodName()); @@ -399,7 +402,8 @@ public void testFinalizedRbwReplicas() throws IOException { * * @throws IOException in case of an error */ - @Test(timeout = 60000) + @Test + @Timeout(value = 60) public void testFinalizedRwrReplicas() throws IOException { if (LOG.isDebugEnabled()) { LOG.debug("Running " + GenericTestUtils.getMethodName()); @@ -441,7 +445,8 @@ public void testFinalizedRwrReplicas() throws IOException { * Two replicas are RBW. 
* @throws IOException in case of an error */ - @Test(timeout=60000) + @Test + @Timeout(value = 60) public void testRBWReplicas() throws IOException { if (LOG.isDebugEnabled()) { LOG.debug("Running " + GenericTestUtils.getMethodName()); @@ -466,7 +471,8 @@ public void testRBWReplicas() throws IOException { * * @throws IOException in case of an error */ - @Test(timeout=60000) + @Test + @Timeout(value = 60) public void testRBW_RWRReplicas() throws IOException { if(LOG.isDebugEnabled()) { LOG.debug("Running " + GenericTestUtils.getMethodName()); @@ -491,7 +497,8 @@ public void testRBW_RWRReplicas() throws IOException { * * @throws IOException in case of an error */ - @Test(timeout=60000) + @Test + @Timeout(value = 60) public void testRWRReplicas() throws IOException { if (LOG.isDebugEnabled()) { LOG.debug("Running " + GenericTestUtils.getMethodName()); @@ -528,7 +535,8 @@ private Collection initRecoveringBlocks() throws IOException { * @throws IOException * in case of an error */ - @Test(timeout=60000) + @Test + @Timeout(value = 60) public void testRecoveryInProgressException() throws IOException, InterruptedException { if(LOG.isDebugEnabled()) { @@ -553,7 +561,8 @@ public void testRecoveryInProgressException() * @throws IOException * in case of an error */ - @Test(timeout=60000) + @Test + @Timeout(value = 60) public void testErrorReplicas() throws IOException, InterruptedException { if(LOG.isDebugEnabled()) { LOG.debug("Running " + GenericTestUtils.getMethodName()); @@ -580,7 +589,8 @@ public void testErrorReplicas() throws IOException, InterruptedException { * * @throws IOException in case of an error */ - @Test(timeout=60000) + @Test + @Timeout(value = 60) public void testZeroLenReplicas() throws IOException, InterruptedException { if(LOG.isDebugEnabled()) { LOG.debug("Running " + GenericTestUtils.getMethodName()); @@ -620,7 +630,8 @@ private List initBlockRecords(DataNode spyDN) throws IOException { * * @throws IOException in case of an error */ - @Test(timeout=60000) + @Test + @Timeout(value = 60) public void testFailedReplicaUpdate() throws IOException { if(LOG.isDebugEnabled()) { LOG.debug("Running " + GenericTestUtils.getMethodName()); @@ -642,7 +653,8 @@ public void testFailedReplicaUpdate() throws IOException { * * @throws IOException in case of an error */ - @Test(timeout=60000) + @Test + @Timeout(value = 60) public void testNoReplicaUnderRecovery() throws IOException { if(LOG.isDebugEnabled()) { LOG.debug("Running " + GenericTestUtils.getMethodName()); @@ -667,7 +679,8 @@ public void testNoReplicaUnderRecovery() throws IOException { * * @throws IOException in case of an error */ - @Test(timeout=60000) + @Test + @Timeout(value = 60) public void testNotMatchedReplicaID() throws IOException { if(LOG.isDebugEnabled()) { LOG.debug("Running " + GenericTestUtils.getMethodName()); @@ -702,7 +715,8 @@ public void testNotMatchedReplicaID() throws IOException { * throw an exception. 
* @throws Exception */ - @Test(timeout=60000) + @Test + @Timeout(value = 60) public void testRURReplicas() throws Exception { if (LOG.isDebugEnabled()) { LOG.debug("Running " + GenericTestUtils.getMethodName()); @@ -724,16 +738,18 @@ public void testRURReplicas() throws Exception { } catch (IOException e) { // expect IOException to be thrown here e.printStackTrace(); - assertTrue("Wrong exception was thrown: " + e.getMessage(), - e.getMessage().contains("Found 1 replica(s) for block " + block + - " but none is in RWR or better state")); + assertTrue( + e.getMessage().contains( + "Found 1 replica(s) for block " + block + " but none is in RWR or better state"), + "Wrong exception was thrown: " + e.getMessage()); exceptionThrown = true; } finally { assertTrue(exceptionThrown); } } - @Test(timeout=60000) + @Test + @Timeout(value = 60) public void testSafeLength() throws Exception { // hard coded policy to work with hard coded test suite ErasureCodingPolicy ecPolicy = StripedFileTestUtil.getDefaultECPolicy(); @@ -752,8 +768,8 @@ public void testSafeLength() throws Exception { blockLengths[id], 0, null); syncList.put((long) id, new BlockRecord(null, null, rInfo)); } - Assert.assertEquals("BLOCK_LENGTHS_SUITE[" + i + "]", safeLength, - recoveryTask.getSafeLength(syncList)); + assertEquals(safeLength, recoveryTask.getSafeLength(syncList), + "BLOCK_LENGTHS_SUITE[" + i + "]"); } } @@ -806,7 +822,8 @@ private interface TestStopWorkerRunnable { void run(RecoveringBlock recoveringBlock) throws Exception; } - @Test(timeout=90000) + @Test + @Timeout(value = 90) public void testInitReplicaRecoveryDoesNotHoldLock() throws Exception { testStopWorker(new TestStopWorkerRunnable() { @Override @@ -827,7 +844,8 @@ public void run(RecoveringBlock recoveringBlock) throws Exception { }); } - @Test(timeout=90000) + @Test + @Timeout(value = 90) public void testRecoverAppendDoesNotHoldLock() throws Exception { testStopWorker(new TestStopWorkerRunnable() { @Override @@ -851,7 +869,8 @@ public void run(RecoveringBlock recoveringBlock) throws Exception { }); } - @Test(timeout=90000) + @Test + @Timeout(value = 90) public void testRecoverCloseDoesNotHoldLock() throws Exception { testStopWorker(new TestStopWorkerRunnable() { @Override @@ -885,8 +904,7 @@ private void testStopWorker(final TestStopWorkerRunnable tswr) // We need a long value for the data xceiver stop timeout. // Otherwise the timeout will trigger, and we will not have tested that // thread join was done locklessly. - Assert.assertEquals( - TEST_STOP_WORKER_XCEIVER_STOP_TIMEOUT_MILLIS, + assertEquals(TEST_STOP_WORKER_XCEIVER_STOP_TIMEOUT_MILLIS, dn.getDnConf().getXceiverStopTimeout()); final TestStopWorkerSemaphore progressParent = new TestStopWorkerSemaphore(); @@ -966,7 +984,7 @@ public void run() { // unit test framework, so we have to do it manually here. 
String failureReason = failure.get(); if (failureReason != null) { - Assert.fail("Thread failure: " + failureReason); + fail("Thread failure: " + failureReason); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery2.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery2.java index 5475c7fbfa7db..74d18b55c6cdc 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery2.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery2.java @@ -48,7 +48,6 @@ import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.test.TestName; import org.junit.jupiter.api.AfterEach; -import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; @@ -77,6 +76,8 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_REPLICATION_MIN_KEY; import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertTrue; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.anyLong; import static org.mockito.ArgumentMatchers.anyString; @@ -168,7 +169,7 @@ public void startUp() throws IOException { @Override DatanodeProtocolClientSideTranslatorPB connectToNN( InetSocketAddress nnAddr) throws IOException { - Assertions.assertEquals(NN_ADDR, nnAddr); + assertEquals(NN_ADDR, nnAddr); return namenode; } }; @@ -192,7 +193,7 @@ private void waitForActiveNN() { } catch (InterruptedException e) { LOG.warn("InterruptedException while waiting to see active NN", e); } - Assertions.assertNotNull(dn.getAllBpOs().get(0).getActiveNN(), + assertNotNull(dn.getAllBpOs().get(0).getActiveNN(), "Failed to get ActiveNN"); } @@ -210,7 +211,7 @@ public void tearDown() throws IOException { } finally { File dir = new File(DATA_DIR); if (dir.exists()) { - Assertions.assertTrue(FileUtil.fullyDelete(dir), "Cannot delete data-node dirs"); + assertTrue(FileUtil.fullyDelete(dir), "Cannot delete data-node dirs"); } } tearDownDone = true; @@ -264,12 +265,12 @@ public void testRaceBetweenReplicaRecoveryAndFinalizeBlock() try { out.close(); } catch (IOException e) { - Assertions.assertTrue(e.getMessage().contains("are bad. Aborting..."), + assertTrue(e.getMessage().contains("are bad. 
Aborting..."), "Writing should fail"); } finally { recoveryThread.join(); } - Assertions.assertTrue(recoveryInitResult.get(), + assertTrue(recoveryInitResult.get(), "Recovery should be initiated successfully"); dataNode.updateReplicaUnderRecovery(block.getBlock(), block.getBlock() diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockScanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockScanner.java index 9e0743acb4fed..b96a060b3d31a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockScanner.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockScanner.java @@ -25,8 +25,8 @@ import static org.apache.hadoop.hdfs.server.datanode.BlockScanner.Conf.INTERNAL_VOLUME_SCANNER_SCAN_RESULT_HANDLER; import static org.apache.hadoop.hdfs.server.datanode.BlockScanner.Conf.INTERNAL_DFS_BLOCK_SCANNER_CURSOR_SAVE_INTERVAL_MS; import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertTrue; import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertTrue; import static org.junit.jupiter.api.Assertions.fail; import java.io.Closeable; @@ -62,7 +62,6 @@ import org.apache.hadoop.hdfs.server.datanode.VolumeScanner.Statistics; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.Time; -import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; @@ -704,36 +703,36 @@ public void testNextSorted() throws Exception { arr.add("3"); arr.add("5"); arr.add("7"); - Assertions.assertEquals("3", FsVolumeImpl.nextSorted(arr, "2")); - Assertions.assertEquals("3", FsVolumeImpl.nextSorted(arr, "1")); - Assertions.assertEquals("1", FsVolumeImpl.nextSorted(arr, "")); - Assertions.assertEquals("1", FsVolumeImpl.nextSorted(arr, null)); - Assertions.assertEquals(null, FsVolumeImpl.nextSorted(arr, "9")); + assertEquals("3", FsVolumeImpl.nextSorted(arr, "2")); + assertEquals("3", FsVolumeImpl.nextSorted(arr, "1")); + assertEquals("1", FsVolumeImpl.nextSorted(arr, "")); + assertEquals("1", FsVolumeImpl.nextSorted(arr, null)); + assertEquals(null, FsVolumeImpl.nextSorted(arr, "9")); } @Test @Timeout(value = 120) public void testCalculateNeededBytesPerSec() throws Exception { // If we didn't check anything the last hour, we should scan now. - Assertions.assertTrue( + assertTrue( VolumeScanner.calculateShouldScan("test", 100, 0, 0, 60)); // If, on average, we checked 101 bytes/s checked during the last hour, // stop checking now. - Assertions.assertFalse(VolumeScanner. + assertFalse(VolumeScanner. calculateShouldScan("test", 100, 101 * 3600, 1000, 5000)); // Target is 1 byte / s, but we didn't scan anything in the last minute. // Should scan now. - Assertions.assertTrue(VolumeScanner. + assertTrue(VolumeScanner. calculateShouldScan("test", 1, 3540, 0, 60)); // Target is 1000000 byte / s, but we didn't scan anything in the last // minute. Should scan now. - Assertions.assertTrue(VolumeScanner. + assertTrue(VolumeScanner. calculateShouldScan("test", 100000L, 354000000L, 0, 60)); - Assertions.assertFalse(VolumeScanner. + assertFalse(VolumeScanner. 
calculateShouldScan("test", 100000L, 365000000L, 0, 60)); } @@ -847,7 +846,7 @@ public Boolean get() { // We should not have rescanned the "suspect block", // because it was recently rescanned by the suspect block system. // This is a test of the "suspect block" rate limiting. - Assertions.assertFalse(info.goodBlocks.contains(first), "We should not " + + assertFalse(info.goodBlocks.contains(first), "We should not " + "have rescanned block " + first + ", because it should have been " + "in recentSuspectBlocks."); info.blocksScanned = 0; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestCachingStrategy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestCachingStrategy.java index cfaf9d9a9177a..05a4107adbbd6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestCachingStrategy.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestCachingStrategy.java @@ -42,8 +42,10 @@ import org.apache.hadoop.io.nativeio.NativeIOException; import static org.apache.hadoop.io.nativeio.NativeIO.POSIX.POSIX_FADV_DONTNEED; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertTrue; -import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; @@ -241,7 +243,7 @@ public void testFadviseAfterWriteThenRead() throws Exception { // read file readHdfsFile(fs, new Path(TEST_PATH), Long.MAX_VALUE, true); // verify that we dropped everything from the cache. - Assertions.assertNotNull(stats); + assertNotNull(stats); stats.assertDroppedInRange(0, TEST_PATH_LEN - WRITE_PACKET_SIZE); } finally { if (cluster != null) { @@ -287,7 +289,7 @@ public void testClientDefaults() throws Exception { // read file readHdfsFile(fs, new Path(TEST_PATH), Long.MAX_VALUE, null); // verify that we dropped everything from the cache. 
- Assertions.assertNotNull(stats); + assertNotNull(stats); stats.assertDroppedInRange(0, TEST_PATH_LEN - WRITE_PACKET_SIZE); } finally { if (cluster != null) { @@ -366,7 +368,7 @@ public void testNoFadviseAfterWriteThenRead() throws Exception { TEST_PATH, 0, Long.MAX_VALUE).get(0).getBlock(); String fadvisedFileName = cluster.getBlockFile(0, block).getName(); Stats stats = tracker.getStats(fadvisedFileName); - Assertions.assertNull(stats); + assertNull(stats); // read file readHdfsFile(fs, new Path(TEST_PATH), Long.MAX_VALUE, false); @@ -394,7 +396,7 @@ public void testSeekAfterSetDropBehind() throws Exception { createHdfsFile(fs, new Path(TEST_PATH), TEST_PATH_LEN, false); // verify that we can seek after setDropBehind try (FSDataInputStream fis = fs.open(new Path(TEST_PATH))) { - Assertions.assertTrue(fis.read() != -1); // create BlockReader + assertTrue(fis.read() != -1); // create BlockReader fis.setDropBehind(false); // clear BlockReader fis.seek(2); // seek } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDNUsageReport.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDNUsageReport.java index 8587f9dc71b46..e882c5db426bb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDNUsageReport.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDNUsageReport.java @@ -21,10 +21,12 @@ import org.apache.hadoop.hdfs.server.protocol.DataNodeUsageReport; import org.apache.hadoop.hdfs.server.protocol.DataNodeUsageReportUtil; -import org.junit.After; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.Timeout; + +import static org.junit.jupiter.api.Assertions.assertEquals; /** * Test class for {@link DataNodeUsageReport}. @@ -40,12 +42,12 @@ public class TestDNUsageReport { private long readBlock; private long timeSinceLastReport; - @Before + @BeforeEach public void setup() throws IOException { dnUsageUtil = new DataNodeUsageReportUtil(); } - @After + @AfterEach public void clear() throws IOException { dnUsageUtil = null; } @@ -54,13 +56,14 @@ public void clear() throws IOException { * Ensure that storage type and storage state are propagated * in Storage Reports. 
*/ - @Test(timeout = 60000) + @Test + @Timeout(value = 60) public void testUsageReport() throws IOException { // Test1 DataNodeUsageReport report = dnUsageUtil.getUsageReport(0, 0, 0, 0, 0, 0, 0); - Assert.assertEquals(report, DataNodeUsageReport.EMPTY_REPORT); + assertEquals(report, DataNodeUsageReport.EMPTY_REPORT); // Test2 bytesWritten = 200; @@ -74,22 +77,18 @@ public void testUsageReport() throws IOException { bytesRead, writeTime, readTime, writeBlock, readBlock, timeSinceLastReport); - Assert.assertEquals(bytesWritten / timeSinceLastReport, - report.getBytesWrittenPerSec()); - Assert.assertEquals(bytesRead / timeSinceLastReport, - report.getBytesReadPerSec()); - Assert.assertEquals(writeTime, report.getWriteTime()); - Assert.assertEquals(readTime, report.getReadTime()); - Assert.assertEquals(writeBlock / timeSinceLastReport, - report.getBlocksWrittenPerSec()); - Assert.assertEquals(readBlock / timeSinceLastReport, - report.getBlocksReadPerSec()); + assertEquals(bytesWritten / timeSinceLastReport, report.getBytesWrittenPerSec()); + assertEquals(bytesRead / timeSinceLastReport, report.getBytesReadPerSec()); + assertEquals(writeTime, report.getWriteTime()); + assertEquals(readTime, report.getReadTime()); + assertEquals(writeBlock / timeSinceLastReport, report.getBlocksWrittenPerSec()); + assertEquals(readBlock / timeSinceLastReport, report.getBlocksReadPerSec()); // Test3 DataNodeUsageReport report2 = dnUsageUtil.getUsageReport(bytesWritten, bytesRead, writeTime, readTime, writeBlock, readBlock, 0); - Assert.assertEquals(report, report2); + assertEquals(report, report2); // Test4 long bytesWritten2 = 50000; @@ -103,15 +102,15 @@ public void testUsageReport() throws IOException { bytesRead2, writeTime2, readTime2, writeBlock2, readBlock2, timeSinceLastReport); - Assert.assertEquals((bytesWritten2 - bytesWritten) / timeSinceLastReport, + assertEquals((bytesWritten2 - bytesWritten) / timeSinceLastReport, report2.getBytesWrittenPerSec()); - Assert.assertEquals((bytesRead2 - bytesRead) / timeSinceLastReport, + assertEquals((bytesRead2 - bytesRead) / timeSinceLastReport, report2.getBytesReadPerSec()); - Assert.assertEquals(writeTime2 - writeTime, report2.getWriteTime()); - Assert.assertEquals(readTime2 - readTime, report2.getReadTime()); - Assert.assertEquals((writeBlock2 - writeBlock) / timeSinceLastReport, + assertEquals(writeTime2 - writeTime, report2.getWriteTime()); + assertEquals(readTime2 - readTime, report2.getReadTime()); + assertEquals((writeBlock2 - writeBlock) / timeSinceLastReport, report2.getBlocksWrittenPerSec()); - Assert.assertEquals((readBlock2 - readBlock) / timeSinceLastReport, + assertEquals((readBlock2 - readBlock) / timeSinceLastReport, report2.getBlocksReadPerSec()); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeECN.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeECN.java index ce81872c22f99..bdbe9c29cbc7a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeECN.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeECN.java @@ -21,12 +21,13 @@ import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck; -import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; import java.io.IOException; 
+import static org.junit.jupiter.api.Assertions.assertNotEquals; + @Timeout(300) public class TestDataNodeECN { @@ -38,7 +39,7 @@ public void testECNFlag() throws IOException { try { cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build(); PipelineAck.ECN ecn = cluster.getDataNodes().get(0).getECN(); - Assertions.assertNotEquals(PipelineAck.ECN.DISABLED, ecn); + assertNotEquals(PipelineAck.ECN.DISABLED, ecn); } finally { if (cluster != null) { cluster.shutdown(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeErasureCodingMetrics.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeErasureCodingMetrics.java index de6c56e6d432c..04f2ee1de910a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeErasureCodingMetrics.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeErasureCodingMetrics.java @@ -38,11 +38,11 @@ import static org.apache.hadoop.test.MetricsAsserts.getLongCounter; import static org.apache.hadoop.test.MetricsAsserts.getLongCounterWithoutCheck; import static org.apache.hadoop.test.MetricsAsserts.getMetrics; +import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertTrue; import org.junit.jupiter.api.AfterEach; -import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; @@ -92,26 +92,26 @@ public void tearDown() { @Test @Timeout(value = 120) public void testFullBlock() throws Exception { - Assertions.assertEquals(0, getLongMetric("EcReconstructionReadTimeMillis")); - Assertions.assertEquals(0, getLongMetric("EcReconstructionDecodingTimeMillis")); - Assertions.assertEquals(0, getLongMetric("EcReconstructionWriteTimeMillis")); + assertEquals(0, getLongMetric("EcReconstructionReadTimeMillis")); + assertEquals(0, getLongMetric("EcReconstructionDecodingTimeMillis")); + assertEquals(0, getLongMetric("EcReconstructionWriteTimeMillis")); doTest("/testEcMetrics", blockGroupSize, 0); - Assertions.assertEquals(1, getLongMetric("EcReconstructionTasks"), + assertEquals(1, getLongMetric("EcReconstructionTasks"), "EcReconstructionTasks should be "); - Assertions.assertEquals(0, getLongMetric("EcFailedReconstructionTasks"), + assertEquals(0, getLongMetric("EcFailedReconstructionTasks"), "EcFailedReconstructionTasks should be "); - Assertions.assertTrue(getLongMetric("EcDecodingTimeNanos") > 0); - Assertions.assertEquals(blockGroupSize, getLongMetric("EcReconstructionBytesRead"), + assertTrue(getLongMetric("EcDecodingTimeNanos") > 0); + assertEquals(blockGroupSize, getLongMetric("EcReconstructionBytesRead"), "EcReconstructionBytesRead should be "); - Assertions.assertEquals(blockSize, getLongMetric("EcReconstructionBytesWritten"), + assertEquals(blockSize, getLongMetric("EcReconstructionBytesWritten"), "EcReconstructionBytesWritten should be "); - Assertions.assertEquals(0, getLongMetricWithoutCheck("EcReconstructionRemoteBytesRead"), + assertEquals(0, getLongMetricWithoutCheck("EcReconstructionRemoteBytesRead"), "EcReconstructionRemoteBytesRead should be "); - Assertions.assertTrue(getLongMetric("EcReconstructionReadTimeMillis") > 0); - Assertions.assertTrue(getLongMetric("EcReconstructionDecodingTimeMillis") > 0); - 
Assertions.assertTrue(getLongMetric("EcReconstructionWriteTimeMillis") > 0); + assertTrue(getLongMetric("EcReconstructionReadTimeMillis") > 0); + assertTrue(getLongMetric("EcReconstructionDecodingTimeMillis") > 0); + assertTrue(getLongMetric("EcReconstructionWriteTimeMillis") > 0); } // A partial block, reconstruct the partial block @@ -121,11 +121,11 @@ public void testReconstructionBytesPartialGroup1() throws Exception { final int fileLen = blockSize / 10; doTest("/testEcBytes", fileLen, 0); - Assertions.assertEquals(fileLen, getLongMetric("EcReconstructionBytesRead"), + assertEquals(fileLen, getLongMetric("EcReconstructionBytesRead"), "EcReconstructionBytesRead should be "); - Assertions.assertEquals(fileLen, getLongMetric("EcReconstructionBytesWritten"), + assertEquals(fileLen, getLongMetric("EcReconstructionBytesWritten"), "EcReconstructionBytesWritten should be "); - Assertions.assertEquals(0, getLongMetricWithoutCheck("EcReconstructionRemoteBytesRead"), + assertEquals(0, getLongMetricWithoutCheck("EcReconstructionRemoteBytesRead"), "EcReconstructionRemoteBytesRead should be "); } @@ -136,12 +136,12 @@ public void testReconstructionBytesPartialGroup2() throws Exception { final int fileLen = cellSize * dataBlocks + cellSize + cellSize / 10; doTest("/testEcBytes", fileLen, 0); - Assertions.assertEquals(cellSize * dataBlocks + assertEquals(cellSize * dataBlocks + cellSize + cellSize / 10, getLongMetric("EcReconstructionBytesRead"), "ecReconstructionBytesRead should be "); - Assertions.assertEquals(blockSize, getLongMetric("EcReconstructionBytesWritten"), + assertEquals(blockSize, getLongMetric("EcReconstructionBytesWritten"), "EcReconstructionBytesWritten should be "); - Assertions.assertEquals(0, getLongMetricWithoutCheck("EcReconstructionRemoteBytesRead"), + assertEquals(0, getLongMetricWithoutCheck("EcReconstructionRemoteBytesRead"), "EcReconstructionRemoteBytesRead should be "); } @@ -152,13 +152,13 @@ public void testReconstructionBytesPartialGroup3() throws Exception { final int fileLen = cellSize * dataBlocks + cellSize + cellSize / 10; doTest("/testEcBytes", fileLen, 1); - Assertions.assertEquals(cellSize * dataBlocks + (cellSize / 10) * 2, + assertEquals(cellSize * dataBlocks + (cellSize / 10) * 2, getLongMetric("EcReconstructionBytesRead"), "ecReconstructionBytesRead should be "); - Assertions.assertEquals(cellSize + cellSize / 10, + assertEquals(cellSize + cellSize / 10, getLongMetric("EcReconstructionBytesWritten"), "ecReconstructionBytesWritten should be "); - Assertions.assertEquals(0, getLongMetricWithoutCheck("EcReconstructionRemoteBytesRead"), + assertEquals(0, getLongMetricWithoutCheck("EcReconstructionRemoteBytesRead"), "EcReconstructionRemoteBytesRead should be "); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java index 098d8a46f5762..babce8d5833cf 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java @@ -51,9 +51,9 @@ import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.Lists; import org.apache.hadoop.util.Time; -import org.junit.After; -import org.junit.Assert; -import org.junit.Test; +import org.junit.jupiter.api.AfterEach; +import 
org.junit.jupiter.api.Test; +import org.junit.jupiter.api.Timeout; import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; @@ -83,20 +83,16 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY; import static org.apache.hadoop.test.PlatformAssumptions.assumeNotWindows; -import static org.hamcrest.CoreMatchers.anyOf; -import static org.hamcrest.CoreMatchers.containsString; -import static org.hamcrest.CoreMatchers.not; -import static org.hamcrest.core.Is.is; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertThat; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.anyString; import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.timeout; +import static org.assertj.core.api.Assertions.assertThat; public class TestDataNodeHotSwapVolumes { private static final Logger LOG = LoggerFactory.getLogger( @@ -106,7 +102,7 @@ public class TestDataNodeHotSwapVolumes { private MiniDFSCluster cluster; private Configuration conf; - @After + @AfterEach public void tearDown() { shutdown(); } @@ -340,9 +336,9 @@ private void addVolumes(int numNewVolumes, CountDownLatch waitLatch) String newDataDir = newDataDirBuf.toString(); assertThat( - "DN did not update its own config", - dn.reconfigurePropertyImpl(DFS_DATANODE_DATA_DIR_KEY, newDataDir), - is(conf.get(DFS_DATANODE_DATA_DIR_KEY))); + dn.reconfigurePropertyImpl(DFS_DATANODE_DATA_DIR_KEY, newDataDir)) + .as("DN did not update its own config") + .isEqualTo(conf.get(DFS_DATANODE_DATA_DIR_KEY)); // Await on the latch for needed operations to complete waitLatch.await(); @@ -370,8 +366,8 @@ public int compare(StorageLocation o1, StorageLocation o2) { }; Collections.sort(expectedStorageLocations, comparator); Collections.sort(effectiveStorageLocations, comparator); - assertEquals("Effective volumes doesnt match expected", - expectedStorageLocations, effectiveStorageLocations); + assertEquals(expectedStorageLocations, effectiveStorageLocations, + "Effective volumes doesnt match expected"); // Check that all newly created volumes are appropriately formatted. for (File volumeDir : newVolumeDirs) { @@ -399,7 +395,8 @@ private List> getNumBlocksReport(int namesystemIdx) { /** * Test adding one volume on a running MiniDFSCluster with only one NameNode. */ - @Test(timeout=60000) + @Test + @Timeout(value = 60) public void testAddOneNewVolume() throws IOException, ReconfigurationException, InterruptedException, TimeoutException { @@ -433,7 +430,8 @@ public void testAddOneNewVolume() * Test re-adding one volume with some blocks on a running MiniDFSCluster * with only one NameNode to reproduce HDFS-13677. */ - @Test(timeout=60000) + @Test + @Timeout(value = 60) public void testReAddVolumeWithBlocks() throws IOException, ReconfigurationException, InterruptedException, TimeoutException { @@ -454,10 +452,10 @@ public void testReAddVolumeWithBlocks() Collection oldDirs = getDataDirs(dn); String newDirs = oldDirs.iterator().next(); // Keep the first volume. 
assertThat( - "DN did not update its own config", dn.reconfigurePropertyImpl( - DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, newDirs), - is(dn.getConf().get(DFS_DATANODE_DATA_DIR_KEY))); + DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, newDirs)) + .as("DN did not update its own config") + .isEqualTo(dn.getConf().get(DFS_DATANODE_DATA_DIR_KEY)); assertFileLocksReleased( new ArrayList(oldDirs).subList(1, oldDirs.size())); @@ -475,10 +473,10 @@ public void testReAddVolumeWithBlocks() // Now add the original volume back again and ensure 15 blocks are reported assertThat( - "DN did not update its own config", dn.reconfigurePropertyImpl( - DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, String.join(",", oldDirs)), - is(dn.getConf().get(DFS_DATANODE_DATA_DIR_KEY))); + DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, String.join(",", oldDirs))) + .as("DN did not update its own config") + .isEqualTo(dn.getConf().get(DFS_DATANODE_DATA_DIR_KEY)); dn.scheduleAllBlockReport(0); blockReports = cluster.getAllBlockReports(bpid); @@ -497,7 +495,8 @@ public void testReAddVolumeWithBlocks() assertEquals(15, maxNumBlocks); } - @Test(timeout=60000) + @Test + @Timeout(value = 60) public void testAddVolumesDuringWrite() throws IOException, InterruptedException, TimeoutException, ReconfigurationException { @@ -540,7 +539,8 @@ public void testAddVolumesDuringWrite() assertEquals(expectedNumBlocks, actualNumBlocks); } - @Test(timeout=180000) + @Test + @Timeout(value = 180) public void testAddVolumesConcurrently() throws IOException, InterruptedException, TimeoutException, ReconfigurationException { @@ -572,7 +572,7 @@ public void testAddVolumesConcurrently() public void run() { while (addVolumeCompletionLatch.getCount() != newVolumeCount) { int i = 0; - while(i++ < 1000) { + while (i++ < 1000) { try { dn.getStorage().listStorageDirectories(); } catch (Exception e) { @@ -627,9 +627,10 @@ public void run() { listStorageThread.join(); // Verify errors while adding volumes and listing storage directories - Assert.assertEquals("Error adding volumes!", false, addVolumeError.get()); - Assert.assertEquals("Error listing storage!", - false, listStorageError.get()); + assertEquals(false, addVolumeError.get(), + "Error adding volumes!"); + assertEquals(false, listStorageError.get(), + "Error listing storage!"); int additionalBlockCount = 9; int totalBlockCount = initialBlockCount + additionalBlockCount; @@ -645,7 +646,8 @@ public void run() { assertEquals(numVolumes, blockReports.get(0).size()); } - @Test(timeout=60000) + @Test + @Timeout(value = 60) public void testAddVolumesToFederationNN() throws IOException, TimeoutException, InterruptedException, ReconfigurationException { @@ -680,7 +682,8 @@ public void testAddVolumesToFederationNN() Collections.frequency(actualNumBlocks.get(0), 0)); } - @Test(timeout=60000) + @Test + @Timeout(value = 60) public void testRemoveOneVolume() throws ReconfigurationException, InterruptedException, TimeoutException, IOException { @@ -693,10 +696,10 @@ public void testRemoveOneVolume() Collection oldDirs = getDataDirs(dn); String newDirs = oldDirs.iterator().next(); // Keep the first volume. 
assertThat( - "DN did not update its own config", dn.reconfigurePropertyImpl( - DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, newDirs), - is(dn.getConf().get(DFS_DATANODE_DATA_DIR_KEY))); + DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, newDirs)) + .as("DN did not update its own config") + .isEqualTo(dn.getConf().get(DFS_DATANODE_DATA_DIR_KEY)); assertFileLocksReleased( new ArrayList(oldDirs).subList(1, oldDirs.size())); dn.scheduleAllBlockReport(0); @@ -722,7 +725,8 @@ public void testRemoveOneVolume() assertEquals(10 / 2 + 6, blocksForVolume1.getNumberOfBlocks()); } - @Test(timeout=60000) + @Test + @Timeout(value = 60) public void testReplicatingAfterRemoveVolume() throws InterruptedException, TimeoutException, IOException, ReconfigurationException { @@ -750,10 +754,10 @@ public void testReplicatingAfterRemoveVolume() break; } assertThat( - "DN did not update its own config", dn.reconfigurePropertyImpl( - DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, newDirs), - is(dn.getConf().get(DFS_DATANODE_DATA_DIR_KEY))); + DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, newDirs)) + .as("DN did not update its own config") + .isEqualTo(dn.getConf().get(DFS_DATANODE_DATA_DIR_KEY)); oldDirs.remove(newDirs); assertFileLocksReleased(oldDirs); @@ -789,8 +793,8 @@ public void testAddVolumeFailures() throws IOException { String errorMessage = e.getCause().getMessage(); String messages[] = errorMessage.split("\\r?\\n"); assertEquals(2, messages.length); - assertThat(messages[0], containsString("new_vol0")); - assertThat(messages[1], containsString("new_vol2")); + assertThat(messages[0]).contains("new_vol0"); + assertThat(messages[1]).contains("new_vol2"); } // Make sure that vol0 and vol2's metadata are not left in memory. @@ -798,15 +802,16 @@ public void testAddVolumeFailures() throws IOException { try (FsDatasetSpi.FsVolumeReferences volumes = dataset.getFsVolumeReferences()) { for (FsVolumeSpi volume : volumes) { - assertThat(new File(volume.getStorageLocation().getUri()).toString(), - is(not(anyOf(is(newDirs.get(0)), is(newDirs.get(2)))))); + assertThat(new File(volume.getStorageLocation().getUri()).toString()) + .isNotIn(newDirs.get(0), newDirs.get(2)); } } DataStorage storage = dn.getStorage(); for (int i = 0; i < storage.getNumStorageDirs(); i++) { Storage.StorageDirectory sd = storage.getStorageDir(i); - assertThat(sd.getRoot().toString(), - is(not(anyOf(is(newDirs.get(0)), is(newDirs.get(2)))))); + assertThat( + sd.getRoot().toString()) + .isNotIn(newDirs.get(0), newDirs.get(2)); } // The newly effective conf does not have vol0 and vol2. 
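The testAddVolumeFailures hunks above replace nested Hamcrest matchers (`is(not(anyOf(is(a), is(b))))`, `containsString(...)`) with flatter AssertJ calls. A sketch with made-up paths and messages:

```java
import static org.assertj.core.api.Assertions.assertThat;

import org.junit.jupiter.api.Test;

class NegativeMembershipSketchTest {

  @Test
  void survivingVolumeIsNotOneOfTheRejectedOnes() {
    String volumePath = "/data/new_vol1";         // hypothetical volume that stayed in service

    // was: assertThat(volumePath, is(not(anyOf(is("/data/new_vol0"), is("/data/new_vol2")))));
    assertThat(volumePath).isNotIn("/data/new_vol0", "/data/new_vol2");
  }

  @Test
  void errorMessageMentionsTheFailedVolume() {
    String errorMessage = "Failed to add volume: /data/new_vol0";  // hypothetical message

    // was: assertThat(errorMessage, containsString("new_vol0"));
    assertThat(errorMessage).contains("new_vol0");
  }
}
```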
@@ -815,9 +820,8 @@ public void testAddVolumeFailures() throws IOException { assertEquals(4, effectiveVolumes.length); for (String ev : effectiveVolumes) { assertThat( - new File(StorageLocation.parse(ev).getUri()).getCanonicalPath(), - is(not(anyOf(is(newDirs.get(0)), is(newDirs.get(2))))) - ); + new File(StorageLocation.parse(ev).getUri()).getCanonicalPath()) + .isNotIn(newDirs.get(0), newDirs.get(2)); } } @@ -840,7 +844,8 @@ private static void assertFileLocksReleased(Collection dirs) } } - @Test(timeout=600000) + @Test + @Timeout(value = 600) public void testRemoveVolumeBeingWritten() throws InterruptedException, TimeoutException, ReconfigurationException, IOException, BrokenBarrierException { @@ -931,10 +936,10 @@ public void logDelaySendingAckToUpstream( barrier.await(); assertThat( - "DN did not update its own config", dataNode.reconfigurePropertyImpl( - DFS_DATANODE_DATA_DIR_KEY, newDirs), - is(dataNode.getConf().get(DFS_DATANODE_DATA_DIR_KEY))); + DFS_DATANODE_DATA_DIR_KEY, newDirs)) + .as("DN did not update its own config") + .isEqualTo(dataNode.getConf().get(DFS_DATANODE_DATA_DIR_KEY)); done.set(true); } catch (ReconfigurationException | InterruptedException | @@ -968,8 +973,8 @@ public void logDelaySendingAckToUpstream( System.out.println("Vol: " + fsVolumeReferences.get(i).getBaseURI().toString()); } - assertEquals("Volume remove wasn't successful.", - 1, fsVolumeReferences.size()); + assertEquals(1, fsVolumeReferences.size(), + "Volume remove wasn't successful."); } // Verify the file has sufficient replications. @@ -990,8 +995,8 @@ public void logDelaySendingAckToUpstream( try (FsDatasetSpi.FsVolumeReferences fsVolumeReferences = fsDatasetSpi .getFsVolumeReferences()) { - assertEquals("Volume remove wasn't successful.", - 1, fsVolumeReferences.size()); + assertEquals(1, fsVolumeReferences.size(), + "Volume remove wasn't successful."); FsVolumeSpi volume = fsVolumeReferences.get(0); String bpid = cluster.getNamesystem().getBlockPoolId(); FsVolumeSpi.BlockIterator blkIter = volume.newBlockIterator(bpid, "test"); @@ -1000,12 +1005,13 @@ public void logDelaySendingAckToUpstream( blkIter.nextBlock(); blockCount++; } - assertTrue(String.format("DataNode(%d) should have more than 1 blocks", - dataNodeIdx), blockCount > 1); + assertTrue(blockCount > 1, + String.format("DataNode(%d) should have more than 1 blocks", dataNodeIdx)); } } - @Test(timeout=60000) + @Test + @Timeout(value = 60) public void testAddBackRemovedVolume() throws IOException, TimeoutException, InterruptedException, ReconfigurationException { @@ -1020,9 +1026,9 @@ public void testAddBackRemovedVolume() String removeDataDir = oldDataDir.split(",")[1]; assertThat( - "DN did not update its own config", - dn.reconfigurePropertyImpl(DFS_DATANODE_DATA_DIR_KEY, keepDataDir), - is(dn.getConf().get(DFS_DATANODE_DATA_DIR_KEY))); + dn.reconfigurePropertyImpl(DFS_DATANODE_DATA_DIR_KEY, keepDataDir)) + .as("DN did not update its own config") + .isEqualTo(dn.getConf().get(DFS_DATANODE_DATA_DIR_KEY)); for (int i = 0; i < cluster.getNumNameNodes(); i++) { String bpid = cluster.getNamesystem(i).getBlockPoolId(); BlockPoolSliceStorage bpsStorage = @@ -1040,9 +1046,9 @@ public void testAddBackRemovedVolume() // Bring the removed directory back. It only successes if all metadata about // this directory were removed from the previous step. 
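Most of the assertEquals/assertTrue rewrites in this patch are only a reordering: JUnit 4 took the failure message first, JUnit 5 takes it last (optionally as a Supplier). A compact sketch, reusing one of the messages from the hunks above with invented values:

```java
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertTrue;

import org.junit.jupiter.api.Test;

class MessageLastSketchTest {

  @Test
  void messageMovesToTheLastArgument() {
    int remainingVolumes = 1;                     // hypothetical value after the removal

    // JUnit 4: assertEquals("Volume remove wasn't successful.", 1, remainingVolumes);
    assertEquals(1, remainingVolumes, "Volume remove wasn't successful.");

    int blockCount = 3;                           // hypothetical
    // Expensive messages can be built lazily; the Supplier runs only on failure.
    assertTrue(blockCount > 1,
        () -> String.format("DataNode(%d) should have more than 1 blocks", 0));
  }
}
```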
assertThat( - "DN did not update its own config", - dn.reconfigurePropertyImpl(DFS_DATANODE_DATA_DIR_KEY, oldDataDir), - is(dn.getConf().get(DFS_DATANODE_DATA_DIR_KEY))); + dn.reconfigurePropertyImpl(DFS_DATANODE_DATA_DIR_KEY, oldDataDir)) + .as("DN did not update its own config") + .isEqualTo(dn.getConf().get(DFS_DATANODE_DATA_DIR_KEY)); } /** @@ -1050,7 +1056,8 @@ public void testAddBackRemovedVolume() * DataNode upon a volume failure. Thus we can run reconfig on the same * configuration to reload the new volume on the same directory as the failed one. */ - @Test(timeout=60000) + @Test + @Timeout(value = 60) public void testDirectlyReloadAfterCheckDiskError() throws Exception { // The test uses DataNodeTestUtils#injectDataDirFailure() to simulate @@ -1065,8 +1072,8 @@ public void testDirectlyReloadAfterCheckDiskError() File dirToFail = cluster.getInstanceStorageDir(0, 0); FsVolumeImpl failedVolume = DataNodeTestUtils.getVolume(dn, dirToFail); - assertTrue("No FsVolume was found for " + dirToFail, - failedVolume != null); + assertTrue(failedVolume != null, + "No FsVolume was found for " + dirToFail); long used = failedVolume.getDfsUsed(); DataNodeTestUtils.injectDataDirFailure(dirToFail); @@ -1079,9 +1086,9 @@ public void testDirectlyReloadAfterCheckDiskError() DataNodeTestUtils.restoreDataDirFromFailure(dirToFail); LOG.info("reconfiguring DN "); assertThat( - "DN did not update its own config", - dn.reconfigurePropertyImpl(DFS_DATANODE_DATA_DIR_KEY, oldDataDir), - is(dn.getConf().get(DFS_DATANODE_DATA_DIR_KEY))); + dn.reconfigurePropertyImpl(DFS_DATANODE_DATA_DIR_KEY, oldDataDir)) + .as("DN did not update its own config") + .isEqualTo(dn.getConf().get(DFS_DATANODE_DATA_DIR_KEY)); createFile(new Path("/test2"), 32, (short)2); FsVolumeImpl restoredVolume = DataNodeTestUtils.getVolume(dn, dirToFail); @@ -1092,7 +1099,8 @@ public void testDirectlyReloadAfterCheckDiskError() } /** Test that a full block report is sent after hot swapping volumes */ - @Test(timeout=100000) + @Test + @Timeout(value = 100) public void testFullBlockReportAfterRemovingVolumes() throws IOException, ReconfigurationException { @@ -1116,10 +1124,11 @@ public void testFullBlockReportAfterRemovingVolumes() // Remove a data dir from datanode File dataDirToKeep = cluster.getInstanceStorageDir(0, 0); assertThat( - "DN did not update its own config", dn.reconfigurePropertyImpl( - DFS_DATANODE_DATA_DIR_KEY, dataDirToKeep.toString()), - is(dn.getConf().get(DFS_DATANODE_DATA_DIR_KEY))); + DFS_DATANODE_DATA_DIR_KEY, dataDirToKeep.toString())) + .as("DN did not update its own config") + .isEqualTo(dn.getConf().get(DFS_DATANODE_DATA_DIR_KEY)); + // We should get 1 full report Mockito.verify(spy, timeout(60000).times(1)).blockReport( @@ -1129,7 +1138,8 @@ public void testFullBlockReportAfterRemovingVolumes() any(BlockReportContext.class)); } - @Test(timeout=60000) + @Test + @Timeout(value = 60) public void testAddVolumeWithVolumeOnSameMount() throws IOException { shutdown(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetrics.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetrics.java index b4457d13c3927..bf72ea9ede52c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetrics.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetrics.java @@ -24,7 +24,10 @@ import static 
org.apache.hadoop.test.MetricsAsserts.assertQuantileGauges; import static org.apache.hadoop.test.MetricsAsserts.getLongCounter; import static org.apache.hadoop.test.MetricsAsserts.getMetrics; -import static org.junit.jupiter.api.Assertions.*; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; import static org.junit.jupiter.api.Assumptions.assumeTrue; import java.io.Closeable; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetricsLogger.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetricsLogger.java index 73201ba6054d5..3692c4b7d9482 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetricsLogger.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetricsLogger.java @@ -19,9 +19,9 @@ package org.apache.hadoop.hdfs.server.datanode; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_METRICS_LOGGER_PERIOD_SECONDS_KEY; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertNull; -import static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertTrue; import java.io.File; import java.io.IOException; @@ -31,6 +31,7 @@ import java.util.Random; import java.util.concurrent.TimeoutException; +import org.junit.jupiter.api.Timeout; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; @@ -44,17 +45,15 @@ import org.apache.hadoop.test.GenericTestUtils; import org.apache.log4j.Appender; import org.apache.log4j.AsyncAppender; -import org.junit.After; -import org.junit.Assert; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.Timeout; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.Test; import java.util.function.Supplier; /** * Test periodic logging of DataNode metrics. 
*/ +@Timeout(300) public class TestDataNodeMetricsLogger { static final Logger LOG = LoggerFactory.getLogger(TestDataNodeMetricsLogger.class); @@ -69,9 +68,6 @@ public class TestDataNodeMetricsLogger { static final Random random = new Random(System.currentTimeMillis()); - @Rule - public Timeout timeout = new Timeout(300000); - /** * Starts an instance of DataNode * @@ -96,7 +92,7 @@ public void startDNForTest(boolean enableMetricsLogging) throws IOException { * @throws IOException * if an error occurred */ - @After + @AfterEach public void tearDown() throws IOException { if (dn != null) { try { @@ -106,8 +102,7 @@ public void tearDown() throws IOException { } finally { File dir = new File(DATA_DIR); if (dir.exists()) - Assert.assertTrue("Cannot delete data-node dirs", - FileUtil.fullyDelete(dir)); + assertTrue(FileUtil.fullyDelete(dir), "Cannot delete data-node dirs"); } } dn = null; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMultipleRegistrations.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMultipleRegistrations.java index ef0562517dc8b..d36b79e659fc1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMultipleRegistrations.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMultipleRegistrations.java @@ -22,7 +22,7 @@ import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertNotSame; import static org.junit.jupiter.api.Assertions.assertTrue; - +import static org.junit.jupiter.api.Assertions.fail; import java.io.IOException; import java.net.InetSocketAddress; import java.util.List; @@ -44,7 +44,6 @@ import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.StringUtils; -import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; @@ -94,7 +93,7 @@ public void test2NNRegistration() throws IOException { // check number of volumes in fsdataset DataNode dn = cluster.getDataNodes().get(0); final Map volInfos = dn.data.getVolumeInfoMap(); - Assertions.assertTrue(volInfos.size() > 0, "No volumes in the fsdataset"); + assertTrue(volInfos.size() > 0, "No volumes in the fsdataset"); int i = 0; for (Map.Entry e : volInfos.entrySet()) { LOG.info("vol " + i++ + ") " + e.getKey() + ": " + e.getValue()); @@ -158,7 +157,7 @@ public void testFedSingleNN() throws IOException { // check number of vlumes in fsdataset DataNode dn = cluster.getDataNodes().get(0); final Map volInfos = dn.data.getVolumeInfoMap(); - Assertions.assertTrue(volInfos.size() > 0, "No volumes in the fsdataset"); + assertTrue(volInfos.size() > 0, "No volumes in the fsdataset"); int i = 0; for (Map.Entry e : volInfos.entrySet()) { LOG.info("vol " + i++ + ") " + e.getKey() + ": " + e.getValue()); @@ -205,14 +204,14 @@ public void testClusterIdMismatch() throws Exception { DataNode dn = cluster.getDataNodes().get(0); List bposs = dn.getAllBpOs(); LOG.info("dn bpos len (should be 2):" + bposs.size()); - Assertions.assertEquals(bposs.size(), 2, "should've registered with two namenodes"); + assertEquals(bposs.size(), 2, "should've registered with two namenodes"); // add another namenode cluster.addNameNode(conf, 9938); Thread.sleep(500);// lets wait for the registration to happen 
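In TestDataNodeMetricsLogger the per-class `@Rule Timeout(300000)` disappears in favour of a class-level `@Timeout(300)`. A sketch of how the annotation now governs every test method (class and methods are illustrative):

```java
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.Timeout;

@Timeout(300)                     // seconds; applies to each @Test method in the class
class ClassLevelTimeoutSketchTest {

  @Test
  void firstTestInheritsTheClassTimeout() {
    // runs under the 300 second budget
  }

  @Test
  @Timeout(5)                     // a method-level annotation overrides the class default
  void tighterBudgetForThisTestOnly() {
  }
}
```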
bposs = dn.getAllBpOs(); LOG.info("dn bpos len (should be 3):" + bposs.size()); - Assertions.assertEquals(bposs.size(), 3, "should've registered with three namenodes"); + assertEquals(bposs.size(), 3, "should've registered with three namenodes"); // change cluster id and another Namenode StartupOption.FORMAT.setClusterId("DifferentCID"); @@ -223,7 +222,7 @@ public void testClusterIdMismatch() throws Exception { Thread.sleep(500);// lets wait for the registration to happen bposs = dn.getAllBpOs(); LOG.info("dn bpos len (still should be 3):" + bposs.size()); - Assertions.assertEquals(3, bposs.size(), "should've registered with three namenodes"); + assertEquals(3, bposs.size(), "should've registered with three namenodes"); } finally { cluster.shutdown(); } @@ -322,12 +321,12 @@ public void testMiniDFSClusterWithMultipleNN() throws IOException { // add a node try { cluster.waitActive(); - Assertions.assertEquals(2, cluster.getNumNameNodes(), "(1)Should be 2 namenodes"); + assertEquals(2, cluster.getNumNameNodes(), "(1)Should be 2 namenodes"); cluster.addNameNode(conf, 0); - Assertions.assertEquals(3, cluster.getNumNameNodes(), "(1)Should be 3 namenodes"); + assertEquals(3, cluster.getNumNameNodes(), "(1)Should be 3 namenodes"); } catch (IOException ioe) { - Assertions.fail("Failed to add NN to cluster:" + StringUtils.stringifyException(ioe)); + fail("Failed to add NN to cluster:" + StringUtils.stringifyException(ioe)); } finally { cluster.shutdown(); } @@ -339,15 +338,15 @@ public void testMiniDFSClusterWithMultipleNN() throws IOException { .build(); try { - Assertions.assertNotNull(cluster); + assertNotNull(cluster); cluster.waitActive(); - Assertions.assertEquals(1, cluster.getNumNameNodes(), "(2)Should be 1 namenodes"); + assertEquals(1, cluster.getNumNameNodes(), "(2)Should be 1 namenodes"); // add a node cluster.addNameNode(conf, 0); - Assertions.assertEquals(2, cluster.getNumNameNodes(), "(2)Should be 2 namenodes"); + assertEquals(2, cluster.getNumNameNodes(), "(2)Should be 2 namenodes"); } catch (IOException ioe) { - Assertions.fail("Failed to add NN to cluster:" + StringUtils.stringifyException(ioe)); + fail("Failed to add NN to cluster:" + StringUtils.stringifyException(ioe)); } finally { cluster.shutdown(); } @@ -359,15 +358,15 @@ public void testMiniDFSClusterWithMultipleNN() throws IOException { // add a node try { cluster.waitActive(); - Assertions.assertNotNull(cluster); - Assertions.assertEquals(1, cluster.getNumNameNodes(), "(2)Should be 1 namenodes"); + assertNotNull(cluster); + assertEquals(1, cluster.getNumNameNodes(), "(2)Should be 1 namenodes"); cluster.addNameNode(conf, 9929); - Assertions.fail("shouldn't be able to add another NN to non federated cluster"); + fail("shouldn't be able to add another NN to non federated cluster"); } catch (IOException e) { // correct - Assertions.assertTrue(e.getMessage().startsWith("cannot add namenode")); - Assertions.assertEquals(1, cluster.getNumNameNodes(), "(3)Should be 1 namenodes"); + assertTrue(e.getMessage().startsWith("cannot add namenode")); + assertEquals(1, cluster.getNumNameNodes(), "(3)Should be 1 namenodes"); } finally { cluster.shutdown(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeReconfiguration.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeReconfiguration.java index 1101368ecbe19..8a6e59a509589 100644 --- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeReconfiguration.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeReconfiguration.java @@ -58,6 +58,7 @@ import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; import static org.junit.jupiter.api.Assertions.fail; @@ -82,7 +83,6 @@ import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsVolumeImpl; import org.apache.hadoop.test.LambdaTestUtils; import org.junit.jupiter.api.AfterEach; -import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; @@ -114,7 +114,7 @@ public void tearDown() throws Exception { File dir = new File(DATA_DIR); if (dir.exists()) - Assertions.assertTrue(FileUtil.fullyDelete(dir), + assertTrue(FileUtil.fullyDelete(dir), "Cannot delete data-node dirs"); } @@ -260,7 +260,7 @@ public void testFailedDecreaseConcurrentMovers() // Attempt to set new maximum to 1 final boolean success = dataNode.xserver.updateBalancerMaxConcurrentMovers(1); - Assertions.assertFalse(success); + assertFalse(success); } finally { dataNode.shutdown(); } @@ -272,7 +272,7 @@ public void testFailedDecreaseConcurrentMovers() @Test public void testFailedDecreaseConcurrentMoversReconfiguration() throws IOException, ReconfigurationException { - Assertions.assertThrows(ReconfigurationException.class, () -> { + assertThrows(ReconfigurationException.class, () -> { final DataNode[] dns = createDNsForTest(1); final DataNode dataNode = dns[0]; try { @@ -289,9 +289,9 @@ public void testFailedDecreaseConcurrentMoversReconfiguration() dataNode.reconfigurePropertyImpl( DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_KEY, "1"); } catch (ReconfigurationException e) { - Assertions.assertEquals(DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_KEY, + assertEquals(DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_KEY, e.getProperty()); - Assertions.assertEquals("1", e.getNewValue()); + assertEquals("1", e.getNewValue()); throw e; } finally { dataNode.shutdown(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeRollingUpgrade.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeRollingUpgrade.java index fdde4cabe6bb7..291f446b4d662 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeRollingUpgrade.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeRollingUpgrade.java @@ -18,8 +18,8 @@ package org.apache.hadoop.hdfs.server.datanode; -import static org.hamcrest.core.Is.is; -import static org.junit.Assert.*; +import static org.junit.jupiter.api.Assertions.*; +import static org.assertj.core.api.Assertions.assertThat; import java.io.File; import java.io.IOException; @@ -27,8 +27,7 @@ import java.util.regex.Matcher; import java.util.regex.Pattern; -import org.junit.Rule; -import org.junit.rules.TemporaryFolder; +import org.junit.jupiter.api.io.TempDir; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; @@ -51,7 +50,8 @@ import org.apache.hadoop.hdfs.server.namenode.NameNode; import 
org.apache.hadoop.hdfs.tools.DFSAdmin; import org.apache.hadoop.test.GenericTestUtils; -import org.junit.Test; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.Timeout; import org.mockito.Mockito; /** @@ -67,8 +67,9 @@ public class TestDataNodeRollingUpgrade { private static final long FILE_SIZE = BLOCK_SIZE; private static final long SEED = 0x1BADF00DL; - @Rule - public TemporaryFolder baseDir = new TemporaryFolder(); + @SuppressWarnings("checkstyle:VisibilityModifier") + @TempDir + public java.nio.file.Path baseDir; Configuration conf; MiniDFSCluster cluster = null; @@ -80,7 +81,7 @@ public class TestDataNodeRollingUpgrade { private void startCluster() throws IOException { conf = new HdfsConfiguration(); conf.setInt("dfs.blocksize", 1024*1024); - cluster = new Builder(conf, baseDir.getRoot()).numDataNodes(REPL_FACTOR).build(); + cluster = new Builder(conf, baseDir.toFile()).numDataNodes(REPL_FACTOR).build(); cluster.waitActive(); fs = cluster.getFileSystem(); nn = cluster.getNameNode(0); @@ -112,8 +113,8 @@ private void triggerHeartBeats() throws Exception { private File getBlockForFile(Path path, boolean exists) throws IOException { LocatedBlocks blocks = nn.getRpcServer().getBlockLocations(path.toString(), 0, Long.MAX_VALUE); - assertEquals("The test helper functions assume that each file has a single block", - 1, blocks.getLocatedBlocks().size()); + assertEquals(1, blocks.getLocatedBlocks().size(), + "The test helper functions assume that each file has a single block"); ExtendedBlock block = blocks.getLocatedBlocks().get(0).getBlock(); BlockLocalPathInfo bInfo = dn0.getFSDataset().getBlockLocalPathInfo(block); File blockFile = new File(bInfo.getBlockPath()); @@ -212,7 +213,8 @@ private void rollbackRollingUpgrade() throws Exception { LOG.info("The cluster is active after rollback"); } - @Test (timeout=600000) + @Test + @Timeout(value = 600) public void testDatanodeRollingUpgradeWithFinalize() throws Exception { try { startCluster(); @@ -224,7 +226,8 @@ public void testDatanodeRollingUpgradeWithFinalize() throws Exception { } } - @Test(timeout = 600000) + @Test + @Timeout(value = 600) public void testDatanodeRUwithRegularUpgrade() throws Exception { try { startCluster(); @@ -263,7 +266,8 @@ private void rollingUpgradeAndFinalize() throws IOException, Exception { assert(fs.exists(testFile1)); } - @Test (timeout=600000) + @Test + @Timeout(value = 600) public void testDatanodeRollingUpgradeWithRollback() throws Exception { try { startCluster(); @@ -288,13 +292,14 @@ public void testDatanodeRollingUpgradeWithRollback() throws Exception { // Ensure that files exist and restored file contents are the same. assert(fs.exists(testFile1)); String fileContents2 = DFSTestUtil.readFile(fs, testFile1); - assertThat(fileContents1, is(fileContents2)); + assertThat(fileContents1).isEqualTo(fileContents2); } finally { shutdownCluster(); } } - @Test (timeout=600000) + @Test + @Timeout(value = 600) // Test DatanodeXceiver has correct peer-dataxceiver pairs for sending OOB message public void testDatanodePeersXceiver() throws Exception { try { @@ -342,7 +347,8 @@ public void testDatanodePeersXceiver() throws Exception { * Support for layout version change with rolling upgrade was * added by HDFS-6800 and HDFS-6981. 
*/ - @Test(timeout=300000) + @Test + @Timeout(value = 300) public void testWithLayoutChangeAndFinalize() throws Exception { final long seed = 0x600DF00D; try { @@ -402,7 +408,8 @@ public void testWithLayoutChangeAndFinalize() throws Exception { * Support for layout version change with rolling upgrade was * added by HDFS-6800 and HDFS-6981. */ - @Test(timeout=300000) + @Test + @Timeout(value = 300) public void testWithLayoutChangeAndRollback() throws Exception { final long seed = 0x600DF00D; try { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeUUID.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeUUID.java index 80cc7d5cdc3cf..3e144a7836fc4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeUUID.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeUUID.java @@ -26,15 +26,16 @@ import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.test.GenericTestUtils; -import org.junit.Test; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.Timeout; import java.io.File; import java.net.InetSocketAddress; import java.util.ArrayList; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotEquals; -import static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; public class TestDataNodeUUID { @@ -68,7 +69,8 @@ public void testDatanodeUuid() throws Exception { assertNotEquals(dn.getDatanodeUuid(), nullString); } - @Test(timeout = 10000) + @Test + @Timeout(value = 10) public void testUUIDRegeneration() throws Exception { File baseDir = GenericTestUtils.getTestDir(); File disk1 = new File(baseDir, "disk1"); @@ -96,19 +98,16 @@ public void testUUIDRegeneration() throws Exception { // on the second disk MiniDFSCluster.DataNodeProperties dn = cluster.stopDataNode(0); FileUtils.deleteDirectory(disk2); - assertTrue("Failed to recreate the data directory: " + disk2, - disk2.mkdirs()); + assertTrue(disk2.mkdirs(), "Failed to recreate the data directory: " + disk2); // Restart and check if the UUID changed - assertTrue("DataNode failed to start up: " + dn, - cluster.restartDataNode(dn)); + assertTrue(cluster.restartDataNode(dn), "DataNode failed to start up: " + dn); // We need to wait until the DN has completed registration while (!cluster.getDataNodes().get(0).isDatanodeFullyStarted()) { Thread.sleep(50); } - assertEquals( - "DN generated a new UUID despite disk1 having it intact", - originalUUID, cluster.getDataNodes().get(0).getDatanodeUuid()); + assertEquals(originalUUID, cluster.getDataNodes().get(0).getDatanodeUuid(), + "DN generated a new UUID despite disk1 having it intact"); } finally { if (cluster != null) { cluster.shutdown(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureReporting.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureReporting.java index 91521f9be1e47..42a93d7e50415 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureReporting.java +++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureReporting.java @@ -58,7 +58,6 @@ import org.apache.hadoop.test.GenericTestUtils; import org.slf4j.event.Level; import org.junit.jupiter.api.AfterEach; -import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; @@ -532,7 +531,7 @@ public void testHotSwapOutFailedVolumeAndReporting() "Hadoop:service=DataNode,name=FSDatasetState-" + dn0.getDatanodeUuid()); int numFailedVolumes = (int) mbs.getAttribute(mxbeanName, "NumFailedVolumes"); - Assertions.assertEquals(dn0.getFSDataset().getNumFailedVolumes(), numFailedVolumes); + assertEquals(dn0.getFSDataset().getNumFailedVolumes(), numFailedVolumes); checkFailuresAtDataNode(dn0, 0, false, new String[] {}); // Fail dn0Vol1 first. @@ -541,8 +540,8 @@ public void testHotSwapOutFailedVolumeAndReporting() DataNodeTestUtils.waitForDiskError(dn0, DataNodeTestUtils.getVolume(dn0, dn0Vol1)); numFailedVolumes = (int) mbs.getAttribute(mxbeanName, "NumFailedVolumes"); - Assertions.assertEquals(1, numFailedVolumes); - Assertions.assertEquals(dn0.getFSDataset().getNumFailedVolumes(), numFailedVolumes); + assertEquals(1, numFailedVolumes); + assertEquals(dn0.getFSDataset().getNumFailedVolumes(), numFailedVolumes); checkFailuresAtDataNode(dn0, 1, true, new String[] {dn0Vol1.getAbsolutePath()}); @@ -553,12 +552,12 @@ public void testHotSwapOutFailedVolumeAndReporting() oldDataDirs); fail("Reconfigure with failed disk should throw exception."); } catch (ReconfigurationException e) { - Assertions.assertTrue(e.getCause().getMessage().contains(dn0Vol1.getAbsolutePath()), + assertTrue(e.getCause().getMessage().contains(dn0Vol1.getAbsolutePath()), "Reconfigure exception doesn't have expected path!"); } numFailedVolumes = (int) mbs.getAttribute(mxbeanName, "NumFailedVolumes"); - Assertions.assertEquals(1, numFailedVolumes); - Assertions.assertEquals(dn0.getFSDataset().getNumFailedVolumes(), numFailedVolumes); + assertEquals(1, numFailedVolumes); + assertEquals(dn0.getFSDataset().getNumFailedVolumes(), numFailedVolumes); checkFailuresAtDataNode(dn0, 1, true, new String[] {dn0Vol1.getAbsolutePath()}); @@ -568,8 +567,8 @@ public void testHotSwapOutFailedVolumeAndReporting() dn0.reconfigurePropertyImpl(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, dataDirs); numFailedVolumes = (int) mbs.getAttribute(mxbeanName, "NumFailedVolumes"); - Assertions.assertEquals(0, numFailedVolumes); - Assertions.assertEquals(dn0.getFSDataset().getNumFailedVolumes(), + assertEquals(0, numFailedVolumes); + assertEquals(dn0.getFSDataset().getNumFailedVolumes(), numFailedVolumes); checkFailuresAtDataNode(dn0, 0, true, new String[] {}); @@ -579,8 +578,8 @@ public void testHotSwapOutFailedVolumeAndReporting() dn0.reconfigurePropertyImpl(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, oldDataDirs); numFailedVolumes = (int) mbs.getAttribute(mxbeanName, "NumFailedVolumes"); - Assertions.assertEquals(0, numFailedVolumes); - Assertions.assertEquals(dn0.getFSDataset().getNumFailedVolumes(), + assertEquals(0, numFailedVolumes); + assertEquals(dn0.getFSDataset().getNumFailedVolumes(), numFailedVolumes); checkFailuresAtDataNode(dn0, 0, true, new String[] {}); @@ -590,8 +589,8 @@ public void testHotSwapOutFailedVolumeAndReporting() DataNodeTestUtils.waitForDiskError(dn0, DataNodeTestUtils.getVolume(dn0, dn0Vol2)); numFailedVolumes = (int) mbs.getAttribute(mxbeanName, "NumFailedVolumes"); - Assertions.assertEquals(1, numFailedVolumes); - 
Assertions.assertEquals(dn0.getFSDataset().getNumFailedVolumes(), + assertEquals(1, numFailedVolumes); + assertEquals(dn0.getFSDataset().getNumFailedVolumes(), numFailedVolumes); checkFailuresAtDataNode(dn0, 1, true, new String[] {dn0Vol2.getAbsolutePath()}); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureToleration.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureToleration.java index 6165f05ee54a8..7819337bb5c6c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureToleration.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureToleration.java @@ -18,9 +18,9 @@ package org.apache.hadoop.hdfs.server.datanode; import static org.apache.hadoop.test.PlatformAssumptions.assumeNotWindows; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertTrue; import java.io.File; import java.io.IOException; @@ -37,15 +37,16 @@ import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager; import org.apache.hadoop.test.GenericTestUtils; -import org.junit.After; -import org.junit.Before; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.Timeout; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.Timeout; /** * Test the ability of a DN to tolerate volume failures. + * specific the timeout for entire test class */ +@Timeout(120) public class TestDataNodeVolumeFailureToleration { private FileSystem fs; private MiniDFSCluster cluster; @@ -59,11 +60,7 @@ public class TestDataNodeVolumeFailureToleration { // a datanode to be considered dead by the namenode. 
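Related cleanup a few files back (TestDataNodeRollingUpgrade): the TemporaryFolder rule becomes a `@TempDir`-injected path. A minimal sketch of that replacement, assuming nothing beyond stock JUnit 5; the directory name is invented:

```java
import static org.junit.jupiter.api.Assertions.assertTrue;

import java.nio.file.Files;
import java.nio.file.Path;

import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.io.TempDir;

class TempDirSketchTest {

  @TempDir
  Path baseDir;                   // was: @Rule public TemporaryFolder baseDir; must not be private

  @Test
  void tempDirIsCreatedAndCleanedUpPerTest() throws Exception {
    Path data = Files.createDirectory(baseDir.resolve("data"));
    assertTrue(Files.isDirectory(data));
    // APIs that still want a java.io.File can use baseDir.toFile(), as the patch does.
  }
}
```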
final int WAIT_FOR_DEATH = 15000; - // specific the timeout for entire test class - @Rule - public Timeout timeout = new Timeout(120 * 1000); - - @Before + @BeforeEach public void setUp() throws Exception { conf = new HdfsConfiguration(); conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 512L); @@ -81,7 +78,7 @@ public void setUp() throws Exception { fs = cluster.getFileSystem(); } - @After + @AfterEach public void tearDown() throws Exception { if (cluster != null) { cluster.shutdown(); @@ -122,14 +119,12 @@ public void testValidVolumesAtStartup() throws Exception { cluster.waitActive(); try { - assertTrue("The DN should have started up fine.", - cluster.isDataNodeUp()); + assertTrue(cluster.isDataNodeUp(), "The DN should have started up fine."); DataNode dn = cluster.getDataNodes().get(0); String si = DataNodeTestUtils.getFSDataset(dn).getStorageInfo(); - assertTrue("The DN should have started with this directory", - si.contains(dataDir1Actual.getPath())); - assertFalse("The DN shouldn't have a bad directory.", - si.contains(dataDir2Actual.getPath())); + assertTrue(si.contains(dataDir1Actual.getPath()), + "The DN should have started with this directory"); + assertFalse(si.contains(dataDir2Actual.getPath()), "The DN shouldn't have a bad directory."); } finally { cluster.shutdownDataNodes(); FileUtil.chmod(dataDir2.toString(), "755"); @@ -272,8 +267,7 @@ private void testVolumeConfig(int volumesTolerated, int volumesFailed, private void prepareDirToFail(File dir) throws IOException, InterruptedException { dir.mkdirs(); - assertEquals("Couldn't chmod local vol", 0, - FileUtil.chmod(dir.toString(), "000")); + assertEquals(0, FileUtil.chmod(dir.toString(), "000"), "Couldn't chmod local vol"); } /** @@ -292,8 +286,8 @@ public void testFailedVolumeOnStartupIsCounted() throws Exception { prepareDirToFail(dir); restartDatanodes(1, false); // The cluster is up.. 
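The setUp/tearDown hunks above are a pure annotation rename (`@Before`/`@After` to `@BeforeEach`/`@AfterEach`); execution order and per-test semantics are unchanged. A sketch with a trivial stand-in fixture instead of a MiniDFSCluster:

```java
import static org.junit.jupiter.api.Assertions.assertEquals;

import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;

class LifecycleRenameSketchTest {

  private StringBuilder cluster;   // stand-in for the expensive fixture the real tests build

  @BeforeEach                      // was @Before
  void setUp() {
    cluster = new StringBuilder("started");
  }

  @AfterEach                       // was @After
  void tearDown() {
    cluster = null;
  }

  @Test
  void fixtureIsRebuiltForEveryTest() {
    assertEquals("started", cluster.toString());
  }
}
```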
- assertEquals(true, cluster.getDataNodes().get(0) - .isBPServiceAlive(cluster.getNamesystem().getBlockPoolId())); + assertEquals(true, + cluster.getDataNodes().get(0).isBPServiceAlive(cluster.getNamesystem().getBlockPoolId())); // but there has been a single volume failure DFSTestUtil.waitForDatanodeStatus(dm, 1, 0, 1, origCapacity / 2, WAIT_FOR_HEARTBEATS); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataTransferThrottler.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataTransferThrottler.java index bdcfef81d02e5..b18ea7b1c08ab 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataTransferThrottler.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataTransferThrottler.java @@ -22,11 +22,12 @@ import org.apache.hadoop.hdfs.DistributedFileSystem; import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.MiniDFSCluster; -import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.Test; import static org.apache.hadoop.util.Time.monotonicNow; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_READ_BANDWIDTHPERSEC_KEY; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNull; import static org.junit.jupiter.api.Assertions.assertTrue; /** @@ -54,10 +55,10 @@ public void testReadDataTransferThrottler() throws Exception { DataNode dataNode = cluster.getDataNodes().get(0); // DataXceiverServer#readThrottler is null if // dfs.datanode.data.read.bandwidthPerSec default value is 0. - Assertions.assertNull(dataNode.xserver.getReadThrottler()); + assertNull(dataNode.xserver.getReadThrottler()); // Read file. - Assertions.assertEquals(fileLength, DFSTestUtil.readFileAsBytes(fs, file).length); + assertEquals(fileLength, DFSTestUtil.readFileAsBytes(fs, file).length); // Set dfs.datanode.data.read.bandwidthPerSec. long bandwidthPerSec = 1024 * 1024 * 8; @@ -67,11 +68,11 @@ public void testReadDataTransferThrottler() throws Exception { cluster.stopDataNode(0); cluster.startDataNodes(conf, 1, true, null, null); dataNode = cluster.getDataNodes().get(0); - Assertions.assertEquals(bandwidthPerSec, dataNode.xserver.getReadThrottler().getBandwidth()); + assertEquals(bandwidthPerSec, dataNode.xserver.getReadThrottler().getBandwidth()); // Read file with throttler. long start = monotonicNow(); - Assertions.assertEquals(fileLength, DFSTestUtil.readFileAsBytes(fs, file).length); + assertEquals(fileLength, DFSTestUtil.readFileAsBytes(fs, file).length); long elapsedTime = monotonicNow() - start; // Ensure throttler is effective, read 1024 * 1024 * 10 * 8 bytes, // should take approximately 10 seconds (1024 * 1024 * 8 bytes per second). 
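Several files here (this throttler test, the volume-failure tests above) also drop the `Assertions.` class qualifier in favour of static imports of just the methods used. A sketch of the resulting style with hypothetical values:

```java
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNull;

import org.junit.jupiter.api.Test;

class StaticImportSketchTest {

  @Test
  void readsTheSameWithoutTheClassQualifier() {
    Object readThrottler = null;                       // hypothetical: throttling disabled by default
    assertNull(readThrottler);                         // was: Assertions.assertNull(readThrottler)
    assertEquals(8L * 1024 * 1024, 8L * 1024 * 1024);  // was: Assertions.assertEquals(...)
  }
}
```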
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataXceiverLazyPersistHint.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataXceiverLazyPersistHint.java index 611360d6cb15b..1acea362d7678 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataXceiverLazyPersistHint.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataXceiverLazyPersistHint.java @@ -29,17 +29,15 @@ import org.apache.hadoop.util.DataChecksum; import static org.apache.hadoop.hdfs.DFSConfigKeys.*; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.Timeout; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.Timeout; import org.mockito.ArgumentCaptor; import java.io.DataOutputStream; import java.io.IOException; import java.util.Arrays; -import static org.hamcrest.MatcherAssert.assertThat; -import static org.hamcrest.core.Is.is; +import static org.assertj.core.api.Assertions.assertThat; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.anyBoolean; import static org.mockito.ArgumentMatchers.anyLong; @@ -54,9 +52,8 @@ * Mock-based unit test to verify that the DataXceiver correctly handles the * LazyPersist hint from clients. */ +@Timeout(300) public class TestDataXceiverLazyPersistHint { - @Rule - public Timeout timeout = new Timeout(300000); private enum PeerLocality { LOCAL, @@ -80,7 +77,7 @@ public void testWithLocalClient() throws IOException { for (Boolean lazyPersistSetting : Arrays.asList(true, false)) { issueWriteBlockCall(xceiver, lazyPersistSetting); - assertThat(captor.getValue(), is(lazyPersistSetting)); + assertThat(captor.getValue()).isEqualTo(lazyPersistSetting); } } @@ -95,7 +92,7 @@ public void testWithRemoteClient() throws IOException { for (Boolean lazyPersistSetting : Arrays.asList(true, false)) { issueWriteBlockCall(xceiver, lazyPersistSetting); - assertThat(captor.getValue(), is(false)); + assertThat(captor.getValue()).isEqualTo(false); } } @@ -112,7 +109,7 @@ public void testOverrideWithRemoteClient() throws IOException { for (Boolean lazyPersistSetting : Arrays.asList(true, false)) { issueWriteBlockCall(xceiver, lazyPersistSetting); - assertThat(captor.getValue(), is(lazyPersistSetting)); + assertThat(captor.getValue()).isEqualTo(lazyPersistSetting); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeProtocolRetryPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeProtocolRetryPolicy.java index d118c7e50c231..dcc21c8e651b2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeProtocolRetryPolicy.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeProtocolRetryPolicy.java @@ -18,6 +18,8 @@ package org.apache.hadoop.hdfs.server.datanode; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -51,7 +53,6 @@ import org.apache.hadoop.test.GenericTestUtils; import org.slf4j.event.Level; import org.junit.jupiter.api.AfterEach; -import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import 
org.junit.jupiter.api.Timeout; @@ -118,7 +119,7 @@ public void tearDown() throws IOException { } finally { File dir = new File(DATA_DIR); if (dir.exists()) - Assertions.assertTrue(FileUtil.fullyDelete(dir), + assertTrue(FileUtil.fullyDelete(dir), "Cannot delete data-node dirs"); } tearDownDone = true; @@ -224,7 +225,7 @@ public HeartbeatResponse answer(InvocationOnMock invocation) @Override DatanodeProtocolClientSideTranslatorPB connectToNN( InetSocketAddress nnAddr) throws IOException { - Assertions.assertEquals(NN_ADDR, nnAddr); + assertEquals(NN_ADDR, nnAddr); return namenode; } }; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeRegister.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeRegister.java index 52905d833bf62..db3a79a07face 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeRegister.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeRegister.java @@ -43,7 +43,6 @@ import org.apache.hadoop.util.Lists; import org.apache.hadoop.util.VersionInfo; -import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.slf4j.Logger; @@ -157,7 +156,7 @@ public void testDNShutdwonBeforeRegister() throws Exception { localActor.stop(); localActor.register(nsInfo); } catch (IOException e) { - Assertions.assertEquals("DN shut down before block pool registered", e.getMessage()); + assertEquals("DN shut down before block pool registered", e.getMessage()); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestFsDatasetCacheRevocation.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestFsDatasetCacheRevocation.java index fd72804804b6a..55a7ba2c38f92 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestFsDatasetCacheRevocation.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestFsDatasetCacheRevocation.java @@ -18,7 +18,7 @@ package org.apache.hadoop.hdfs.server.datanode; import static org.apache.hadoop.test.PlatformAssumptions.assumeNotWindows; -import static org.junit.Assume.assumeTrue; +import static org.junit.jupiter.api.Assumptions.assumeTrue; import java.io.File; import java.nio.ByteBuffer; @@ -45,9 +45,10 @@ import org.apache.hadoop.net.unix.DomainSocket; import org.apache.hadoop.net.unix.TemporarySocketDirectory; import org.apache.hadoop.util.NativeCodeLoader; -import org.junit.After; -import org.junit.Before; -import org.junit.Test; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.Timeout; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -64,7 +65,7 @@ public class TestFsDatasetCacheRevocation { private static final int BLOCK_SIZE = 4096; - @Before + @BeforeEach public void setUp() throws Exception { prevCacheManipulator = NativeIO.POSIX.getCacheManipulator(); NativeIO.POSIX.setCacheManipulator(new NoMlockCacheManipulator()); @@ -72,7 +73,7 @@ public void setUp() throws Exception { sockDir = new TemporarySocketDirectory(); } - @After + @AfterEach public void tearDown() throws Exception { // Restore the original CacheManipulator NativeIO.POSIX.setCacheManipulator(prevCacheManipulator); @@ -99,7 +100,8 
@@ private static Configuration getDefaultConf() { * replica for a reasonable amount of time, even if an uncache request * occurs. */ - @Test(timeout=120000) + @Test + @Timeout(value = 120) public void testPinning() throws Exception { assumeTrue(NativeCodeLoader.isNativeCodeLoaded()); assumeNotWindows(); @@ -149,7 +151,8 @@ public void testPinning() throws Exception { * Test that when we have an uncache request, and the client refuses to * release the replica for a long time, we will un-mlock it. */ - @Test(timeout=120000) + @Test + @Timeout(value = 120) public void testRevocation() throws Exception { assumeTrue(NativeCodeLoader.isNativeCodeLoaded()); assumeNotWindows(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestIncrementalBlockReports.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestIncrementalBlockReports.java index 6551de4e793e5..5338ac3035e32 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestIncrementalBlockReports.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestIncrementalBlockReports.java @@ -17,8 +17,8 @@ */ package org.apache.hadoop.hdfs.server.datanode; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.anyString; import static org.mockito.Mockito.atLeastOnce; @@ -54,8 +54,9 @@ import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo.BlockStatus; import org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks; -import org.junit.Before; -import org.junit.Test; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.Timeout; import org.mockito.Mockito; /** @@ -82,7 +83,7 @@ public class TestIncrementalBlockReports { private BPServiceActor actor; // BPSA to use for block injection. private String storageUuid; // DatanodeStorage to use for block injection. - @Before + @BeforeEach public void startCluster() throws IOException { conf = new HdfsConfiguration(); cluster = new MiniDFSCluster.Builder(conf).numDataNodes(DN_COUNT).build(); @@ -135,7 +136,8 @@ DatanodeProtocolClientSideTranslatorPB spyOnDnCallsToNn() { * @throws InterruptedException * @throws IOException */ - @Test (timeout=60000) + @Test + @Timeout(value = 60) public void testReportBlockReceived() throws InterruptedException, IOException { try { DatanodeProtocolClientSideTranslatorPB nnSpy = spyOnDnCallsToNn(); @@ -162,7 +164,8 @@ public void testReportBlockReceived() throws InterruptedException, IOException { * @throws InterruptedException * @throws IOException */ - @Test (timeout=60000) + @Test + @Timeout(value = 60) public void testReportBlockDeleted() throws InterruptedException, IOException { try { // Trigger a block report to reset the IBR timer. 
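The cache-revocation tests above switch from org.junit.Assume to org.junit.jupiter.api.Assumptions. The behaviour is the same: a failed assumption aborts (skips) the test rather than failing it. A sketch with an invented system-property flag standing in for the native-code check:

```java
import static org.junit.jupiter.api.Assumptions.assumeTrue;

import org.junit.jupiter.api.Test;

class AssumptionSketchTest {

  @Test
  void runsOnlyWhenThePreconditionHolds() {
    boolean nativeLoaded = Boolean.getBoolean("sketch.native.loaded");  // hypothetical flag

    // was: org.junit.Assume.assumeTrue(nativeLoaded);
    assumeTrue(nativeLoaded, "native code not loaded, skipping");

    // Reached only when the assumption held.
  }
}
```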
@@ -207,7 +210,8 @@ public void testReportBlockDeleted() throws InterruptedException, IOException { * @throws InterruptedException * @throws IOException */ - @Test (timeout=60000) + @Test + @Timeout(value = 60) public void testReplaceReceivedBlock() throws InterruptedException, IOException { try { // Spy on calls from the DN to the NN @@ -326,9 +330,9 @@ public void testIBRRaceCondition() throws Exception { cluster.transitionToActive(1); cluster.waitActive(1); - assertEquals("There should not be any corrupt replicas", 0, - nn2.getNamesystem().getBlockManager() - .numCorruptReplicas(block.getLocalBlock())); + assertEquals(0, + nn2.getNamesystem().getBlockManager().numCorruptReplicas(block.getLocalBlock()), + "There should not be any corrupt replicas"); } finally { cluster.shutdown(); } @@ -409,9 +413,9 @@ public void testIBRRaceCondition2() throws Exception { cluster.transitionToActive(1); cluster.waitActive(1); - assertEquals("There should not be any corrupt replicas", 0, - nn2.getNamesystem().getBlockManager() - .numCorruptReplicas(block.getLocalBlock())); + assertEquals(0, + nn2.getNamesystem().getBlockManager().numCorruptReplicas(block.getLocalBlock()), + "There should not be any corrupt replicas"); } finally { cluster.shutdown(); } @@ -514,9 +518,9 @@ public void testIBRRaceCondition3() throws Exception { cluster.transitionToActive(1); cluster.waitActive(1); - assertEquals("There should be 1 corrupt replica", 1, - nn2.getNamesystem().getBlockManager() - .numCorruptReplicas(block.getLocalBlock())); + assertEquals(1, + nn2.getNamesystem().getBlockManager().numCorruptReplicas(block.getLocalBlock()), + "There should be 1 corrupt replica"); } finally { cluster.shutdown(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestProvidedReplicaImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestProvidedReplicaImpl.java index a7e8b1eb213cf..1ba682424ba00 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestProvidedReplicaImpl.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestProvidedReplicaImpl.java @@ -17,8 +17,8 @@ */ package org.apache.hadoop.hdfs.server.datanode; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; import java.io.File; import java.io.FileInputStream; @@ -35,8 +35,8 @@ import org.apache.commons.io.input.BoundedInputStream; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystemTestHelper; -import org.junit.Before; -import org.junit.Test; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -90,7 +90,7 @@ private static void createProvidedReplicas(Configuration conf) { } } - @Before + @BeforeEach public void setUp() throws IOException { createFileIfNotExists(new File(BASE_DIR).getAbsolutePath()); createProvidedReplicas(new Configuration()); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestTransferRbw.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestTransferRbw.java index 8653f4b208f49..c08201286ec9e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestTransferRbw.java 
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestTransferRbw.java @@ -38,11 +38,13 @@ import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetTestUtil; import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration; import org.apache.hadoop.test.GenericTestUtils; -import org.junit.Assert; -import org.junit.Test; +import org.junit.jupiter.api.Test; import org.slf4j.event.Level; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_WRITE_BANDWIDTHPERSEC_KEY; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertTrue; /** Test transferring RBW between datanodes */ public class TestTransferRbw { @@ -68,9 +70,9 @@ private static LocalReplicaInPipeline getReplica(final DataNode datanode, LOG.info("wait since replicas.size() == 0; i=" + i); Thread.sleep(1000); } - Assert.assertEquals(1, replicas.size()); + assertEquals(1, replicas.size()); final ReplicaInfo r = replicas.iterator().next(); - Assert.assertEquals(expectedState, r.getState()); + assertEquals(expectedState, r.getState()); return (LocalReplicaInPipeline)r; } @@ -106,7 +108,7 @@ public void testTransferRbw() throws Exception { final DataNode oldnode = cluster.getDataNodes().get(0); // DataXceiverServer#writeThrottler is null if // dfs.datanode.data.write.bandwidthPerSec default value is 0. - Assert.assertNull(oldnode.xserver.getWriteThrottler()); + assertNull(oldnode.xserver.getWriteThrottler()); oldrbw = getRbw(oldnode, bpid); LOG.info("oldrbw = " + oldrbw); @@ -118,17 +120,17 @@ public void testTransferRbw() throws Exception { // DataXceiverServer#writeThrottler#balancer is equal to // dfs.datanode.data.write.bandwidthPerSec value if // dfs.datanode.data.write.bandwidthPerSec value is not zero. 
- Assert.assertEquals(1024 * 1024 * 8, + assertEquals(1024 * 1024 * 8, newnode.xserver.getWriteThrottler().getBandwidth()); final DatanodeInfo oldnodeinfo; { final DatanodeInfo[] datatnodeinfos = cluster.getNameNodeRpc( ).getDatanodeReport(DatanodeReportType.LIVE); - Assert.assertEquals(2, datatnodeinfos.length); + assertEquals(2, datatnodeinfos.length); int i = 0; for(DatanodeRegistration dnReg = newnode.getDNRegistrationForBP(bpid); i < datatnodeinfos.length && !datatnodeinfos[i].equals(dnReg); i++); - Assert.assertTrue(i < datatnodeinfos.length); + assertTrue(i < datatnodeinfos.length); newnodeinfo = datatnodeinfos[i]; oldnodeinfo = datatnodeinfos[1 - i]; } @@ -138,15 +140,15 @@ public void testTransferRbw() throws Exception { oldrbw.getGenerationStamp()); final BlockOpResponseProto s = DFSTestUtil.transferRbw( b, DFSClientAdapter.getDFSClient(fs), oldnodeinfo, newnodeinfo); - Assert.assertEquals(Status.SUCCESS, s.getStatus()); + assertEquals(Status.SUCCESS, s.getStatus()); } //check new rbw final ReplicaBeingWritten newrbw = getRbw(newnode, bpid); LOG.info("newrbw = " + newrbw); - Assert.assertEquals(oldrbw.getBlockId(), newrbw.getBlockId()); - Assert.assertEquals(oldrbw.getGenerationStamp(), newrbw.getGenerationStamp()); - Assert.assertEquals(oldrbw.getVisibleLength(), newrbw.getVisibleLength()); + assertEquals(oldrbw.getBlockId(), newrbw.getBlockId()); + assertEquals(oldrbw.getGenerationStamp(), newrbw.getGenerationStamp()); + assertEquals(oldrbw.getVisibleLength(), newrbw.getVisibleLength()); LOG.info("DONE"); } finally { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/checker/TestDatasetVolumeChecker.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/checker/TestDatasetVolumeChecker.java index fd05a4702fef1..369fd5ea42ecd 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/checker/TestDatasetVolumeChecker.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/checker/TestDatasetVolumeChecker.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hdfs.server.datanode.checker; +import org.apache.hadoop.test.TestName; import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.Futures; import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ListenableFuture; @@ -29,12 +30,10 @@ import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.DiskChecker.DiskErrorException; import org.apache.hadoop.util.FakeTimer; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.TestName; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; -import org.junit.runners.Parameterized.Parameters; +import org.junit.jupiter.api.Timeout; +import org.junit.jupiter.api.extension.RegisterExtension; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.MethodSource; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -51,21 +50,20 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DISK_CHECK_TIMEOUT_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY; import static org.apache.hadoop.hdfs.server.datanode.checker.VolumeCheckResult.*; -import static org.hamcrest.CoreMatchers.is; -import static org.junit.Assert.*; +import static org.junit.jupiter.api.Assertions.*; import static org.mockito.Mockito.*; - +import static org.assertj.core.api.Assertions.assertThat; 
/** * Tests for {@link DatasetVolumeChecker} when the {@link FsVolumeSpi#check} * method returns different values of {@link VolumeCheckResult}. */ -@RunWith(Parameterized.class) public class TestDatasetVolumeChecker { public static final Logger LOG = LoggerFactory.getLogger(TestDatasetVolumeChecker.class); - @Rule + @SuppressWarnings("checkstyle:VisibilityModifier") + @RegisterExtension public TestName testName = new TestName(); /** @@ -73,7 +71,6 @@ public class TestDatasetVolumeChecker { * Including "null" for 'throw exception'. * @return */ - @Parameters(name="{0}") public static Collection data() { List values = new ArrayList<>(); for (VolumeCheckResult result : VolumeCheckResult.values()) { @@ -86,12 +83,12 @@ public static Collection data() { /** * When null, the check call should throw an exception. */ - private final VolumeCheckResult expectedVolumeHealth; + private VolumeCheckResult expectedVolumeHealth; private static final int NUM_VOLUMES = 2; - public TestDatasetVolumeChecker(VolumeCheckResult expectedVolumeHealth) { - this.expectedVolumeHealth = expectedVolumeHealth; + public void initTestDatasetVolumeChecker(VolumeCheckResult pExpectedVolumeHealth) { + this.expectedVolumeHealth = pExpectedVolumeHealth; } /** @@ -100,8 +97,11 @@ public TestDatasetVolumeChecker(VolumeCheckResult expectedVolumeHealth) { * * @throws Exception */ - @Test(timeout = 10000) - public void testCheckOneVolume() throws Exception { + @ParameterizedTest(name="{0}") + @MethodSource("data") + @Timeout(value = 10) + public void testCheckOneVolume(VolumeCheckResult pExpectedVolumeHealth) throws Exception { + initTestDatasetVolumeChecker(pExpectedVolumeHealth); LOG.info("Executing {}", testName.getMethodName()); final FsVolumeSpi volume = makeVolumes(1, expectedVolumeHealth).get(0); final DatasetVolumeChecker checker = @@ -120,11 +120,11 @@ public void call(Set healthyVolumes, numCallbackInvocations.incrementAndGet(); if (expectedVolumeHealth != null && expectedVolumeHealth != FAILED) { - assertThat(healthyVolumes.size(), is(1)); - assertThat(failedVolumes.size(), is(0)); + assertThat(healthyVolumes.size()).isEqualTo(1); + assertThat(failedVolumes.size()).isEqualTo(0); } else { - assertThat(healthyVolumes.size(), is(0)); - assertThat(failedVolumes.size(), is(1)); + assertThat(healthyVolumes.size()).isEqualTo(0); + assertThat(failedVolumes.size()).isEqualTo(1); } } }); @@ -134,7 +134,7 @@ public void call(Set healthyVolumes, // Ensure that the check was invoked at least once. 
verify(volume, times(1)).check(any()); if (result) { - assertThat(numCallbackInvocations.get(), is(1L)); + assertThat(numCallbackInvocations.get()).isEqualTo(1L); } } @@ -144,8 +144,11 @@ public void call(Set healthyVolumes, * * @throws Exception */ - @Test(timeout = 10000) - public void testCheckAllVolumes() throws Exception { + @ParameterizedTest(name="{0}") + @MethodSource("data") + @Timeout(value = 10) + public void testCheckAllVolumes(VolumeCheckResult pExpectedVolumeHealth) throws Exception { + initTestDatasetVolumeChecker(pExpectedVolumeHealth); LOG.info("Executing {}", testName.getMethodName()); final List volumes = makeVolumes( @@ -159,7 +162,7 @@ public void testCheckAllVolumes() throws Exception { LOG.info("Got back {} failed volumes", failedVolumes.size()); if (expectedVolumeHealth == null || expectedVolumeHealth == FAILED) { - assertThat(failedVolumes.size(), is(NUM_VOLUMES)); + assertThat(failedVolumes.size()).isEqualTo(NUM_VOLUMES); } else { assertTrue(failedVolumes.isEmpty()); } @@ -233,8 +236,11 @@ static List makeVolumes( return volumes; } - @Test - public void testInvalidConfigurationValues() throws Exception { + @ParameterizedTest(name="{0}") + @MethodSource("data") + public void testInvalidConfigurationValues(VolumeCheckResult pExpectedVolumeHealth) + throws Exception { + initTestDatasetVolumeChecker(pExpectedVolumeHealth); HdfsConfiguration conf = new HdfsConfiguration(); conf.setInt(DFS_DATANODE_DISK_CHECK_TIMEOUT_KEY, 0); intercept(HadoopIllegalArgumentException.class, diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/checker/TestDatasetVolumeCheckerFailures.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/checker/TestDatasetVolumeCheckerFailures.java index 7eb79b70ae642..9d7d4ba30534c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/checker/TestDatasetVolumeCheckerFailures.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/checker/TestDatasetVolumeCheckerFailures.java @@ -24,8 +24,9 @@ import org.apache.hadoop.hdfs.server.datanode.StorageLocation; import org.apache.hadoop.hdfs.server.datanode.fsdataset.*; import org.apache.hadoop.util.FakeTimer; -import org.junit.Before; -import org.junit.Test; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.Timeout; import org.mockito.stubbing.Answer; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -34,14 +35,12 @@ import java.util.concurrent.TimeUnit; import java.util.*; -import static org.hamcrest.core.Is.is; -import static org.junit.Assert.assertThat; import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; - +import static org.assertj.core.api.Assertions.assertThat; /** * Test a few more conditions not covered by TestDatasetVolumeChecker. @@ -55,7 +54,7 @@ public class TestDatasetVolumeCheckerFailures { private static final long MIN_DISK_CHECK_GAP_MS = 1000; // 1 second. - @Before + @BeforeEach public void commonInit() { timer = new FakeTimer(); conf = new HdfsConfiguration(); @@ -67,7 +66,8 @@ public void commonInit() { * Test timeout in {@link DatasetVolumeChecker#checkAllVolumes}. 
* @throws Exception */ - @Test(timeout=60000) + @Test + @Timeout(value = 60) public void testTimeout() throws Exception { // Add a volume whose check routine hangs forever. final List volumes = @@ -84,7 +84,7 @@ public void testTimeout() throws Exception { // Ensure that the hung volume is detected as failed. Set failedVolumes = checker.checkAllVolumes(dataset); - assertThat(failedVolumes.size(), is(1)); + assertThat(failedVolumes.size()).isEqualTo(1); } /** @@ -92,7 +92,8 @@ public void testTimeout() throws Exception { * * @throws Exception */ - @Test(timeout=60000) + @Test + @Timeout(value = 60) public void testCheckingClosedVolume() throws Exception { // Add a volume that cannot be referenced. final List volumes = @@ -103,15 +104,16 @@ public void testCheckingClosedVolume() throws Exception { DatasetVolumeChecker checker = new DatasetVolumeChecker(conf, timer); Set failedVolumes = checker.checkAllVolumes(dataset); - assertThat(failedVolumes.size(), is(0)); - assertThat(checker.getNumSyncDatasetChecks(), is(0L)); + assertThat(failedVolumes.size()).isEqualTo(0); + assertThat(checker.getNumSyncDatasetChecks()).isEqualTo(0L); // The closed volume should not have been checked as it cannot // be referenced. verify(volumes.get(0), times(0)).check(any()); } - @Test(timeout=60000) + @Test + @Timeout(value = 60) public void testMinGapIsEnforcedForSyncChecks() throws Exception { final List volumes = TestDatasetVolumeChecker.makeVolumes(1, VolumeCheckResult.HEALTHY); @@ -120,18 +122,18 @@ public void testMinGapIsEnforcedForSyncChecks() throws Exception { final DatasetVolumeChecker checker = new DatasetVolumeChecker(conf, timer); checker.checkAllVolumes(dataset); - assertThat(checker.getNumSyncDatasetChecks(), is(1L)); + assertThat(checker.getNumSyncDatasetChecks()).isEqualTo(1L); // Re-check without advancing the timer. Ensure the check is skipped. checker.checkAllVolumes(dataset); - assertThat(checker.getNumSyncDatasetChecks(), is(1L)); - assertThat(checker.getNumSkippedChecks(), is(1L)); + assertThat(checker.getNumSyncDatasetChecks()).isEqualTo(1L); + assertThat(checker.getNumSkippedChecks()).isEqualTo(1L); // Re-check after advancing the timer. Ensure the check is performed. 
timer.advance(MIN_DISK_CHECK_GAP_MS); checker.checkAllVolumes(dataset); - assertThat(checker.getNumSyncDatasetChecks(), is(2L)); - assertThat(checker.getNumSkippedChecks(), is(1L)); + assertThat(checker.getNumSyncDatasetChecks()).isEqualTo(2L); + assertThat(checker.getNumSkippedChecks()).isEqualTo(1L); } /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/checker/TestDatasetVolumeCheckerTimeout.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/checker/TestDatasetVolumeCheckerTimeout.java index fa044740be677..c0f8a1def3d9d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/checker/TestDatasetVolumeCheckerTimeout.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/checker/TestDatasetVolumeCheckerTimeout.java @@ -23,14 +23,15 @@ import org.apache.hadoop.hdfs.server.datanode.StorageLocation; import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference; import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi; +import org.apache.hadoop.test.TestName; import org.apache.hadoop.util.FakeTimer; -import static org.hamcrest.CoreMatchers.is; -import static org.junit.Assert.assertThat; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.TestName; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.Timeout; import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.*; +import static org.assertj.core.api.Assertions.assertThat; + +import org.junit.jupiter.api.extension.RegisterExtension; import org.mockito.stubbing.Answer; import org.slf4j.LoggerFactory; @@ -46,7 +47,8 @@ public class TestDatasetVolumeCheckerTimeout { public static final org.slf4j.Logger LOG = LoggerFactory.getLogger(TestDatasetVolumeCheckerTimeout.class); - @Rule + @SuppressWarnings("checkstyle:VisibilityModifier") + @RegisterExtension public TestName testName = new TestName(); static Configuration conf; @@ -81,7 +83,8 @@ static FsVolumeSpi makeSlowVolume() throws Exception { return volume; } - @Test (timeout = 300000) + @Test + @Timeout(value = 300) public void testDiskCheckTimeout() throws Exception { LOG.info("Executing {}", testName.getMethodName()); final FsVolumeSpi volume = makeSlowVolume(); @@ -103,9 +106,9 @@ public void call(Set healthyVolumes, // Assert that the disk check registers a failed volume due to // timeout - assertThat(healthyVolumes.size(), is(0)); - assertThat(failedVolumes.size(), is(1)); - } + assertThat(healthyVolumes.size()).isEqualTo(0); + assertThat(failedVolumes.size()).isEqualTo(1); + } }); // Wait for the callback @@ -116,6 +119,6 @@ public void call(Set healthyVolumes, // Ensure that the check was invoked only once. 
verify(volume, times(1)).check(any()); - assertThat(numCallbackInvocations.get(), is(1L)); + assertThat(numCallbackInvocations.get()).isEqualTo(1L); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/checker/TestStorageLocationChecker.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/checker/TestStorageLocationChecker.java index 80f0396c6fc30..aae8ce6f4f3cc 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/checker/TestStorageLocationChecker.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/checker/TestStorageLocationChecker.java @@ -23,9 +23,8 @@ import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.server.datanode.StorageLocation; import org.apache.hadoop.util.FakeTimer; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.ExpectedException; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.Timeout; import org.mockito.invocation.InvocationOnMock; import org.mockito.stubbing.Answer; import org.slf4j.Logger; @@ -41,9 +40,10 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DISK_CHECK_TIMEOUT_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY; import static org.apache.hadoop.hdfs.server.datanode.checker.VolumeCheckResult.*; -import static org.hamcrest.CoreMatchers.is; -import static org.junit.Assert.assertThat; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; import static org.mockito.Mockito.*; +import static org.assertj.core.api.Assertions.assertThat; /** * Unit tests for the {@link StorageLocationChecker} class. @@ -52,15 +52,13 @@ public class TestStorageLocationChecker { public static final Logger LOG = LoggerFactory.getLogger( TestStorageLocationChecker.class); - @Rule - public ExpectedException thrown = ExpectedException.none(); - /** * Verify that all healthy locations are correctly handled and that the * check routine is invoked as expected. * @throws Exception */ - @Test(timeout=30000) + @Test + @Timeout(value = 30) public void testAllLocationsHealthy() throws Exception { final List locations = makeMockLocations(HEALTHY, HEALTHY, HEALTHY); @@ -71,7 +69,7 @@ public void testAllLocationsHealthy() throws Exception { List filteredLocations = checker.check(conf, locations); // All locations should be healthy. - assertThat(filteredLocations.size(), is(3)); + assertThat(filteredLocations.size()).isEqualTo(3); // Ensure that the check method was invoked for each location. for (StorageLocation location : locations) { @@ -85,7 +83,8 @@ public void testAllLocationsHealthy() throws Exception { * * @throws Exception */ - @Test(timeout=30000) + @Test + @Timeout(value = 30) public void testFailedLocationsBelowThreshold() throws Exception { final List locations = makeMockLocations(HEALTHY, HEALTHY, FAILED); // 2 healthy, 1 failed. 
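The hunks below replace the removed ExpectedException rule with JUnit 5's assertThrows and then assert on the returned exception. A minimal sketch of that pattern (illustrative only; the throwing lambda is a stand-in for the real checker.check(conf, locations) call):

import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;

import java.io.IOException;

import org.junit.jupiter.api.Test;

class AssertThrowsSketch {

  @Test
  void rejectsTooManyFailedVolumes() {
    // assertThrows returns the caught exception, so its message can be checked afterwards.
    IOException ex = assertThrows(IOException.class, () -> {
      // Stand-in for the call under test, e.g. checker.check(conf, locations).
      throw new IOException("Too many failed volumes - current valid volumes: 1");
    });
    assertTrue(ex.getMessage().contains("Too many failed volumes"));
  }
}
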
@@ -94,7 +93,7 @@ public void testFailedLocationsBelowThreshold() throws Exception { StorageLocationChecker checker = new StorageLocationChecker(conf, new FakeTimer()); List filteredLocations = checker.check(conf, locations); - assertThat(filteredLocations.size(), is(2)); + assertThat(filteredLocations.size()).isEqualTo(2); } /** @@ -103,20 +102,22 @@ public void testFailedLocationsBelowThreshold() throws Exception { * * @throws Exception */ - @Test(timeout=30000) + @Test + @Timeout(value = 30) public void testFailedLocationsAboveThreshold() throws Exception { final List locations = makeMockLocations(HEALTHY, FAILED, FAILED); // 1 healthy, 2 failed. final Configuration conf = new HdfsConfiguration(); conf.setInt(DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY, 1); - thrown.expect(IOException.class); - thrown.expectMessage("Too many failed volumes - current valid volumes: 1," + IOException ex = assertThrows(IOException.class, () -> { + StorageLocationChecker checker = + new StorageLocationChecker(conf, new FakeTimer()); + checker.check(conf, locations); + }); + assertTrue(ex.getMessage().contains("Too many failed volumes - current valid volumes: 1," + " volumes configured: 3, volumes failed: 2, volume failures" - + " tolerated: 1"); - StorageLocationChecker checker = - new StorageLocationChecker(conf, new FakeTimer()); - checker.check(conf, locations); + + " tolerated: 1")); } /** @@ -124,18 +125,20 @@ public void testFailedLocationsAboveThreshold() throws Exception { * * @throws Exception */ - @Test(timeout=30000) + @Test + @Timeout(value = 30) public void testBadConfiguration() throws Exception { final List locations = makeMockLocations(HEALTHY, HEALTHY, HEALTHY); final Configuration conf = new HdfsConfiguration(); conf.setInt(DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY, 3); - thrown.expect(HadoopIllegalArgumentException.class); - thrown.expectMessage("Invalid value configured"); - StorageLocationChecker checker = - new StorageLocationChecker(conf, new FakeTimer()); - checker.check(conf, locations); + HadoopIllegalArgumentException ex = assertThrows(HadoopIllegalArgumentException.class, () -> { + StorageLocationChecker checker = + new StorageLocationChecker(conf, new FakeTimer()); + checker.check(conf, locations); + }); + assertTrue(ex.getMessage().contains("Invalid value configured")); } /** @@ -146,7 +149,8 @@ public void testBadConfiguration() throws Exception { * * @throws Exception */ - @Test (timeout=300000) + @Test + @Timeout(value = 300) public void testTimeoutInCheck() throws Exception { final Configuration conf = new HdfsConfiguration(); conf.setTimeDuration(DFS_DATANODE_DISK_CHECK_TIMEOUT_KEY, @@ -164,7 +168,7 @@ public void testTimeoutInCheck() throws Exception { // Check the two locations and ensure that only one of them // was filtered out. 
List filteredList = checker.check(conf, locations); - assertThat(filteredList.size(), is(1)); + assertThat(filteredList.size()).isEqualTo(1); } finally { checker.shutdownAndWait(10, TimeUnit.SECONDS); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/checker/TestThrottledAsyncChecker.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/checker/TestThrottledAsyncChecker.java index 318f8b2734150..f2a9eaa450918 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/checker/TestThrottledAsyncChecker.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/checker/TestThrottledAsyncChecker.java @@ -22,7 +22,8 @@ import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ListenableFuture; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.FakeTimer; -import org.junit.Test; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.Timeout; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -32,9 +33,9 @@ import java.util.concurrent.ScheduledThreadPoolExecutor; import java.util.concurrent.atomic.AtomicLong; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; /** * Verify functionality of {@link ThrottledAsyncChecker}. @@ -48,7 +49,8 @@ public class TestThrottledAsyncChecker { * Test various scheduling combinations to ensure scheduling and * throttling behave as expected. */ - @Test(timeout=60000) + @Test + @Timeout(value = 60) public void testScheduler() throws Exception { final NoOpCheckable target1 = new NoOpCheckable(); final NoOpCheckable target2 = new NoOpCheckable(); @@ -88,7 +90,8 @@ public void testScheduler() throws Exception { waitTestCheckableCheckCount(target2, 2L); } - @Test (timeout=60000) + @Test + @Timeout(value = 60) public void testConcurrentChecks() throws Exception { final StalledCheckable target = new StalledCheckable(); final FakeTimer timer = new FakeTimer(); @@ -112,7 +115,8 @@ public void testConcurrentChecks() throws Exception { * method. 
* @throws Exception */ - @Test(timeout=60000) + @Test + @Timeout(value = 60) public void testContextIsPassed() throws Exception { final NoOpCheckable target1 = new NoOpCheckable(); final FakeTimer timer = new FakeTimer(); @@ -148,7 +152,8 @@ public Boolean get() { * * @throws Exception */ - @Test(timeout=60000) + @Test + @Timeout(value = 60) public void testExceptionIsPropagated() throws Exception { final ThrowingCheckable target = new ThrowingCheckable(); final FakeTimer timer = new FakeTimer(); @@ -174,7 +179,8 @@ public void testExceptionIsPropagated() throws Exception { * * @throws Exception */ - @Test(timeout=60000) + @Test + @Timeout(value = 60) public void testExceptionCaching() throws Exception { final ThrowingCheckable target1 = new ThrowingCheckable(); final FakeTimer timer = new FakeTimer(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/checker/TestThrottledAsyncCheckerTimeout.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/checker/TestThrottledAsyncCheckerTimeout.java index dac55506eefb1..6270867b50968 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/checker/TestThrottledAsyncCheckerTimeout.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/checker/TestThrottledAsyncCheckerTimeout.java @@ -17,16 +17,16 @@ */ package org.apache.hadoop.hdfs.server.datanode.checker; +import org.apache.hadoop.test.TestName; import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.FutureCallback; import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.Futures; import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ListenableFuture; import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.MoreExecutors; import org.apache.hadoop.util.FakeTimer; -import org.junit.Before; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.TestName; -import org.junit.rules.Timeout; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.Timeout; +import org.junit.jupiter.api.extension.RegisterExtension; import org.slf4j.LoggerFactory; import java.util.Optional; @@ -37,22 +37,21 @@ import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.locks.ReentrantLock; -import static org.hamcrest.CoreMatchers.is; -import static org.junit.Assert.assertThat; -import static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.assertTrue; import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.timeout; import static org.mockito.Mockito.verify; +import static org.assertj.core.api.Assertions.assertThat; +@Timeout(300) public class TestThrottledAsyncCheckerTimeout { public static final org.slf4j.Logger LOG = LoggerFactory.getLogger(TestThrottledAsyncCheckerTimeout.class); - @Rule + @SuppressWarnings("checkstyle:VisibilityModifier") + @RegisterExtension public TestName testName = new TestName(); - @Rule - public Timeout testTimeout = new Timeout(300_000); private static final long DISK_CHECK_TIMEOUT = 10; private ReentrantLock lock; @@ -61,7 +60,7 @@ private ExecutorService getExecutorService() { return new ScheduledThreadPoolExecutor(1); } - @Before + @BeforeEach public void initializeLock() { lock = new ReentrantLock(); } @@ -111,8 +110,8 @@ public void onFailure(Throwable t) { lock.unlock(); - 
assertThat(numCallbackInvocationsFailure.get(), is(1L)); - assertThat(numCallbackInvocationsSuccess.get(), is(0L)); + assertThat(numCallbackInvocationsFailure.get()).isEqualTo(1L); + assertThat(numCallbackInvocationsSuccess.get()).isEqualTo(0L); assertTrue(throwable[0] instanceof TimeoutException); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/extdataset/TestExternalDataset.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/extdataset/TestExternalDataset.java index e4391529cf5b5..afc82263e89d7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/extdataset/TestExternalDataset.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/extdataset/TestExternalDataset.java @@ -22,7 +22,7 @@ import org.apache.hadoop.hdfs.server.datanode.ReplicaInPipeline; import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi; import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi; -import org.junit.Test; +import org.junit.jupiter.api.Test; /** * Tests the ability to create external FsDatasetSpi implementations. diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/TestAvailableSpaceVolumeChoosingPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/TestAvailableSpaceVolumeChoosingPolicy.java index 2243398c8af09..7f57d4eb2526c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/TestAvailableSpaceVolumeChoosingPolicy.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/TestAvailableSpaceVolumeChoosingPolicy.java @@ -19,6 +19,7 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_AVAILABLE_SPACE_VOLUME_CHOOSING_POLICY_BALANCED_SPACE_THRESHOLD_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_AVAILABLE_SPACE_VOLUME_CHOOSING_POLICY_BALANCED_SPACE_PREFERENCE_FRACTION_KEY; +import static org.junit.jupiter.api.Assertions.assertEquals; import java.util.ArrayList; import java.util.List; @@ -28,8 +29,8 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.ReflectionUtils; -import org.junit.Assert; -import org.junit.Test; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.Timeout; import org.mockito.Mockito; public class TestAvailableSpaceVolumeChoosingPolicy { @@ -55,7 +56,8 @@ private static void initPolicy(VolumeChoosingPolicy policy, // Test the Round-Robin block-volume fallback path when all volumes are within // the threshold. - @Test(timeout=60000) + @Test + @Timeout(value = 60) public void testRR() throws Exception { @SuppressWarnings("unchecked") final AvailableSpaceVolumeChoosingPolicy policy = @@ -66,7 +68,8 @@ public void testRR() throws Exception { // ChooseVolume should throw DiskOutOfSpaceException // with volume and block sizes in exception message. 
- @Test(timeout=60000) + @Test + @Timeout(value = 60) public void testRRPolicyExceptionMessage() throws Exception { final AvailableSpaceVolumeChoosingPolicy policy = new AvailableSpaceVolumeChoosingPolicy(); @@ -74,7 +77,8 @@ public void testRRPolicyExceptionMessage() throws Exception { TestRoundRobinVolumeChoosingPolicy.testRRPolicyExceptionMessage(policy); } - @Test(timeout=60000) + @Test + @Timeout(value = 60) public void testTwoUnbalancedVolumes() throws Exception { @SuppressWarnings("unchecked") final AvailableSpaceVolumeChoosingPolicy policy = @@ -91,15 +95,13 @@ public void testTwoUnbalancedVolumes() throws Exception { // than the threshold of 1MB. volumes.add(Mockito.mock(FsVolumeSpi.class)); Mockito.when(volumes.get(1).getAvailable()).thenReturn(1024L * 1024L * 3); - Assert.assertEquals(volumes.get(1), policy.chooseVolume(volumes, 100, - null)); - Assert.assertEquals(volumes.get(1), policy.chooseVolume(volumes, 100, - null)); - Assert.assertEquals(volumes.get(1), policy.chooseVolume(volumes, 100, - null)); + assertEquals(volumes.get(1), policy.chooseVolume(volumes, 100, null)); + assertEquals(volumes.get(1), policy.chooseVolume(volumes, 100, null)); + assertEquals(volumes.get(1), policy.chooseVolume(volumes, 100, null)); } - - @Test(timeout=60000) + + @Test + @Timeout(value = 60) public void testThreeUnbalancedVolumes() throws Exception { @SuppressWarnings("unchecked") final AvailableSpaceVolumeChoosingPolicy policy = @@ -123,29 +125,22 @@ public void testThreeUnbalancedVolumes() throws Exception { // We should alternate assigning between the two volumes with a lot of free // space. initPolicy(policy, BALANCED_SPACE_THRESHOLD, 1.0f); - Assert.assertEquals(volumes.get(1), policy.chooseVolume(volumes, 100, - null)); - Assert.assertEquals(volumes.get(2), policy.chooseVolume(volumes, 100, - null)); - Assert.assertEquals(volumes.get(1), policy.chooseVolume(volumes, 100, - null)); - Assert.assertEquals(volumes.get(2), policy.chooseVolume(volumes, 100, - null)); + assertEquals(volumes.get(1), policy.chooseVolume(volumes, 100, null)); + assertEquals(volumes.get(2), policy.chooseVolume(volumes, 100, null)); + assertEquals(volumes.get(1), policy.chooseVolume(volumes, 100, null)); + assertEquals(volumes.get(2), policy.chooseVolume(volumes, 100, null)); // All writes should be assigned to the volume with the least free space. 
initPolicy(policy, BALANCED_SPACE_THRESHOLD, 0.0f); - Assert.assertEquals(volumes.get(0), policy.chooseVolume(volumes, 100, - null)); - Assert.assertEquals(volumes.get(0), policy.chooseVolume(volumes, 100, - null)); - Assert.assertEquals(volumes.get(0), policy.chooseVolume(volumes, 100, - null)); - Assert.assertEquals(volumes.get(0), policy.chooseVolume(volumes, 100, - null)); + assertEquals(volumes.get(0), policy.chooseVolume(volumes, 100, null)); + assertEquals(volumes.get(0), policy.chooseVolume(volumes, 100, null)); + assertEquals(volumes.get(0), policy.chooseVolume(volumes, 100, null)); + assertEquals(volumes.get(0), policy.chooseVolume(volumes, 100, null)); } - @Test(timeout=60000) + @Test + @Timeout(value = 60) public void testSameAvailableVolumeSpace() throws Exception { @SuppressWarnings("unchecked") final AvailableSpaceVolumeChoosingPolicy policy = @@ -172,29 +167,22 @@ public void testSameAvailableVolumeSpace() throws Exception { // We should alternate assigning between all the above volumes // for they have the same available space initPolicy(policy, 0, 1.0f); - Assert.assertEquals(volumes.get(0), policy.chooseVolume(volumes, 100, - null)); - Assert.assertEquals(volumes.get(1), policy.chooseVolume(volumes, 100, - null)); - Assert.assertEquals(volumes.get(2), policy.chooseVolume(volumes, 100, - null)); - Assert.assertEquals(volumes.get(3), policy.chooseVolume(volumes, 100, - null)); + assertEquals(volumes.get(0), policy.chooseVolume(volumes, 100, null)); + assertEquals(volumes.get(1), policy.chooseVolume(volumes, 100, null)); + assertEquals(volumes.get(2), policy.chooseVolume(volumes, 100, null)); + assertEquals(volumes.get(3), policy.chooseVolume(volumes, 100, null)); // We should alternate assigning between all the above volumes // for they have the same available space initPolicy(policy, 0, 0.0f); - Assert.assertEquals(volumes.get(0), policy.chooseVolume(volumes, 100, - null)); - Assert.assertEquals(volumes.get(1), policy.chooseVolume(volumes, 100, - null)); - Assert.assertEquals(volumes.get(2), policy.chooseVolume(volumes, 100, - null)); - Assert.assertEquals(volumes.get(3), policy.chooseVolume(volumes, 100, - null)); + assertEquals(volumes.get(0), policy.chooseVolume(volumes, 100, null)); + assertEquals(volumes.get(1), policy.chooseVolume(volumes, 100, null)); + assertEquals(volumes.get(2), policy.chooseVolume(volumes, 100, null)); + assertEquals(volumes.get(3), policy.chooseVolume(volumes, 100, null)); } - @Test(timeout=60000) + @Test + @Timeout(value = 60) public void testFourUnbalancedVolumes() throws Exception { @SuppressWarnings("unchecked") final AvailableSpaceVolumeChoosingPolicy policy = @@ -222,29 +210,22 @@ public void testFourUnbalancedVolumes() throws Exception { // We should alternate assigning between the two volumes with a lot of free // space. 
initPolicy(policy, BALANCED_SPACE_THRESHOLD, 1.0f); - Assert.assertEquals(volumes.get(2), policy.chooseVolume(volumes, 100, - null)); - Assert.assertEquals(volumes.get(3), policy.chooseVolume(volumes, 100, - null)); - Assert.assertEquals(volumes.get(2), policy.chooseVolume(volumes, 100, - null)); - Assert.assertEquals(volumes.get(3), policy.chooseVolume(volumes, 100, - null)); + assertEquals(volumes.get(2), policy.chooseVolume(volumes, 100, null)); + assertEquals(volumes.get(3), policy.chooseVolume(volumes, 100, null)); + assertEquals(volumes.get(2), policy.chooseVolume(volumes, 100, null)); + assertEquals(volumes.get(3), policy.chooseVolume(volumes, 100, null)); // We should alternate assigning between the two volumes with less free // space. initPolicy(policy, BALANCED_SPACE_THRESHOLD, 0.0f); - Assert.assertEquals(volumes.get(0), policy.chooseVolume(volumes, 100, - null)); - Assert.assertEquals(volumes.get(1), policy.chooseVolume(volumes, 100, - null)); - Assert.assertEquals(volumes.get(0), policy.chooseVolume(volumes, 100, - null)); - Assert.assertEquals(volumes.get(1), policy.chooseVolume(volumes, 100, - null)); + assertEquals(volumes.get(0), policy.chooseVolume(volumes, 100, null)); + assertEquals(volumes.get(1), policy.chooseVolume(volumes, 100, null)); + assertEquals(volumes.get(0), policy.chooseVolume(volumes, 100, null)); + assertEquals(volumes.get(1), policy.chooseVolume(volumes, 100, null)); } - - @Test(timeout=60000) + + @Test + @Timeout(value = 60) public void testNotEnoughSpaceOnSelectedVolume() throws Exception { @SuppressWarnings("unchecked") final AvailableSpaceVolumeChoosingPolicy policy = @@ -266,11 +247,11 @@ public void testNotEnoughSpaceOnSelectedVolume() throws Exception { // space to accept the replica size, and another volume does have enough // free space, that should be chosen instead. initPolicy(policy, BALANCED_SPACE_THRESHOLD, 0.0f); - Assert.assertEquals(volumes.get(1), policy.chooseVolume(volumes, - 1024L * 1024L * 2, null)); + assertEquals(volumes.get(1), policy.chooseVolume(volumes, 1024L * 1024L * 2, null)); } - - @Test(timeout=60000) + + @Test + @Timeout(value = 60) public void testAvailableSpaceChanges() throws Exception { @SuppressWarnings("unchecked") final AvailableSpaceVolumeChoosingPolicy policy = @@ -294,26 +275,29 @@ public void testAvailableSpaceChanges() throws Exception { // Should still be able to get a volume for the replica even though the // available space on the second volume changed. 
- Assert.assertEquals(volumes.get(1), policy.chooseVolume(volumes, - 100, null)); + assertEquals(volumes.get(1), policy.chooseVolume(volumes, 100, null)); } - - @Test(timeout=60000) + + @Test + @Timeout(value = 60) public void randomizedTest1() throws Exception { doRandomizedTest(0.75f, 1, 1); } - @Test(timeout=60000) + @Test + @Timeout(value = 60) public void randomizedTest2() throws Exception { doRandomizedTest(0.75f, 5, 1); } - @Test(timeout=60000) + @Test + @Timeout(value = 60) public void randomizedTest3() throws Exception { doRandomizedTest(0.75f, 1, 5); } - @Test(timeout=60000) + @Test + @Timeout(value = 60) public void randomizedTest4() throws Exception { doRandomizedTest(0.90f, 5, 1); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/TestRoundRobinVolumeChoosingPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/TestRoundRobinVolumeChoosingPolicy.java index fc99d3c7e54b0..efd52a62eeca8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/TestRoundRobinVolumeChoosingPolicy.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/TestRoundRobinVolumeChoosingPolicy.java @@ -18,6 +18,8 @@ package org.apache.hadoop.hdfs.server.datanode.fsdataset; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_ROUND_ROBIN_VOLUME_CHOOSING_POLICY_ADDITIONAL_AVAILABLE_SPACE_KEY; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.fail; import java.io.IOException; import java.util.ArrayList; @@ -27,8 +29,7 @@ import org.apache.hadoop.fs.StorageType; import org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException; import org.apache.hadoop.util.ReflectionUtils; -import org.junit.Assert; -import org.junit.Test; +import org.junit.jupiter.api.Test; import org.mockito.Mockito; public class TestRoundRobinVolumeChoosingPolicy { @@ -55,20 +56,19 @@ public static void testRR(VolumeChoosingPolicy policy) Mockito.when(volumes.get(1).getAvailable()).thenReturn(200L); // Test two rounds of round-robin choosing - Assert.assertEquals(volumes.get(0), policy.chooseVolume(volumes, 0, null)); - Assert.assertEquals(volumes.get(1), policy.chooseVolume(volumes, 0, null)); - Assert.assertEquals(volumes.get(0), policy.chooseVolume(volumes, 0, null)); - Assert.assertEquals(volumes.get(1), policy.chooseVolume(volumes, 0, null)); + assertEquals(volumes.get(0), policy.chooseVolume(volumes, 0, null)); + assertEquals(volumes.get(1), policy.chooseVolume(volumes, 0, null)); + assertEquals(volumes.get(0), policy.chooseVolume(volumes, 0, null)); + assertEquals(volumes.get(1), policy.chooseVolume(volumes, 0, null)); // The first volume has only 100L space, so the policy should // wisely choose the second one in case we ask for more. - Assert.assertEquals(volumes.get(1), policy.chooseVolume(volumes, 150, - null)); + assertEquals(volumes.get(1), policy.chooseVolume(volumes, 150, null)); // Fail if no volume can be chosen? try { policy.chooseVolume(volumes, Long.MAX_VALUE, null); - Assert.fail(); + fail(); } catch (IOException e) { // Passed. } @@ -103,15 +103,13 @@ public static void testRRWithAdditionalAvailableSpace( // The first volume has only 100L space, so the policy should choose // the second one with additional available space configured as 100L. 
- Assert.assertEquals(volumes.get(1), policy.chooseVolume(volumes, 0, - null)); - Assert.assertEquals(volumes.get(1), policy.chooseVolume(volumes, 0, - null)); + assertEquals(volumes.get(1), policy.chooseVolume(volumes, 0, null)); + assertEquals(volumes.get(1), policy.chooseVolume(volumes, 0, null)); // Fail if no volume can be chosen? try { policy.chooseVolume(volumes, 100, null); - Assert.fail(); + fail(); } catch (IOException e) { // Passed. } @@ -141,12 +139,13 @@ public static void testRRPolicyExceptionMessage( int blockSize = 700; try { policy.chooseVolume(volumes, blockSize, null); - Assert.fail("expected to throw DiskOutOfSpaceException"); + fail("expected to throw DiskOutOfSpaceException"); } catch(DiskOutOfSpaceException e) { - Assert.assertEquals("Not returnig the expected message", + assertEquals( "Out of space: The volume with the most available space (=" + 600 + " B) is less than the block size (=" + blockSize + " B).", - e.getMessage()); + e.getMessage(), + "Unexpected exception message"); } } @@ -183,23 +182,18 @@ public static void testRRPolicyWithStorageTypes( .thenReturn(StorageType.SSD); Mockito.when(ssdVolumes.get(1).getAvailable()).thenReturn(100L); - Assert.assertEquals(diskVolumes.get(0), - policy.chooseVolume(diskVolumes, 0, null)); + assertEquals(diskVolumes.get(0), policy.chooseVolume(diskVolumes, 0, null)); // Independent Round-Robin for different storage type - Assert.assertEquals(ssdVolumes.get(0), - policy.chooseVolume(ssdVolumes, 0, null)); + assertEquals(ssdVolumes.get(0), policy.chooseVolume(ssdVolumes, 0, null)); // Take block size into consideration - Assert.assertEquals(ssdVolumes.get(0), - policy.chooseVolume(ssdVolumes, 150L, null)); + assertEquals(ssdVolumes.get(0), policy.chooseVolume(ssdVolumes, 150L, null)); - Assert.assertEquals(diskVolumes.get(1), - policy.chooseVolume(diskVolumes, 0, null)); - Assert.assertEquals(diskVolumes.get(0), - policy.chooseVolume(diskVolumes, 50L, null)); + assertEquals(diskVolumes.get(1), policy.chooseVolume(diskVolumes, 0, null)); + assertEquals(diskVolumes.get(0), policy.chooseVolume(diskVolumes, 50L, null)); try { policy.chooseVolume(diskVolumes, 200L, null); - Assert.fail("Should throw an DiskOutOfSpaceException before this!"); + fail("Should throw an DiskOutOfSpaceException before this!"); } catch (DiskOutOfSpaceException e) { // Pass. 
} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetTestUtil.java index 9095594fa4ff7..994def0849cdf 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetTestUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetTestUtil.java @@ -35,8 +35,8 @@ import org.apache.hadoop.hdfs.server.datanode.StorageLocation; import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.fail; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.fail; public class FsDatasetTestUtil { @@ -104,9 +104,9 @@ public static void assertFileLockReleased(String dir) throws IOException { try (RandomAccessFile raf = new RandomAccessFile(lockFile, "rws"); FileChannel channel = raf.getChannel()) { FileLock lock = channel.tryLock(); - assertNotNull(String.format( + assertNotNull(lock, String.format( "Lock file at %s appears to be held by a different process.", - lockFile.getAbsolutePath()), lock); + lockFile.getAbsolutePath())); if (lock != null) { try { lock.release(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/LazyPersistTestCase.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/LazyPersistTestCase.java index eaaa7b0c89227..f687b604f9658 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/LazyPersistTestCase.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/LazyPersistTestCase.java @@ -30,10 +30,9 @@ import static org.apache.hadoop.fs.StorageType.RAM_DISK; import static org.apache.hadoop.hdfs.DFSConfigKeys.*; import static org.apache.hadoop.util.Shell.getMemlockLimit; -import static org.hamcrest.core.Is.is; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertThat; -import static org.junit.Assert.fail; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.fail; +import static org.assertj.core.api.Assertions.assertThat; import java.io.File; import java.io.IOException; @@ -55,6 +54,7 @@ import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo; import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter; +import org.junit.jupiter.api.Timeout; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; @@ -80,11 +80,10 @@ import org.apache.hadoop.net.unix.TemporarySocketDirectory; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.test.GenericTestUtils; -import org.junit.After; -import org.junit.Rule; -import org.junit.rules.Timeout; +import org.junit.jupiter.api.AfterEach; import org.slf4j.event.Level; +@Timeout(300) public abstract class LazyPersistTestCase { static final byte LAZY_PERSIST_POLICY_ID = (byte) 15; @@ -129,7 +128,7 @@ public abstract class LazyPersistTestCase { protected JMXGet jmx; protected TemporarySocketDirectory sockDir; - @After + @AfterEach public void 
shutDownCluster() throws Exception { // Dump all RamDisk JMX metrics before shutdown the cluster @@ -155,15 +154,12 @@ public void shutDownCluster() throws Exception { sockDir = null; } - @Rule - public Timeout timeout = Timeout.seconds(300); - protected final LocatedBlocks ensureFileReplicasOnStorageType( Path path, StorageType storageType) throws IOException, TimeoutException, InterruptedException { // Ensure that returned block locations returned are correct! LOG.info("Ensure path: {} is on StorageType: {}", path, storageType); - assertThat(fs.exists(path), is(true)); + assertThat(fs.exists(path)).isEqualTo(true); long fileLength = client.getFileInfo(path.toString()).getLen(); GenericTestUtils.waitFor(() -> { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestAddBlockPoolException.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestAddBlockPoolException.java index c01dbc3eecd0f..f2bcdb3c4ff9a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestAddBlockPoolException.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestAddBlockPoolException.java @@ -17,14 +17,14 @@ */ package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.assertFalse; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.assertFalse; import static org.mockito.Mockito.mock; import java.io.IOException; import java.util.concurrent.ConcurrentHashMap; import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi; -import org.junit.Test; +import org.junit.jupiter.api.Test; /** * Tests to ensure AddBlockPoolException behaves correctly when additional diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestCacheByPmemMappableBlockLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestCacheByPmemMappableBlockLoader.java index baea8f5de2bbb..c9ec7fcd3bf69 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestCacheByPmemMappableBlockLoader.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestCacheByPmemMappableBlockLoader.java @@ -23,10 +23,10 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_PMEM_CACHE_DIRS_KEY; import static org.apache.hadoop.test.MetricsAsserts.getMetrics; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; import java.io.File; import java.io.IOException; @@ -54,19 +54,19 @@ import org.apache.hadoop.metrics2.MetricsRecordBuilder; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.test.MetricsAsserts; -import org.junit.After; -import org.junit.AfterClass; -import org.junit.Assert; 
-import org.junit.Before; -import org.junit.BeforeClass; -import org.junit.Test; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.Timeout; import org.slf4j.event.Level; import java.util.function.Supplier; import org.apache.hadoop.thirdparty.com.google.common.primitives.Ints; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_FSDATASETCACHE_MAX_THREADS_PER_VOLUME_KEY; -import static org.junit.Assume.assumeTrue; +import static org.junit.jupiter.api.Assumptions.assumeTrue; /** * Tests HDFS persistent memory cache by PmemMappableBlockLoader. @@ -103,9 +103,9 @@ public class TestCacheByPmemMappableBlockLoader { LoggerFactory.getLogger(FsDatasetCache.class), Level.DEBUG); } - @BeforeClass + @BeforeAll public static void setUpClass() throws Exception { - assumeTrue("Requires PMDK", NativeIO.POSIX.isPmdkAvailable()); + assumeTrue(NativeIO.POSIX.isPmdkAvailable(), "Requires PMDK"); oldInjector = DataNodeFaultInjector.get(); DataNodeFaultInjector.set(new DataNodeFaultInjector() { @@ -121,12 +121,12 @@ public void endOfferService() throws Exception { }); } - @AfterClass + @AfterAll public static void tearDownClass() throws Exception { DataNodeFaultInjector.set(oldInjector); } - @Before + @BeforeEach public void setUp() throws Exception { conf = new HdfsConfiguration(); conf.setLong( @@ -155,7 +155,7 @@ public void setUp() throws Exception { cacheManager = ((FsDatasetImpl) dn.getFSDataset()).cacheManager; } - @After + @AfterEach public void tearDown() throws Exception { if (fs != null) { fs.close(); @@ -209,12 +209,13 @@ public List getExtendedBlockId(Path filePath, long fileLen) return keys; } - @Test(timeout = 60000) + @Test + @Timeout(value = 60) public void testCacheAndUncache() throws Exception { final int maxCacheBlocksNum = Ints.checkedCast(CACHE_CAPACITY / BLOCK_SIZE); BlockReaderTestUtil.enableHdfsCachingTracing(); - Assert.assertEquals(0, CACHE_CAPACITY % BLOCK_SIZE); + assertEquals(0, CACHE_CAPACITY % BLOCK_SIZE); assertEquals(CACHE_CAPACITY, cacheManager.getCacheCapacity()); // DRAM cache is expected to be disabled. 
assertEquals(0L, cacheManager.getMemCacheCapacity()); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestDatanodeRestart.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestDatanodeRestart.java index ac9587e54f7ee..2302a4ec0d21a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestDatanodeRestart.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestDatanodeRestart.java @@ -41,8 +41,9 @@ import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.util.Time; -import org.junit.Assert; -import org.junit.Test; +import org.junit.jupiter.api.Test; + +import static org.junit.jupiter.api.Assertions.assertEquals; /** Test if a datanode can correctly upgrade itself */ public class TestDatanodeRestart { @@ -122,13 +123,13 @@ private void testRbwReplicas(MiniDFSCluster cluster, boolean isCorrupt) // check volumeMap: one rwr replica String bpid = cluster.getNamesystem().getBlockPoolId(); ReplicaMap replicas = dataset(dn).volumeMap; - Assert.assertEquals(1, replicas.size(bpid)); + assertEquals(1, replicas.size(bpid)); ReplicaInfo replica = replicas.replicas(bpid).iterator().next(); - Assert.assertEquals(ReplicaState.RWR, replica.getState()); + assertEquals(ReplicaState.RWR, replica.getState()); if (isCorrupt) { - Assert.assertEquals((fileLen-1)/512*512, replica.getNumBytes()); + assertEquals((fileLen - 1) / 512 * 512, replica.getNumBytes()); } else { - Assert.assertEquals(fileLen, replica.getNumBytes()); + assertEquals(fileLen, replica.getNumBytes()); } dataset(dn).invalidate(bpid, new Block[]{replica}); } finally { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetCache.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetCache.java index 8b1a6c0814ca8..6f8d200308603 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetCache.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetCache.java @@ -20,8 +20,9 @@ import net.jcip.annotations.NotThreadSafe; import org.apache.hadoop.hdfs.server.protocol.SlowDiskReports; import static org.apache.hadoop.test.MetricsAsserts.getMetrics; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.anyBoolean; import static org.mockito.ArgumentMatchers.anyInt; @@ -83,12 +84,12 @@ import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.test.MetricsAsserts; import org.apache.log4j.Logger; -import org.junit.After; -import org.junit.AfterClass; -import org.junit.Assert; -import org.junit.Before; -import org.junit.BeforeClass; -import org.junit.Test; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Test; +import 
org.junit.jupiter.api.Timeout; import org.slf4j.event.Level; import java.util.function.Supplier; @@ -131,7 +132,7 @@ public class TestFsDatasetCache { LoggerFactory.getLogger(FsDatasetCache.class), Level.DEBUG); } - @BeforeClass + @BeforeAll public static void setUpClass() throws Exception { oldInjector = DataNodeFaultInjector.get(); DataNodeFaultInjector.set(new DataNodeFaultInjector() { @@ -146,12 +147,12 @@ public void endOfferService() throws Exception { }); } - @AfterClass + @AfterAll public static void tearDownClass() throws Exception { DataNodeFaultInjector.set(oldInjector); } - @Before + @BeforeEach public void setUp() throws Exception { conf = new HdfsConfiguration(); conf.setLong( @@ -179,7 +180,7 @@ public void setUp() throws Exception { spyNN = InternalDataNodeTestUtils.spyOnBposToNN(dn, nn); } - @After + @AfterEach public void tearDown() throws Exception { // Verify that each test uncached whatever it cached. This cleanup is // required so that file descriptors are not leaked across tests. @@ -286,15 +287,15 @@ private void testCacheAndUncacheBlock() throws Exception { // Get the details of the written file HdfsBlockLocation[] locs = (HdfsBlockLocation[])fs.getFileBlockLocations(testFile, 0, testFileLen); - assertEquals("Unexpected number of blocks", NUM_BLOCKS, locs.length); + assertEquals(NUM_BLOCKS, locs.length, "Unexpected number of blocks"); final long[] blockSizes = getBlockSizes(locs); // Check initial state final long cacheCapacity = fsd.getCacheCapacity(); long cacheUsed = fsd.getCacheUsed(); long current = 0; - assertEquals("Unexpected cache capacity", CACHE_CAPACITY, cacheCapacity); - assertEquals("Unexpected amount of cache used", current, cacheUsed); + assertEquals(CACHE_CAPACITY, cacheCapacity, "Unexpected cache capacity"); + assertEquals(current, cacheUsed, "Unexpected amount of cache used"); MetricsRecordBuilder dnMetrics; long numCacheCommands = 0; @@ -307,9 +308,8 @@ private void testCacheAndUncacheBlock() throws Exception { current + blockSizes[i], i + 1, fsd); dnMetrics = getMetrics(dn.getMetrics().name()); long cmds = MetricsAsserts.getLongCounter("BlocksCached", dnMetrics); - assertTrue("Expected more cache requests from the NN (" - + cmds + " <= " + numCacheCommands + ")", - cmds > numCacheCommands); + assertTrue(cmds > numCacheCommands, + "Expected more cache requests from the NN (" + cmds + " <= " + numCacheCommands + ")"); numCacheCommands = cmds; } @@ -321,14 +321,14 @@ private void testCacheAndUncacheBlock() throws Exception { NUM_BLOCKS - 1 - i, fsd); dnMetrics = getMetrics(dn.getMetrics().name()); long cmds = MetricsAsserts.getLongCounter("BlocksUncached", dnMetrics); - assertTrue("Expected more uncache requests from the NN", - cmds > numUncacheCommands); + assertTrue(cmds > numUncacheCommands, "Expected more uncache requests from the NN"); numUncacheCommands = cmds; } LOG.info("finishing testCacheAndUncacheBlock"); } - @Test(timeout=600000) + @Test + @Timeout(value = 600) public void testCacheAndUncacheBlockSimple() throws Exception { testCacheAndUncacheBlock(); } @@ -337,7 +337,8 @@ public void testCacheAndUncacheBlockSimple() throws Exception { * Run testCacheAndUncacheBlock with some failures injected into the mlock * call. This tests the ability of the NameNode to resend commands. */ - @Test(timeout=600000) + @Test + @Timeout(value = 600) public void testCacheAndUncacheBlockWithRetries() throws Exception { // We don't have to save the previous cacheManipulator // because it will be reinstalled by the @After function. 
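The TestFsDatasetCache hunks below apply the same two mechanical changes used throughout this patch: @Test(timeout = 600000), measured in milliseconds, becomes @Test plus @Timeout(value = 600), which defaults to seconds, and JUnit 4's message-first assertEquals/assertTrue become the JUnit 5 message-last overloads. A compact sketch (illustrative only; the class and values are made up):

import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertTrue;

import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.Timeout;

class TimeoutAndMessageSketch {

  // JUnit 4: @Test(timeout = 600000) in milliseconds.
  // JUnit 5: @Timeout defaults to seconds, so 600000 ms becomes 600.
  @Test
  @Timeout(value = 600)
  void cacheAndUncache() {
    long cacheCapacity = 64L * 1024;
    // The failure message moves from the first argument to the last one.
    assertEquals(64L * 1024, cacheCapacity, "Unexpected cache capacity");
    assertTrue(cacheCapacity > 0, "Expected a positive cache capacity");
  }
}
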
@@ -360,7 +361,8 @@ public void mlock(String identifier, testCacheAndUncacheBlock(); } - @Test(timeout=600000) + @Test + @Timeout(value = 600) public void testFilesExceedMaxLockedMemory() throws Exception { LOG.info("beginning testFilesExceedMaxLockedMemory"); @@ -409,8 +411,7 @@ public Boolean get() { } }, 500, 30000); // Also check the metrics for the failure - assertTrue("Expected more than 0 failed cache attempts", - fsd.getNumBlocksFailedToCache() > 0); + assertTrue(fsd.getNumBlocksFailedToCache() > 0, "Expected more than 0 failed cache attempts"); // Uncache the n-1 files int curCachedBlocks = 16; @@ -424,7 +425,8 @@ public Boolean get() { LOG.info("finishing testFilesExceedMaxLockedMemory"); } - @Test(timeout=600000) + @Test + @Timeout(value = 600) public void testUncachingBlocksBeforeCachingFinishes() throws Exception { LOG.info("beginning testUncachingBlocksBeforeCachingFinishes"); final int NUM_BLOCKS = 5; @@ -439,15 +441,15 @@ public void testUncachingBlocksBeforeCachingFinishes() throws Exception { // Get the details of the written file HdfsBlockLocation[] locs = (HdfsBlockLocation[])fs.getFileBlockLocations(testFile, 0, testFileLen); - assertEquals("Unexpected number of blocks", NUM_BLOCKS, locs.length); + assertEquals(NUM_BLOCKS, locs.length, "Unexpected number of blocks"); final long[] blockSizes = getBlockSizes(locs); // Check initial state final long cacheCapacity = fsd.getCacheCapacity(); long cacheUsed = fsd.getCacheUsed(); long current = 0; - assertEquals("Unexpected cache capacity", CACHE_CAPACITY, cacheCapacity); - assertEquals("Unexpected amount of cache used", current, cacheUsed); + assertEquals(CACHE_CAPACITY, cacheCapacity, "Unexpected cache capacity"); + assertEquals(current, cacheUsed, "Unexpected amount of cache used"); NativeIO.POSIX.setCacheManipulator(new NoMlockCacheManipulator() { @Override @@ -457,7 +459,7 @@ public void mlock(String identifier, try { Thread.sleep(3000); } catch (InterruptedException e) { - Assert.fail(); + fail(); } } }); @@ -478,7 +480,8 @@ public void mlock(String identifier, LOG.info("finishing testUncachingBlocksBeforeCachingFinishes"); } - @Test(timeout=60000) + @Test + @Timeout(value = 60) public void testUncacheUnknownBlock() throws Exception { // Create a file Path fileName = new Path("/testUncacheUnknownBlock"); @@ -498,13 +501,13 @@ public Boolean get() { }, 100, 10000); } - @Test(timeout=600000) + @Test + @Timeout(value = 600) public void testPageRounder() throws Exception { // Write a small file Path fileName = new Path("/testPageRounder"); final int smallBlocks = 512; // This should be smaller than the page size - assertTrue("Page size should be greater than smallBlocks!", - PAGE_SIZE > smallBlocks); + assertTrue(PAGE_SIZE > smallBlocks, "Page size should be greater than smallBlocks!"); final int numBlocks = 5; final int fileLen = smallBlocks * numBlocks; FSDataOutputStream out = @@ -521,7 +524,8 @@ public void testPageRounder() throws Exception { DFSTestUtil.verifyExpectedCacheUsage(0, 0, fsd); } - @Test(timeout=60000) + @Test + @Timeout(value = 60) public void testUncacheQuiesces() throws Exception { // Create a file Path fileName = new Path("/testUncacheQuiesces"); @@ -559,12 +563,13 @@ public Boolean get() { MetricsAsserts.assertCounter("BlocksUncached", 1l, dnMetrics); } - @Test(timeout=60000) + @Test + @Timeout(value = 60) public void testReCacheAfterUncache() throws Exception { final int TOTAL_BLOCKS_PER_CACHE = Ints.checkedCast(CACHE_CAPACITY / BLOCK_SIZE); BlockReaderTestUtil.enableHdfsCachingTracing(); - 
Assert.assertEquals(0, CACHE_CAPACITY % BLOCK_SIZE); + assertEquals(0, CACHE_CAPACITY % BLOCK_SIZE); // Create a small file final Path SMALL_FILE = new Path("/smallFile"); @@ -602,7 +607,7 @@ public Boolean get() { .setPool("pool").setPath(SMALL_FILE).setReplication((short)1).build()); Thread.sleep(10000); MetricsRecordBuilder dnMetrics = getMetrics(dn.getMetrics().name()); - Assert.assertEquals(TOTAL_BLOCKS_PER_CACHE, + assertEquals(TOTAL_BLOCKS_PER_CACHE, MetricsAsserts.getLongCounter("BlocksCached", dnMetrics)); // Uncache the big file and verify that the small file can now be @@ -626,7 +631,7 @@ public Boolean get() { } LOG.info("directive " + shortCacheDirectiveId + " has been cached."); } catch (IOException e) { - Assert.fail("unexpected exception" + e.toString()); + fail("unexpected exception" + e.toString()); } return true; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java index f58ee729ef98f..883290ef41c4e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java @@ -79,16 +79,16 @@ import org.apache.hadoop.io.MultipleIOException; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.test.LambdaTestUtils; +import org.apache.hadoop.test.TestName; import org.apache.hadoop.util.DiskChecker; import org.apache.hadoop.util.FakeTimer; import org.apache.hadoop.util.Lists; import org.apache.hadoop.util.StringUtils; -import org.junit.Assert; -import org.junit.Before; -import org.junit.After; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.TestName; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.Timeout; +import org.junit.jupiter.api.extension.RegisterExtension; import org.mockito.Mockito; import java.io.File; @@ -107,16 +107,15 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SIZE_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DN_CACHED_DFSUSED_CHECK_INTERVAL_MS; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_KEY; -import static org.hamcrest.core.Is.is; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNotEquals; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertThat; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.assertNull; -import static org.junit.Assert.assertSame; -import static org.junit.Assert.fail; +import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertSame; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; import static org.mockito.ArgumentMatchers.anyList; import static 
org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.anyString; @@ -160,7 +159,8 @@ public class TestFsDatasetImpl { private final static String BLOCKPOOL = "BP-TEST"; - @Rule + @SuppressWarnings("checkstyle:VisibilityModifier") + @RegisterExtension public TestName name = new TestName(); private static Storage.StorageDirectory createStorageDirectory(File root, @@ -221,7 +221,7 @@ private int getNumVolumes() { } } - @Before + @BeforeEach public void setUp() throws IOException { datanode = mock(DataNode.class); storage = mock(DataStorage.class); @@ -251,7 +251,7 @@ public void setUp() throws IOException { assertEquals(0, dataset.getNumFailedVolumes()); } - @After + @AfterEach public void checkDataSetLockManager() { manager.lockLeakCheck(); // make sure no lock Leak. @@ -497,7 +497,8 @@ public void testAddVolumeWithSameStorageUuid() throws IOException { } } - @Test(timeout = 30000) + @Test + @Timeout(value = 30) public void testRemoveOneVolume() throws IOException { // Feed FsDataset with block metadata. final int numBlocks = 100; @@ -533,10 +534,10 @@ public void testRemoveOneVolume() throws IOException { volReferences.close(); dataset.removeVolumes(volumesToRemove, true); int expectedNumVolumes = dataDirs.length - 1; - assertEquals("The volume has been removed from the volumeList.", - expectedNumVolumes, getNumVolumes()); - assertEquals("The volume has been removed from the storageMap.", - expectedNumVolumes, dataset.storageMap.size()); + assertEquals(expectedNumVolumes, getNumVolumes(), + "The volume has been removed from the volumeList."); + assertEquals(expectedNumVolumes, dataset.storageMap.size(), + "The volume has been removed from the storageMap."); // DataNode.notifyNamenodeDeletedBlock() should be called 50 times // as we deleted one volume that has 50 blocks @@ -559,12 +560,12 @@ public void run() {} for (String bpid : dataset.volumeMap.getBlockPoolList()) { totalNumReplicas += dataset.volumeMap.size(bpid); } - assertEquals("The replica infos on this volume has been removed from the " - + "volumeMap.", numBlocks / NUM_INIT_VOLUMES, - totalNumReplicas); + assertEquals(numBlocks / NUM_INIT_VOLUMES, totalNumReplicas, + "The replica infos on this volume has been removed from the " + "volumeMap."); } - @Test(timeout = 30000) + @Test + @Timeout(value = 30) public void testRemoveTwoVolumes() throws IOException { // Feed FsDataset with block metadata. 
final int numBlocks = 100; @@ -603,10 +604,10 @@ public void testRemoveTwoVolumes() throws IOException { dataset.removeVolumes(volumesToRemove, true); int expectedNumVolumes = dataDirs.length - 2; - assertEquals("The volume has been removed from the volumeList.", - expectedNumVolumes, getNumVolumes()); - assertEquals("The volume has been removed from the storageMap.", - expectedNumVolumes, dataset.storageMap.size()); + assertEquals(expectedNumVolumes, getNumVolumes(), + "The volume has been removed from the volumeList."); + assertEquals(expectedNumVolumes, dataset.storageMap.size(), + "The volume has been removed from the storageMap."); // DataNode.notifyNamenodeDeletedBlock() should be called 100 times // as we deleted 2 volumes that have 100 blocks totally @@ -631,11 +632,12 @@ public void run() {} for (String bpid : dataset.volumeMap.getBlockPoolList()) { totalNumReplicas += dataset.volumeMap.size(bpid); } - assertEquals("The replica infos on this volume has been removed from the " - + "volumeMap.", 0, totalNumReplicas); + assertEquals(0, totalNumReplicas, + "The replica infos on this volume has been removed from the " + "volumeMap."); } - @Test(timeout = 30000) + @Test + @Timeout(value = 30) public void testConcurrentWriteAndDeleteBlock() throws Exception { // Feed FsDataset with block metadata. final int numBlocks = 1000; @@ -685,7 +687,8 @@ public void run() { } } - @Test(timeout = 5000) + @Test + @Timeout(value = 5) public void testRemoveNewlyAddedVolume() throws IOException { final int numExistingVolumes = getNumVolumes(); List nsInfos = new ArrayList<>(); @@ -823,12 +826,9 @@ public void testDuplicateReplicaResolution() throws IOException { assertNull(BlockPoolSlice.selectReplicaToDelete(replicaNewer, replica)); // keep latest found replica - assertSame(replica, - BlockPoolSlice.selectReplicaToDelete(replicaOtherSame, replica)); - assertSame(replicaOtherOlder, - BlockPoolSlice.selectReplicaToDelete(replicaOtherOlder, replica)); - assertSame(replica, - BlockPoolSlice.selectReplicaToDelete(replicaOtherNewer, replica)); + assertSame(replica, BlockPoolSlice.selectReplicaToDelete(replicaOtherSame, replica)); + assertSame(replicaOtherOlder, BlockPoolSlice.selectReplicaToDelete(replicaOtherOlder, replica)); + assertSame(replica, BlockPoolSlice.selectReplicaToDelete(replicaOtherNewer, replica)); } @Test @@ -922,7 +922,8 @@ private long getDfsUsedValueOfNewVolume(long cacheDfsUsed, return dfsUsed; } - @Test(timeout = 60000) + @Test + @Timeout(value = 60) public void testRemoveVolumeBeingWritten() throws Exception { // Will write and remove on dn0. 
final ExtendedBlock eb = new ExtendedBlock(BLOCK_POOL_IDS[0], 0); @@ -979,7 +980,7 @@ public void run() { volumesToRemove.add(dataset.getVolume(eb).getStorageLocation()); } catch (Exception e) { LOG.info("Problem preparing volumes to remove: ", e); - Assert.fail("Exception in remove volume thread, check log for " + + fail("Exception in remove volume thread, check log for " + "details."); } LOG.info("Removing volume " + volumesToRemove); @@ -1061,8 +1062,8 @@ public void testCleanShutdownOfVolume() throws Exception { finalizedDir.setExecutable(false); assertTrue(FileUtil.setWritable(finalizedDir, false)); } - Assert.assertTrue("Reference count for the volume should be greater " - + "than 0", volume.getReferenceCount() > 0); + assertTrue(volume.getReferenceCount() > 0, + "Reference count for the volume should be greater " + "than 0"); // Invoke the synchronous checkDiskError method dataNode.checkDiskError(); // Sleep for 1 second so that datanode can interrupt and cluster clean up @@ -1071,11 +1072,11 @@ public void testCleanShutdownOfVolume() throws Exception { return volume.getReferenceCount() == 0; } }, 100, 1000); - assertThat(dataNode.getFSDataset().getNumFailedVolumes(), is(1)); + assertThat(dataNode.getFSDataset().getNumFailedVolumes()).isEqualTo(1); try { out.close(); - Assert.fail("This is not a valid code path. " + fail("This is not a valid code path. " + "out.close should have thrown an exception."); } catch (IOException ioe) { GenericTestUtils.assertExceptionContains(info.getXferAddr(), ioe); @@ -1087,7 +1088,8 @@ public void testCleanShutdownOfVolume() throws Exception { } } - @Test(timeout = 30000) + @Test + @Timeout(value = 30) public void testReportBadBlocks() throws Exception { boolean threwException = false; final Configuration config = new HdfsConfiguration(); @@ -1095,7 +1097,7 @@ public void testReportBadBlocks() throws Exception { .numDataNodes(1).build()) { cluster.waitActive(); - Assert.assertEquals(0, cluster.getNamesystem().getCorruptReplicaBlocks()); + assertEquals(0, cluster.getNamesystem().getCorruptReplicaBlocks()); DataNode dataNode = cluster.getDataNodes().get(0); ExtendedBlock block = new ExtendedBlock(cluster.getNamesystem().getBlockPoolId(), 0); try { @@ -1105,8 +1107,8 @@ public void testReportBadBlocks() throws Exception { threwException = true; } Thread.sleep(3000); - Assert.assertFalse(threwException); - Assert.assertEquals(0, cluster.getNamesystem().getCorruptReplicaBlocks()); + assertFalse(threwException); + assertEquals(0, cluster.getNamesystem().getCorruptReplicaBlocks()); FileSystem fs = cluster.getFileSystem(); Path filePath = new Path(name.getMethodName()); @@ -1117,8 +1119,8 @@ public void testReportBadBlocks() throws Exception { dataNode.reportBadBlocks(block, dataNode.getFSDataset().getFsVolumeReferences().get(0)); DataNodeTestUtils.triggerHeartbeat(dataNode); BlockManagerTestUtil.updateState(cluster.getNamesystem().getBlockManager()); - assertEquals("Corrupt replica blocks could not be reflected with the heartbeat", 1, - cluster.getNamesystem().getCorruptReplicaBlocks()); + assertEquals(1, cluster.getNamesystem().getCorruptReplicaBlocks(), + "Corrupt replica blocks could not be reflected with the heartbeat"); } } @@ -1127,7 +1129,8 @@ public void testReportBadBlocks() throws Exception { * and append happened in the middle, * block movement should fail and hardlink is removed. 
*/ - @Test(timeout = 30000) + @Test + @Timeout(value = 30) public void testMoveBlockFailure() { // Test copy testMoveBlockFailure(conf); @@ -1188,7 +1191,8 @@ private void testMoveBlockFailure(Configuration config) { } } - @Test(timeout = 30000) + @Test + @Timeout(value = 30) public void testMoveBlockSuccess() { MiniDFSCluster cluster = null; try { @@ -1222,7 +1226,8 @@ public void testMoveBlockSuccess() { * Make sure datanode restart can clean up un-finalized links, * if the block is not finalized yet. */ - @Test(timeout = 30000) + @Test + @Timeout(value = 30) public void testDnRestartWithHardLinkInTmp() { MiniDFSCluster cluster = null; try { @@ -1279,7 +1284,8 @@ public void testDnRestartWithHardLinkInTmp() { * If new block is finalized and DN restarted, * DiskScanner should clean up the hardlink correctly. */ - @Test(timeout = 30000) + @Test + @Timeout(value = 30) public void testDnRestartWithHardLink() throws Exception { MiniDFSCluster cluster = null; boolean isReplicaDeletionEnabled = @@ -1365,7 +1371,8 @@ public void testDnRestartWithHardLink() throws Exception { } } - @Test(timeout = 30000) + @Test + @Timeout(value = 30) public void testMoveBlockSuccessWithSameMountMove() { MiniDFSCluster cluster = null; try { @@ -1413,7 +1420,8 @@ public void testMoveBlockSuccessWithSameMountMove() { } // Move should fail if the volume on same mount has no space. - @Test(timeout = 30000) + @Test + @Timeout(value = 30) public void testMoveBlockWithSameMountMoveWithoutSpace() { MiniDFSCluster cluster = null; try { @@ -1455,7 +1463,8 @@ public void testMoveBlockWithSameMountMoveWithoutSpace() { } // More tests on shouldConsiderSameMountVolume. - @Test(timeout = 10000) + @Test + @Timeout(value = 10) public void testShouldConsiderSameMountVolume() throws IOException { FsVolumeImpl volume = new FsVolumeImplBuilder() .setConf(conf) @@ -1559,7 +1568,8 @@ private FsVolumeSpi getDestinationVolume(ExtendedBlock block, FsDatasetImpl return destVolume; } - @Test(timeout = 3000000) + @Test + @Timeout(value = 3000) public void testBlockReadOpWhileMovingBlock() throws IOException { MiniDFSCluster cluster = null; try { @@ -1595,21 +1605,21 @@ public void testBlockReadOpWhileMovingBlock() throws IOException { (DistributedFileSystem) fs, blk, 0, 512 * 2); byte[] buf = new byte[512 * 2]; blkReader.read(buf, 0, 512); - assertEquals(blockData.substring(0, 512), new String(buf, - StandardCharsets.US_ASCII).substring(0, 512)); + assertEquals(blockData.substring(0, 512), + new String(buf, StandardCharsets.US_ASCII).substring(0, 512)); // Part 2: Move block and than read remaining block FsDatasetImpl fsDataSetImpl = (FsDatasetImpl) dataNode.getFSDataset(); ReplicaInfo replicaInfo = fsDataSetImpl.getReplicaInfo(block); FsVolumeSpi destVolume = getDestinationVolume(block, fsDataSetImpl); - assertNotNull("Destination volume should not be null.", destVolume); + assertNotNull(destVolume, "Destination volume should not be null."); fsDataSetImpl.moveBlock(block, replicaInfo, destVolume.obtainReference(), false); // Trigger block report to update block info in NN cluster.triggerBlockReports(); blkReader.read(buf, 512, 512); - assertEquals(blockData.substring(0, 512 * 2), new String(buf, - StandardCharsets.US_ASCII).substring(0, 512 * 2)); + assertEquals(blockData.substring(0, 512 * 2), + new String(buf, StandardCharsets.US_ASCII).substring(0, 512 * 2)); blkReader = BlockReaderTestUtil.getBlockReader( (DistributedFileSystem) fs, blk, 0, blockData.length()); @@ -1641,7 +1651,8 @@ public void testBlockReadOpWhileMovingBlock() throws 
IOException { } } - @Test(timeout=30000) + @Test + @Timeout(value = 30) public void testDataDirWithPercent() throws IOException { String baseDir = new FileSystemTestHelper().getTestRootDir(); File dataDir = new File(baseDir, "invalidFormatString-%z"); @@ -1768,7 +1779,8 @@ public void testNotifyNamenodeMissingOrNewBlock() throws Exception { } } - @Test(timeout = 20000) + @Test + @Timeout(value = 20) public void testReleaseVolumeRefIfExceptionThrown() throws IOException { MiniDFSCluster cluster = new MiniDFSCluster.Builder( new HdfsConfiguration()).build(); @@ -1801,7 +1813,8 @@ public void testReleaseVolumeRefIfExceptionThrown() throws IOException { } } - @Test(timeout = 30000) + @Test + @Timeout(value = 30) public void testTransferAndNativeCopyMetrics() throws IOException { Configuration config = new HdfsConfiguration(); config.setInt( @@ -1998,8 +2011,7 @@ public void testInvalidateMissingBlock() throws Exception { // Assert local block file wouldn't be deleted from disk. assertTrue(blockFile.exists()); // Assert block info would be removed from ReplicaMap. - assertEquals("null", - fsdataset.getReplicaString(bpid, replicaInfo.getBlockId())); + assertEquals("null", fsdataset.getReplicaString(bpid, replicaInfo.getBlockId())); BlockManager blockManager = cluster.getNameNode(). getNamesystem().getBlockManager(); GenericTestUtils.waitFor(() -> @@ -2100,15 +2112,16 @@ public void delayGetMetaDataInputStream() { String expectedMsg = String.format("opReadBlock %s received exception " + "java.io.FileNotFoundException: %s (No such file or directory)", blk.getBlock(), tmpReplicaInfo.getMetadataURI().getPath()); - assertTrue("Expected log message not found in DN log.", - logCapturer.getOutput().contains(expectedMsg)); + assertTrue(logCapturer.getOutput().contains(expectedMsg), + "Expected log message not found in DN log."); } finally { cluster.shutdown(); DataNodeFaultInjector.set(oldDnInjector); } } - @Test(timeout = 30000) + @Test + @Timeout(value = 30) public void testAppend() { MiniDFSCluster cluster = null; try { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsVolumeList.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsVolumeList.java index 1b638e28fa7de..6c00e9690bb91 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsVolumeList.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsVolumeList.java @@ -42,9 +42,9 @@ import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableMap; import org.apache.hadoop.util.StringUtils; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.Timeout; import org.mockito.Mockito; import java.io.File; @@ -60,11 +60,11 @@ import java.util.stream.Collectors; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DU_RESERVED_PERCENTAGE_KEY; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotEquals; -import static org.junit.Assert.assertNull; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotEquals; +import static 
org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; +import static org.junit.jupiter.api.Assertions.assertNull; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; @@ -83,7 +83,7 @@ public class TestFsVolumeList { private final static int DEFAULT_BLOCK_SIZE = 102400; private final static int BUFFER_LENGTH = 1024; - @Before + @BeforeEach public void setUp() { dataset = mock(FsDatasetImpl.class); baseDir = new FileSystemTestHelper().getTestRootDir(); @@ -94,7 +94,8 @@ public void setUp() { conf = new Configuration(); } - @Test(timeout=30000) + @Test + @Timeout(value = 30) public void testGetNextVolumeWithClosedVolume() throws IOException { FsVolumeList volumeList = new FsVolumeList( Collections.emptyList(), @@ -138,7 +139,8 @@ public Boolean get() { } } - @Test(timeout=30000) + @Test + @Timeout(value = 30) public void testReleaseVolumeRefIfNoBlockScanner() throws IOException { FsVolumeList volumeList = new FsVolumeList( Collections.emptyList(), null, blockChooser, conf, null); @@ -172,7 +174,7 @@ public void testDfsReservedForDifferentStorageTypes() throws IOException { .setStorageID("storage-id") .setConf(conf) .build(); - assertEquals("", 100L, volume.getReserved()); + assertEquals(100L, volume.getReserved(), ""); // when storage type reserved is configured. conf.setLong( DFSConfigKeys.DFS_DATANODE_DU_RESERVED_KEY + "." @@ -190,7 +192,7 @@ public void testDfsReservedForDifferentStorageTypes() throws IOException { .setStorageID("storage-id") .setConf(conf) .build(); - assertEquals("", 1L, volume1.getReserved()); + assertEquals(1L, volume1.getReserved(), ""); FsVolumeImpl volume2 = new FsVolumeImplBuilder().setDataset(dataset) .setStorageDirectory( new StorageDirectory( @@ -198,7 +200,7 @@ public void testDfsReservedForDifferentStorageTypes() throws IOException { .setStorageID("storage-id") .setConf(conf) .build(); - assertEquals("", 2L, volume2.getReserved()); + assertEquals(2L, volume2.getReserved(), ""); FsVolumeImpl volume3 = new FsVolumeImplBuilder().setDataset(dataset) .setStorageDirectory( new StorageDirectory( @@ -206,7 +208,7 @@ public void testDfsReservedForDifferentStorageTypes() throws IOException { .setStorageID("storage-id") .setConf(conf) .build(); - assertEquals("", 100L, volume3.getReserved()); + assertEquals(100L, volume3.getReserved(), ""); FsVolumeImpl volume4 = new FsVolumeImplBuilder().setDataset(dataset) .setStorageDirectory( new StorageDirectory( @@ -214,7 +216,7 @@ public void testDfsReservedForDifferentStorageTypes() throws IOException { .setStorageID("storage-id") .setConf(conf) .build(); - assertEquals("", 100L, volume4.getReserved()); + assertEquals(100L, volume4.getReserved(), ""); FsVolumeImpl volume5 = new FsVolumeImplBuilder().setDataset(dataset) .setStorageDirectory( new StorageDirectory( @@ -367,7 +369,8 @@ public void testDfsReservedPercentageForDifferentStorageTypes() assertEquals(200, volume5.getAvailable()); } - @Test(timeout = 300000) + @Test + @Timeout(value = 300) public void testAddRplicaProcessorForAddingReplicaInMap() throws Exception { BlockPoolSlice.reInitializeAddReplicaThreadPool(); Configuration cnf = new Configuration(); @@ -414,13 +417,13 @@ public void run() { // It will create BlockPoolSlice.AddReplicaProcessor task's and lunch in // ForkJoinPool recursively vol.getVolumeMap(bpid, volumeMap, ramDiskReplicaMap); - assertTrue("Failed to add all the replica to map", volumeMap.replicas(bpid) - .size() == 1000); - 
assertEquals("Fork pool should be initialize with configured pool size", - poolSize, BlockPoolSlice.getAddReplicaForkPoolSize()); + assertTrue(volumeMap.replicas(bpid).size() == 1000, "Failed to add all the replica to map"); + assertEquals(poolSize, BlockPoolSlice.getAddReplicaForkPoolSize(), + "Fork pool should be initialize with configured pool size"); } - @Test(timeout = 60000) + @Test + @Timeout(value = 60) public void testInstanceOfAddReplicaThreadPool() throws Exception { // Start cluster with multiple namespace try (MiniDFSCluster cluster = new MiniDFSCluster.Builder( @@ -436,9 +439,8 @@ public void testInstanceOfAddReplicaThreadPool() throws Exception { cluster.getNamesystem(0).getBlockPoolId()).getAddReplicaThreadPool(); ForkJoinPool threadPool2 = vol.getBlockPoolSlice( cluster.getNamesystem(1).getBlockPoolId()).getAddReplicaThreadPool(); - assertEquals( - "Thread pool instance should be same in all the BlockPoolSlice", - threadPool1, threadPool2); + assertEquals(threadPool1, threadPool2, + "Thread pool instance should be same in all the BlockPoolSlice"); } } @@ -529,13 +531,11 @@ public void testGetVolumeWithSameDiskArchival() throws Exception { // 1) getVolumeRef should return correct reference. assertEquals(diskVolume, - volumeList.getMountVolumeMap() - .getVolumeRefByMountAndStorageType( - device, StorageType.DISK).getVolume()); + volumeList.getMountVolumeMap().getVolumeRefByMountAndStorageType(device, StorageType.DISK) + .getVolume()); assertEquals(archivalVolume, volumeList.getMountVolumeMap() - .getVolumeRefByMountAndStorageType( - device, StorageType.ARCHIVE).getVolume()); + .getVolumeRefByMountAndStorageType(device, StorageType.ARCHIVE).getVolume()); // 2) removeVolume should work as expected volumeList.removeVolume(diskVolume.getStorageLocation(), true); @@ -543,8 +543,7 @@ public void testGetVolumeWithSameDiskArchival() throws Exception { .getVolumeRefByMountAndStorageType( device, StorageType.DISK)); assertEquals(archivalVolume, volumeList.getMountVolumeMap() - .getVolumeRefByMountAndStorageType( - device, StorageType.ARCHIVE).getVolume()); + .getVolumeRefByMountAndStorageType(device, StorageType.ARCHIVE).getVolume()); } // Test dfs stats with same disk archival @@ -619,10 +618,8 @@ public void testDfsUsageStatWithSameDiskArchival() throws Exception { .when(spyDiskVolume).getDfUsed(); Mockito.doReturn(dfUsage) .when(spyArchivalVolume).getDfUsed(); - assertEquals(expectedActualNonDfsUsage, - spyDiskVolume.getActualNonDfsUsed()); - assertEquals(expectedActualNonDfsUsage, - spyArchivalVolume.getActualNonDfsUsed()); + assertEquals(expectedActualNonDfsUsage, spyDiskVolume.getActualNonDfsUsed()); + assertEquals(expectedActualNonDfsUsage, spyArchivalVolume.getActualNonDfsUsed()); // 3) When there is only one volume on a disk mount, // we allocate the full disk capacity regardless of the default ratio. @@ -714,14 +711,14 @@ public void testExcludeSlowDiskWhenChoosingVolume() throws Exception { DEFAULT_BLOCK_SIZE, (short) 3, 0, false, null); // Asserts that the number of blocks created on a slow disk is 0. 
- Assert.assertEquals(0, dn0.getVolumeReport().stream() - .filter(v -> (v.getPath() + "/").equals(slowDisk0OnDn0)).collect(Collectors.toList()).get(0) - .getNumBlocks()); - Assert.assertEquals(0, dn1.getVolumeReport().stream() - .filter(v -> (v.getPath() + "/").equals(slowDisk0OnDn1)).collect(Collectors.toList()).get(0) - .getNumBlocks()); - Assert.assertEquals(0, dn2.getVolumeReport().stream() - .filter(v -> (v.getPath() + "/").equals(slowDisk0OnDn2)).collect(Collectors.toList()).get(0) - .getNumBlocks()); + assertEquals(0, + dn0.getVolumeReport().stream().filter(v -> (v.getPath() + "/").equals(slowDisk0OnDn0)) + .collect(Collectors.toList()).get(0).getNumBlocks()); + assertEquals(0, + dn1.getVolumeReport().stream().filter(v -> (v.getPath() + "/").equals(slowDisk0OnDn1)) + .collect(Collectors.toList()).get(0).getNumBlocks()); + assertEquals(0, + dn2.getVolumeReport().stream().filter(v -> (v.getPath() + "/").equals(slowDisk0OnDn2)) + .collect(Collectors.toList()).get(0).getNumBlocks()); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestInterDatanodeProtocol.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestInterDatanodeProtocol.java index 659d53eda9b6e..54641795bc73e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestInterDatanodeProtocol.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestInterDatanodeProtocol.java @@ -17,9 +17,11 @@ */ package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl; -import static org.junit.Assert.assertNull; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; import java.io.IOException; import java.net.InetSocketAddress; @@ -61,10 +63,9 @@ import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.ipc.Server; import org.apache.hadoop.net.NetUtils; -import org.junit.Assert; -import org.junit.Test; +import org.junit.jupiter.api.Test; -import static org.junit.Assume.assumeTrue; +import static org.junit.jupiter.api.Assumptions.assumeTrue; /** * This tests InterDataNodeProtocol for block handling. 
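The assertion rewrites in these test classes all follow the same argument-order rule: JUnit 4's Assert placed the failure message first, whereas org.junit.jupiter.api.Assertions takes it as the last, optional parameter. A minimal sketch of the rule with hypothetical values (the class and variable names are not from this patch):

import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNull;
import static org.junit.jupiter.api.Assertions.assertTrue;

class AssertionOrderExample {

  void check(long expected, long actual, Object replica) {
    // JUnit 4: assertEquals("Wrong block count", expected, actual);
    // JUnit 5: the optional message trails the compared values.
    assertEquals(expected, actual, "Wrong block count");
    assertTrue(actual >= 0, "Count should never be negative");
    assertNull(replica, "Data-node should not have this replica.");
  }
}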
@@ -117,8 +118,8 @@ public Writable call(RPC.RpcKind rpcKind, String protocol, Writable param, long public static void checkMetaInfo(ExtendedBlock b, DataNode dn) throws IOException { Block metainfo = DataNodeTestUtils.getFSDataset(dn).getStoredBlock( b.getBlockPoolId(), b.getBlockId()); - Assert.assertEquals(b.getBlockId(), metainfo.getBlockId()); - Assert.assertEquals(b.getNumBytes(), metainfo.getNumBytes()); + assertEquals(b.getBlockId(), metainfo.getBlockId()); + assertEquals(b.getNumBytes(), metainfo.getNumBytes()); } public static LocatedBlock getLastLocatedBlock( @@ -222,11 +223,12 @@ private static ReplicaInfo createReplicaInfo(Block b) { return new FinalizedReplica(b, new ExternalVolumeImpl(), null); } - private static void assertEquals(ReplicaInfo originalInfo, ReplicaRecoveryInfo recoveryInfo) { - Assert.assertEquals(originalInfo.getBlockId(), recoveryInfo.getBlockId()); - Assert.assertEquals(originalInfo.getGenerationStamp(), recoveryInfo.getGenerationStamp()); - Assert.assertEquals(originalInfo.getBytesOnDisk(), recoveryInfo.getNumBytes()); - Assert.assertEquals(originalInfo.getState(), recoveryInfo.getOriginalReplicaState()); + private static void assertReplicaEquals(ReplicaInfo originalInfo, ReplicaRecoveryInfo + recoveryInfo) { + assertEquals(originalInfo.getBlockId(), recoveryInfo.getBlockId()); + assertEquals(originalInfo.getGenerationStamp(), recoveryInfo.getGenerationStamp()); + assertEquals(originalInfo.getBytesOnDisk(), recoveryInfo.getNumBytes()); + assertEquals(originalInfo.getState(), recoveryInfo.getOriginalReplicaState()); } /** Test @@ -255,28 +257,28 @@ public void testInitReplicaRecovery() throws IOException { final ReplicaRecoveryInfo recoveryInfo = FsDatasetImpl .initReplicaRecovery(bpid, map, blocks[0], recoveryid, DFSConfigKeys.DFS_DATANODE_XCEIVER_STOP_TIMEOUT_MILLIS_DEFAULT, manager); - assertEquals(originalInfo, recoveryInfo); + assertReplicaEquals(originalInfo, recoveryInfo); final ReplicaUnderRecovery updatedInfo = (ReplicaUnderRecovery)map.get(bpid, b); - Assert.assertEquals(originalInfo.getBlockId(), updatedInfo.getBlockId()); - Assert.assertEquals(recoveryid, updatedInfo.getRecoveryID()); + assertEquals(originalInfo.getBlockId(), updatedInfo.getBlockId()); + assertEquals(recoveryid, updatedInfo.getRecoveryID()); //recover one more time final long recoveryid2 = gs + 2; final ReplicaRecoveryInfo recoveryInfo2 = FsDatasetImpl .initReplicaRecovery(bpid, map, blocks[0], recoveryid2, DFSConfigKeys.DFS_DATANODE_XCEIVER_STOP_TIMEOUT_MILLIS_DEFAULT, manager); - assertEquals(originalInfo, recoveryInfo2); + assertReplicaEquals(originalInfo, recoveryInfo2); final ReplicaUnderRecovery updatedInfo2 = (ReplicaUnderRecovery)map.get(bpid, b); - Assert.assertEquals(originalInfo.getBlockId(), updatedInfo2.getBlockId()); - Assert.assertEquals(recoveryid2, updatedInfo2.getRecoveryID()); + assertEquals(originalInfo.getBlockId(), updatedInfo2.getBlockId()); + assertEquals(recoveryid2, updatedInfo2.getRecoveryID()); //case RecoveryInProgressException try { FsDatasetImpl.initReplicaRecovery(bpid, map, b, recoveryid, DFSConfigKeys.DFS_DATANODE_XCEIVER_STOP_TIMEOUT_MILLIS_DEFAULT, manager); - Assert.fail(); + fail(); } catch(RecoveryInProgressException ripe) { System.out.println("GOOD: getting " + ripe); @@ -289,7 +291,7 @@ public void testInitReplicaRecovery() throws IOException { ReplicaRecoveryInfo r = FsDatasetImpl.initReplicaRecovery(bpid, map, b, recoveryid, DFSConfigKeys.DFS_DATANODE_XCEIVER_STOP_TIMEOUT_MILLIS_DEFAULT, manager); - Assert.assertNull("Data-node should 
not have this replica.", r); + assertNull(r, "Data-node should not have this replica."); } { // BlockRecoveryFI_02: "THIS IS NOT SUPPOSED TO HAPPEN" with recovery id < gs @@ -298,7 +300,7 @@ public void testInitReplicaRecovery() throws IOException { try { FsDatasetImpl.initReplicaRecovery(bpid, map, b, recoveryid, DFSConfigKeys.DFS_DATANODE_XCEIVER_STOP_TIMEOUT_MILLIS_DEFAULT, manager); - Assert.fail(); + fail(); } catch(IOException ioe) { System.out.println("GOOD: getting " + ioe); @@ -347,11 +349,11 @@ public void testUpdateReplicaUnderRecovery() throws IOException { final LocatedBlock locatedblock = getLastLocatedBlock( DFSClientAdapter.getDFSClient(dfs).getNamenode(), filestr); final DatanodeInfo[] datanodeinfo = locatedblock.getLocations(); - Assert.assertTrue(datanodeinfo.length > 0); + assertTrue(datanodeinfo.length > 0); //get DataNode and FSDataset objects final DataNode datanode = cluster.getDataNode(datanodeinfo[0].getIpcPort()); - Assert.assertTrue(datanode != null); + assertTrue(datanode != null); //initReplicaRecovery final ExtendedBlock b = locatedblock.getBlock(); @@ -364,7 +366,7 @@ public void testUpdateReplicaUnderRecovery() throws IOException { //check replica final Replica replica = cluster.getFsDatasetTestUtils(datanode).fetchReplica(b); - Assert.assertEquals(ReplicaState.RUR, replica.getState()); + assertEquals(ReplicaState.RUR, replica.getState()); //check meta data before update cluster.getFsDatasetTestUtils(datanode).checkStoredReplica(replica); @@ -379,7 +381,7 @@ public void testUpdateReplicaUnderRecovery() throws IOException { //update should fail fsdataset.updateReplicaUnderRecovery(tmp, recoveryid, tmp.getBlockId(), newlength); - Assert.fail(); + fail(); } catch(IOException ioe) { System.out.println("GOOD: getting " + ioe); } @@ -400,28 +402,29 @@ public void testUpdateReplicaUnderRecovery() throws IOException { /** Test to verify that InterDatanode RPC timesout as expected when * the server DN does not respond. 
*/ - @Test(expected=SocketTimeoutException.class) + @Test public void testInterDNProtocolTimeout() throws Throwable { - final Server server = new TestServer(1, true); - server.start(); - - final InetSocketAddress addr = NetUtils.getConnectAddress(server); - DatanodeID fakeDnId = DFSTestUtil.getLocalDatanodeID(addr.getPort()); - DatanodeInfo dInfo = new DatanodeInfoBuilder().setNodeID(fakeDnId) - .build(); - InterDatanodeProtocol proxy = null; - - try { - proxy = DataNode.createInterDataNodeProtocolProxy( - dInfo, conf, 500, false); - proxy.initReplicaRecovery(new RecoveringBlock( - new ExtendedBlock("bpid", 1), null, 100)); - fail ("Expected SocketTimeoutException exception, but did not get."); - } finally { - if (proxy != null) { - RPC.stopProxy(proxy); + assertThrows(SocketTimeoutException.class, () -> { + final Server server = new TestServer(1, true); + server.start(); + final InetSocketAddress addr = NetUtils.getConnectAddress(server); + DatanodeID fakeDnId = DFSTestUtil.getLocalDatanodeID(addr.getPort()); + DatanodeInfo dInfo = new DatanodeInfoBuilder().setNodeID(fakeDnId) + .build(); + InterDatanodeProtocol proxy = null; + try { + proxy = DataNode.createInterDataNodeProtocolProxy( + dInfo, conf, 500, false); + proxy.initReplicaRecovery(new RecoveringBlock( + new ExtendedBlock("bpid", 1), null, 100)); + fail("Expected SocketTimeoutException exception, but did not get."); + } finally { + if (proxy != null) { + RPC.stopProxy(proxy); + } + server.stop(); } - server.stop(); - } + }); + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyPersistFiles.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyPersistFiles.java index 14ed26e9b5544..f1f57a9714f02 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyPersistFiles.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyPersistFiles.java @@ -23,8 +23,8 @@ import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.ThreadUtil; -import org.junit.Assert; -import org.junit.Test; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.Timeout; import java.io.IOException; import java.util.EnumSet; @@ -36,9 +36,10 @@ import static org.apache.hadoop.fs.StorageType.RAM_DISK; -import static org.hamcrest.core.Is.is; -import static org.junit.Assert.assertThat; -import static org.junit.Assert.fail; +import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; public class TestLazyPersistFiles extends LazyPersistTestCase { private static final int THREADPOOL_SIZE = 10; @@ -101,7 +102,7 @@ public void testCorruptFilesAreDiscarded() // Stop the DataNode. shutdownDataNodes(); - assertThat(cluster.getNamesystem().getNumDeadDataNodes(), is(1)); + assertThat(cluster.getNamesystem().getNumDeadDataNodes()).isEqualTo(1); // Next, wait for the redundancy monitor to mark the file as corrupt. 
waitForRedundancyMonitorCycle(); @@ -140,7 +141,8 @@ public void testDisableLazyPersistFileScrubber() /** * If NN restarted then lazyPersist files should not deleted */ - @Test(timeout = 20000) + @Test + @Timeout(value = 20) public void testFileShouldNotDiscardedIfNNRestarted() throws IOException, InterruptedException, TimeoutException { getClusterBuilder().setRamDiskReplicaCapacity(2).build(); @@ -182,7 +184,7 @@ public void testConcurrentRead() @Override public void run() { try { - Assert.assertTrue(verifyReadRandomFile(path1, BLOCK_SIZE, SEED)); + assertTrue(verifyReadRandomFile(path1, BLOCK_SIZE, SEED)); } catch (Throwable e) { LOG.error("readerRunnable error", e); testFailed.set(true); @@ -201,7 +203,7 @@ public void run() { for (int i = 0; i < NUM_TASKS; i++) { ThreadUtil.joinUninterruptibly(threads[i]); } - Assert.assertFalse(testFailed.get()); + assertFalse(testFailed.get()); } /** @@ -244,7 +246,7 @@ public void testConcurrentWrites() // Stop executor from adding new tasks to finish existing threads in queue latch.await(); - assertThat(testFailed.get(), is(false)); + assertThat(testFailed.get()).isEqualTo(false); } class WriterRunnable implements Runnable { @@ -284,7 +286,8 @@ public void run() { } } - @Test(timeout = 20000) + @Test + @Timeout(value = 20) public void testReleaseVolumeRefIfExceptionThrown() throws IOException, InterruptedException { getClusterBuilder().setRamDiskReplicaCapacity(2).build(); @@ -313,8 +316,7 @@ public void testReleaseVolumeRefIfExceptionThrown() // asyncLazyPersistService is already shutdown. // If we do not release references, the number of // references will increase infinitely. - Assert.assertTrue( - beforeCnts[i] == afterCnt || beforeCnts[i] == (afterCnt - 1)); + assertTrue(beforeCnts[i] == afterCnt || beforeCnts[i] == (afterCnt - 1)); } } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyPersistLockedMemory.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyPersistLockedMemory.java index 699854cd1759c..af62032df457d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyPersistLockedMemory.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyPersistLockedMemory.java @@ -30,7 +30,7 @@ import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils; import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi; import org.apache.hadoop.test.GenericTestUtils; -import org.junit.Test; +import org.junit.jupiter.api.Test; import java.io.IOException; import java.util.EnumSet; @@ -40,8 +40,7 @@ import static org.apache.hadoop.fs.CreateFlag.LAZY_PERSIST; import static org.apache.hadoop.fs.StorageType.DEFAULT; import static org.apache.hadoop.fs.StorageType.RAM_DISK; -import static org.hamcrest.core.Is.is; -import static org.junit.Assert.assertThat; +import static org.assertj.core.api.Assertions.assertThat; /** * Verify that locked memory is used correctly when writing to replicas in @@ -77,7 +76,7 @@ public void testReservation() Path path = new Path("/" + METHOD_NAME + ".dat"); makeTestFile(path, BLOCK_SIZE, true); ensureFileReplicasOnStorageType(path, RAM_DISK); - assertThat(fsd.getCacheUsed(), is((long) BLOCK_SIZE)); + assertThat(fsd.getCacheUsed()).isEqualTo((long) BLOCK_SIZE); } @Test @@ -91,7 +90,7 @@ public void testReleaseOnFileDeletion() Path path = new Path("/" + METHOD_NAME 
+ ".dat"); makeTestFile(path, BLOCK_SIZE, true); ensureFileReplicasOnStorageType(path, RAM_DISK); - assertThat(fsd.getCacheUsed(), is((long) BLOCK_SIZE)); + assertThat(fsd.getCacheUsed()).isEqualTo((long) BLOCK_SIZE); // Delete the file and ensure that the locked memory is released. fs.delete(path, false); @@ -114,7 +113,7 @@ public void testReleaseOnEviction() throws Exception { Path path1 = new Path("/" + METHOD_NAME + ".01.dat"); makeTestFile(path1, BLOCK_SIZE, true); - assertThat(fsd.getCacheUsed(), is((long) BLOCK_SIZE)); + assertThat(fsd.getCacheUsed()).isEqualTo((long) BLOCK_SIZE); // Wait until the replica is written to persistent storage. waitForMetric("RamDiskBlocksLazyPersisted", 1); @@ -138,7 +137,7 @@ public void testShortBlockFinalized() Path path = new Path("/" + METHOD_NAME + ".dat"); makeTestFile(path, 1, true); - assertThat(fsd.getCacheUsed(), is(osPageSize)); + assertThat(fsd.getCacheUsed()).isEqualTo(osPageSize); // Delete the file and ensure locked RAM usage goes to zero. fs.delete(path, false); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyPersistPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyPersistPolicy.java index 6454f51abdf4f..cfea20c4197b8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyPersistPolicy.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyPersistPolicy.java @@ -22,14 +22,11 @@ import org.apache.hadoop.fs.SafeModeAction; import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; import org.apache.hadoop.test.GenericTestUtils; -import org.junit.Test; +import org.junit.jupiter.api.Test; import java.io.IOException; -import static org.hamcrest.core.Is.is; -import static org.hamcrest.core.IsNot.not; -import static org.junit.Assert.assertThat; - +import static org.assertj.core.api.Assertions.assertThat; public class TestLazyPersistPolicy extends LazyPersistTestCase { @Test @@ -42,7 +39,7 @@ public void testPolicyNotSetByDefault() throws IOException { // Stat the file and check that the LAZY_PERSIST policy is not // returned back. HdfsFileStatus status = client.getFileInfo(path.toString()); - assertThat(status.getStoragePolicy(), not(LAZY_PERSIST_POLICY_ID)); + assertThat(status.getStoragePolicy()).isNotEqualTo(LAZY_PERSIST_POLICY_ID); } @Test @@ -54,7 +51,7 @@ public void testPolicyPropagation() throws IOException { makeTestFile(path, 0, true); // Stat the file and check that the lazyPersist flag is returned back. HdfsFileStatus status = client.getFileInfo(path.toString()); - assertThat(status.getStoragePolicy(), is(LAZY_PERSIST_POLICY_ID)); + assertThat(status.getStoragePolicy()).isEqualTo(LAZY_PERSIST_POLICY_ID); } @Test @@ -68,7 +65,7 @@ public void testPolicyPersistenceInEditLog() throws IOException { // Stat the file and check that the lazyPersist flag is returned back. HdfsFileStatus status = client.getFileInfo(path.toString()); - assertThat(status.getStoragePolicy(), is(LAZY_PERSIST_POLICY_ID)); + assertThat(status.getStoragePolicy()).isEqualTo(LAZY_PERSIST_POLICY_ID); } @Test @@ -86,6 +83,6 @@ public void testPolicyPersistenceInFsImage() throws IOException { // Stat the file and check that the lazyPersist flag is returned back. 
HdfsFileStatus status = client.getFileInfo(path.toString()); - assertThat(status.getStoragePolicy(), is(LAZY_PERSIST_POLICY_ID)); + assertThat(status.getStoragePolicy()).isEqualTo(LAZY_PERSIST_POLICY_ID); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyPersistReplicaPlacement.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyPersistReplicaPlacement.java index b6413ec6246fe..c0562cb9dc491 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyPersistReplicaPlacement.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyPersistReplicaPlacement.java @@ -23,16 +23,15 @@ import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.protocol.LocatedBlocks; import org.apache.hadoop.test.GenericTestUtils; -import org.junit.Test; +import org.junit.jupiter.api.Test; import java.io.IOException; import java.util.concurrent.TimeoutException; import static org.apache.hadoop.fs.StorageType.DEFAULT; import static org.apache.hadoop.fs.StorageType.RAM_DISK; -import static org.hamcrest.CoreMatchers.is; -import static org.hamcrest.MatcherAssert.assertThat; -import static org.junit.Assert.fail; +import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.jupiter.api.Assertions.fail; public class TestLazyPersistReplicaPlacement extends LazyPersistTestCase { @Test @@ -147,8 +146,8 @@ public void testFallbackToDiskPartial() // Since eviction is asynchronous, depending on the timing of eviction // wrt writes, we may get 2 or less blocks on RAM disk. - assertThat(numBlocksOnRamDisk, is(2)); - assertThat(numBlocksOnDisk, is(3)); + assertThat(numBlocksOnRamDisk).isEqualTo(2); + assertThat(numBlocksOnDisk).isEqualTo(3); } /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyPersistReplicaRecovery.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyPersistReplicaRecovery.java index 5fa470c86e0db..c1d64dbae9d76 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyPersistReplicaRecovery.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyPersistReplicaRecovery.java @@ -27,14 +27,14 @@ import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter; import org.apache.hadoop.test.GenericTestUtils; -import org.junit.Test; +import org.junit.jupiter.api.Test; import java.io.IOException; import java.util.concurrent.TimeoutException; import static org.apache.hadoop.fs.StorageType.DEFAULT; import static org.apache.hadoop.fs.StorageType.RAM_DISK; -import static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.assertTrue; public class TestLazyPersistReplicaRecovery extends LazyPersistTestCase { @Test @@ -61,8 +61,7 @@ public void testDnRestartWithSavedReplicas() ensureFileReplicasOnStorageType(path1, RAM_DISK); LOG.info("Restarting the DataNode"); - assertTrue("DN did not restart properly", - cluster.restartDataNode(0, true)); + assertTrue(cluster.restartDataNode(0, true), "DN did not restart properly"); // wait for blockreport waitForBlockReport(dn, dnd); // Ensure that 
the replica is now on persistent storage. diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyWriter.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyWriter.java index 56cc41e37fe38..c9c7239d6b746 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyWriter.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyWriter.java @@ -23,8 +23,7 @@ import org.apache.hadoop.hdfs.protocol.LocatedBlocks; import org.apache.hadoop.hdfs.server.datanode.DataNode; import org.apache.hadoop.test.GenericTestUtils; -import org.junit.Assert; -import org.junit.Test; +import org.junit.jupiter.api.Test; import java.io.IOException; import java.util.ArrayList; @@ -33,9 +32,9 @@ import static org.apache.hadoop.fs.StorageType.DEFAULT; import static org.apache.hadoop.fs.StorageType.RAM_DISK; -import static org.hamcrest.core.Is.is; -import static org.junit.Assert.assertThat; -import static org.junit.Assert.assertTrue; +import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertTrue; public class TestLazyWriter extends LazyPersistTestCase { @Test @@ -192,9 +191,9 @@ public void testDeleteBeforePersist() ensureFileReplicasOnStorageType(path, RAM_DISK); // Delete before persist client.delete(path.toString(), false); - Assert.assertFalse(fs.exists(path)); + assertFalse(fs.exists(path)); - assertThat(verifyDeletedBlocks(locatedBlocks), is(true)); + assertThat(verifyDeletedBlocks(locatedBlocks)).isEqualTo(true); verifyRamDiskJMXMetric("RamDiskBlocksDeletedBeforeLazyPersisted", 1); } @@ -218,9 +217,9 @@ public void testDeleteAfterPersist() // Delete after persist client.delete(path.toString(), false); - Assert.assertFalse(fs.exists(path)); + assertFalse(fs.exists(path)); - assertThat(verifyDeletedBlocks(locatedBlocks), is(true)); + assertThat(verifyDeletedBlocks(locatedBlocks)).isEqualTo(true); verifyRamDiskJMXMetric("RamDiskBlocksLazyPersisted", 1); verifyRamDiskJMXMetric("RamDiskBytesLazyPersisted", BLOCK_SIZE); } @@ -243,17 +242,17 @@ public void testDfsUsageCreateDelete() makeTestFile(path, BLOCK_SIZE, true); long usedAfterCreate = fs.getUsed(); - assertThat(usedAfterCreate, is((long) BLOCK_SIZE)); + assertThat(usedAfterCreate).isEqualTo((long) BLOCK_SIZE); waitForMetric("RamDiskBlocksLazyPersisted", 1); long usedAfterPersist = fs.getUsed(); - assertThat(usedAfterPersist, is((long) BLOCK_SIZE)); + assertThat(usedAfterPersist).isEqualTo((long) BLOCK_SIZE); // Delete after persist client.delete(path.toString(), false); long usedAfterDelete = fs.getUsed(); - assertThat(usedBeforeCreate, is(usedAfterDelete)); + assertThat(usedBeforeCreate).isEqualTo(usedAfterDelete); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestPmemCacheRecovery.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestPmemCacheRecovery.java index 6ce420adeb197..d64f758f932ec 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestPmemCacheRecovery.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestPmemCacheRecovery.java @@ -23,11 +23,11 
@@ import static org.apache.hadoop.hdfs.DFSConfigKeys.*; import static org.apache.hadoop.test.MetricsAsserts.getMetrics; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; -import static org.junit.Assume.assumeTrue; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; +import static org.junit.jupiter.api.Assumptions.assumeTrue; import java.io.File; import java.io.IOException; @@ -55,12 +55,12 @@ import org.apache.hadoop.metrics2.MetricsRecordBuilder; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.test.MetricsAsserts; -import org.junit.After; -import org.junit.AfterClass; -import org.junit.Assert; -import org.junit.Before; -import org.junit.BeforeClass; -import org.junit.Test; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.Timeout; import org.slf4j.event.Level; import java.util.function.Supplier; @@ -102,9 +102,9 @@ public class TestPmemCacheRecovery { LoggerFactory.getLogger(FsDatasetCache.class), Level.DEBUG); } - @BeforeClass + @BeforeAll public static void setUpClass() throws Exception { - assumeTrue("Requires PMDK", NativeIO.POSIX.isPmdkAvailable()); + assumeTrue(NativeIO.POSIX.isPmdkAvailable(), "Requires PMDK"); oldInjector = DataNodeFaultInjector.get(); DataNodeFaultInjector.set(new DataNodeFaultInjector() { @@ -120,12 +120,12 @@ public void endOfferService() throws Exception { }); } - @AfterClass + @AfterAll public static void tearDownClass() throws Exception { DataNodeFaultInjector.set(oldInjector); } - @Before + @BeforeEach public void setUp() throws Exception { conf = new HdfsConfiguration(); conf.setBoolean(DFS_DATANODE_PMEM_CACHE_RECOVERY_KEY, true); @@ -155,7 +155,7 @@ public void setUp() throws Exception { cacheManager = ((FsDatasetImpl) dn.getFSDataset()).cacheManager; } - @After + @AfterEach public void tearDown() throws Exception { if (fs != null) { fs.close(); @@ -215,12 +215,13 @@ public List getExtendedBlockId(Path filePath, long fileLen) return keys; } - @Test(timeout = 60000) + @Test + @Timeout(value = 60) public void testCacheRecovery() throws Exception { final int cacheBlocksNum = Ints.checkedCast(CACHE_AMOUNT / BLOCK_SIZE); BlockReaderTestUtil.enableHdfsCachingTracing(); - Assert.assertEquals(0, CACHE_AMOUNT % BLOCK_SIZE); + assertEquals(0, CACHE_AMOUNT % BLOCK_SIZE); final Path testFile = new Path("/testFile"); final long testFileLen = cacheBlocksNum * BLOCK_SIZE; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestProvidedImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestProvidedImpl.java index dbd77c7f13a57..0c6cb6751782d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestProvidedImpl.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestProvidedImpl.java @@ -18,10 +18,10 @@ package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl; import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_KEY; -import static org.junit.Assert.assertArrayEquals; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; +import static org.junit.jupiter.api.Assertions.assertArrayEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -82,8 +82,8 @@ import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi.BlockIterator; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.util.StringUtils; -import org.junit.Before; -import org.junit.Test; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -313,7 +313,7 @@ private int getNumVolumes() { } } - @Before + @BeforeEach public void setUp() throws IOException { datanode = mock(DataNode.class); storage = mock(DataStorage.class); @@ -367,8 +367,7 @@ public void testReserved() throws Exception { @Test public void testProvidedVolumeImpl() throws IOException { - assertEquals(NUM_LOCAL_INIT_VOLUMES + NUM_PROVIDED_INIT_VOLUMES, - getNumVolumes()); + assertEquals(NUM_LOCAL_INIT_VOLUMES + NUM_PROVIDED_INIT_VOLUMES, getNumVolumes()); assertEquals(NUM_PROVIDED_INIT_VOLUMES, providedVolumes.size()); assertEquals(0, dataset.getNumFailedVolumes()); @@ -376,8 +375,7 @@ public void testProvidedVolumeImpl() throws IOException { // check basic information about provided volume assertEquals(DFSConfigKeys.DFS_PROVIDER_STORAGEUUID_DEFAULT, providedVolumes.get(i).getStorageID()); - assertEquals(StorageType.PROVIDED, - providedVolumes.get(i).getStorageType()); + assertEquals(StorageType.PROVIDED, providedVolumes.get(i).getStorageType()); long space = providedVolumes.get(i).getBlockPoolUsed( BLOCK_POOL_IDS[CHOSEN_BP_ID]); @@ -388,9 +386,9 @@ public void testProvidedVolumeImpl() throws IOException { providedVolumes.get(i).shutdownBlockPool( BLOCK_POOL_IDS[1 - CHOSEN_BP_ID], null); try { - assertEquals(0, providedVolumes.get(i) - .getBlockPoolUsed(BLOCK_POOL_IDS[1 - CHOSEN_BP_ID])); - // should not be triggered + assertEquals(0, + providedVolumes.get(i).getBlockPoolUsed(BLOCK_POOL_IDS[1 - CHOSEN_BP_ID])); + // should not be triggered assertTrue(false); } catch (IOException e) { LOG.info("Expected exception: " + e); @@ -413,8 +411,7 @@ public void testBlockLoad() throws IOException { assertEquals(null, volumeMap.replicas(BLOCK_POOL_IDS[j])); } } - assertEquals(NUM_PROVIDED_BLKS, - volumeMap.replicas(BLOCK_POOL_IDS[CHOSEN_BP_ID]).size()); + assertEquals(NUM_PROVIDED_BLKS, volumeMap.replicas(BLOCK_POOL_IDS[CHOSEN_BP_ID]).size()); } } @@ -499,48 +496,37 @@ public void testProvidedVolumeContents() throws IOException { // all these blocks can belong to the provided volume int blocksFound = getBlocksInProvidedVolumes(providedBasePath + "/test1/", expectedBlocks, minId); - assertEquals( - "Number of blocks in provided volumes should be " + expectedBlocks, - expectedBlocks, blocksFound); + assertEquals(expectedBlocks, blocksFound, + "Number of blocks in provided volumes should be " + expectedBlocks); blocksFound = getBlocksInProvidedVolumes( "file:/" + providedBasePath + "/test1/", expectedBlocks, minId); - assertEquals( - "Number of blocks in provided volumes should be " + expectedBlocks, - expectedBlocks, blocksFound); + 
assertEquals(expectedBlocks, blocksFound, + "Number of blocks in provided volumes should be " + expectedBlocks); // use a path that is entirely different from the providedBasePath // none of these blocks can belong to the volume blocksFound = getBlocksInProvidedVolumes("randomtest1/", expectedBlocks, minId); - assertEquals("Number of blocks in provided volumes should be 0", 0, - blocksFound); + assertEquals(0, blocksFound, "Number of blocks in provided volumes should be 0"); } @Test public void testProvidedVolumeContainsBlock() throws URISyntaxException { assertEquals(true, ProvidedVolumeImpl.containsBlock(null, null)); - assertEquals(false, - ProvidedVolumeImpl.containsBlock(new URI("file:/a"), null)); + assertEquals(false, ProvidedVolumeImpl.containsBlock(new URI("file:/a"), null)); assertEquals(true, - ProvidedVolumeImpl.containsBlock(new URI("file:/a/b/c/"), - new URI("file:/a/b/c/d/e.file"))); + ProvidedVolumeImpl.containsBlock(new URI("file:/a/b/c/"), new URI("file:/a/b/c/d/e.file"))); assertEquals(true, - ProvidedVolumeImpl.containsBlock(new URI("/a/b/c/"), - new URI("file:/a/b/c/d/e.file"))); + ProvidedVolumeImpl.containsBlock(new URI("/a/b/c/"), new URI("file:/a/b/c/d/e.file"))); assertEquals(true, - ProvidedVolumeImpl.containsBlock(new URI("/a/b/c"), - new URI("file:/a/b/c/d/e.file"))); + ProvidedVolumeImpl.containsBlock(new URI("/a/b/c"), new URI("file:/a/b/c/d/e.file"))); assertEquals(true, - ProvidedVolumeImpl.containsBlock(new URI("/a/b/c/"), - new URI("/a/b/c/d/e.file"))); + ProvidedVolumeImpl.containsBlock(new URI("/a/b/c/"), new URI("/a/b/c/d/e.file"))); assertEquals(true, - ProvidedVolumeImpl.containsBlock(new URI("file:/a/b/c/"), - new URI("/a/b/c/d/e.file"))); + ProvidedVolumeImpl.containsBlock(new URI("file:/a/b/c/"), new URI("/a/b/c/d/e.file"))); assertEquals(false, - ProvidedVolumeImpl.containsBlock(new URI("/a/b/e"), - new URI("file:/a/b/c/d/e.file"))); + ProvidedVolumeImpl.containsBlock(new URI("/a/b/e"), new URI("file:/a/b/c/d/e.file"))); assertEquals(false, - ProvidedVolumeImpl.containsBlock(new URI("file:/a/b/e"), - new URI("file:/a/b/c/d/e.file"))); + ProvidedVolumeImpl.containsBlock(new URI("file:/a/b/e"), new URI("file:/a/b/c/d/e.file"))); assertEquals(true, ProvidedVolumeImpl.containsBlock(new URI("s3a:/bucket1/dir1/"), new URI("s3a:/bucket1/dir1/temp.txt"))); @@ -557,31 +543,28 @@ public void testProvidedVolumeContainsBlock() throws URISyntaxException { @Test public void testProvidedReplicaSuffixExtraction() { - assertEquals("B.txt", ProvidedVolumeImpl.getSuffix( - new Path("file:///A/"), new Path("file:///A/B.txt"))); - assertEquals("B/C.txt", ProvidedVolumeImpl.getSuffix( - new Path("file:///A/"), new Path("file:///A/B/C.txt"))); - assertEquals("B/C/D.txt", ProvidedVolumeImpl.getSuffix( - new Path("file:///A/"), new Path("file:///A/B/C/D.txt"))); - assertEquals("D.txt", ProvidedVolumeImpl.getSuffix( - new Path("file:///A/B/C/"), new Path("file:///A/B/C/D.txt"))); - assertEquals("file:/A/B/C/D.txt", ProvidedVolumeImpl.getSuffix( - new Path("file:///X/B/C/"), new Path("file:///A/B/C/D.txt"))); - assertEquals("D.txt", ProvidedVolumeImpl.getSuffix( - new Path("/A/B/C"), new Path("/A/B/C/D.txt"))); - assertEquals("D.txt", ProvidedVolumeImpl.getSuffix( - new Path("/A/B/C/"), new Path("/A/B/C/D.txt"))); + assertEquals("B.txt", + ProvidedVolumeImpl.getSuffix(new Path("file:///A/"), new Path("file:///A/B.txt"))); + assertEquals("B/C.txt", + ProvidedVolumeImpl.getSuffix(new Path("file:///A/"), new Path("file:///A/B/C.txt"))); + assertEquals("B/C/D.txt", + 
ProvidedVolumeImpl.getSuffix(new Path("file:///A/"), new Path("file:///A/B/C/D.txt"))); + assertEquals("D.txt", ProvidedVolumeImpl.getSuffix(new Path("file:///A/B/C/"), + new Path("file:///A/B/C/D.txt"))); + assertEquals("file:/A/B/C/D.txt", ProvidedVolumeImpl.getSuffix(new Path("file:///X/B/C/"), + new Path("file:///A/B/C/D.txt"))); + assertEquals("D.txt", + ProvidedVolumeImpl.getSuffix(new Path("/A/B/C"), new Path("/A/B/C/D.txt"))); + assertEquals("D.txt", + ProvidedVolumeImpl.getSuffix(new Path("/A/B/C/"), new Path("/A/B/C/D.txt"))); assertEquals("data/current.csv", ProvidedVolumeImpl.getSuffix( - new Path("wasb:///users/alice/"), - new Path("wasb:///users/alice/data/current.csv"))); - assertEquals("current.csv", ProvidedVolumeImpl.getSuffix( - new Path("wasb:///users/alice/data"), + new Path("wasb:///users/alice/"), new Path("wasb:///users/alice/data/current.csv"))); + assertEquals("current.csv", ProvidedVolumeImpl.getSuffix(new Path("wasb:///users/alice/data"), new Path("wasb:///users/alice/data/current.csv"))); assertEquals("wasb:/users/alice/data/current.csv", - ProvidedVolumeImpl.getSuffix( - new Path("wasb:///users/bob/"), + ProvidedVolumeImpl.getSuffix(new Path("wasb:///users/bob/"), new Path("wasb:///users/alice/data/current.csv"))); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestReplicaCachingGetSpaceUsed.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestReplicaCachingGetSpaceUsed.java index d4382d27fb228..c29c2bf1bc855 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestReplicaCachingGetSpaceUsed.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestReplicaCachingGetSpaceUsed.java @@ -31,10 +31,10 @@ import org.apache.hadoop.hdfs.server.datanode.Replica; import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi; import org.apache.hadoop.io.IOUtils; -import org.junit.After; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.Timeout; import java.io.ByteArrayInputStream; import java.io.IOException; @@ -44,7 +44,8 @@ import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_DU_INTERVAL_KEY; import static org.apache.hadoop.test.PlatformAssumptions.assumeNotWindows; -import static org.junit.Assert.assertEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.fail; /** * Unit test for ReplicaCachingGetSpaceUsed class. @@ -55,7 +56,7 @@ public class TestReplicaCachingGetSpaceUsed { private DistributedFileSystem fs; private DataNode dataNode; - @Before + @BeforeEach public void setUp() throws IOException, NoSuchMethodException, InterruptedException { conf = new Configuration(); @@ -70,7 +71,7 @@ public void setUp() fs = cluster.getFileSystem(); } - @After + @AfterEach public void tearDown() throws IOException { if (cluster != null) { cluster.shutdown(); @@ -104,8 +105,7 @@ public void testReplicaCachingGetSpaceUsedByFINALIZEDReplica() // Guarantee ReplicaCachingGetSpaceUsed#refresh() is called after replica // has been written to disk. 
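// The assertion rewrites throughout these hunks follow one convention: JUnit 4's
// org.junit.Assert takes the failure message as the first argument, while JUnit 5's
// org.junit.jupiter.api.Assertions takes it as the last one. A minimal, hypothetical
// sketch of that argument reordering (the class name and values below are illustrative
// only, not part of the patch):
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNotNull;

import org.junit.jupiter.api.Test;

public class ExampleMessageArgumentOrderTest {
  @Test
  public void messageMovesToTheLastParameter() {
    int expectedBlocks = 2;
    int blocksFound = 2;
    // JUnit 4: assertEquals("Wrong number of blocks", expectedBlocks, blocksFound);
    assertEquals(expectedBlocks, blocksFound, "Wrong number of blocks");

    String clusterName = "test-cluster";
    // JUnit 4: assertNotNull("cluster should be initialized", clusterName);
    assertNotNull(clusterName, "cluster should be initialized");
  }
}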
Thread.sleep(2000); - assertEquals(blockLength + metaLength, - dataNode.getFSDataset().getDfsUsed()); + assertEquals(blockLength + metaLength, dataNode.getFSDataset().getDfsUsed()); fs.delete(new Path("/testReplicaCachingGetSpaceUsedByFINALIZEDReplica"), true); @@ -137,8 +137,7 @@ public void testReplicaCachingGetSpaceUsedByRBWReplica() throws Exception { // Guarantee ReplicaCachingGetSpaceUsed#refresh() is called after replica // has been written to disk. Thread.sleep(2000); - assertEquals(blockLength + metaLength, - dataNode.getFSDataset().getDfsUsed()); + assertEquals(blockLength + metaLength, dataNode.getFSDataset().getDfsUsed()); os.close(); @@ -148,13 +147,13 @@ public void testReplicaCachingGetSpaceUsedByRBWReplica() throws Exception { // After close operation, the replica state will be transformed from RBW to // finalized. But the space used of these replicas are all included and the // dfsUsed value should be same. - assertEquals(blockLength + metaLength, - dataNode.getFSDataset().getDfsUsed()); + assertEquals(blockLength + metaLength, dataNode.getFSDataset().getDfsUsed()); fs.delete(new Path("/testReplicaCachingGetSpaceUsedByRBWReplica"), true); } - @Test(timeout = 15000) + @Test + @Timeout(value = 15) public void testFsDatasetImplDeepCopyReplica() { FsDatasetSpi fsDataset = dataNode.getFSDataset(); ModifyThread modifyThread = new ModifyThread(); @@ -170,7 +169,7 @@ public void testFsDatasetImplDeepCopyReplica() { } } catch (IOException e) { modifyThread.setShouldRun(false); - Assert.fail("Encounter IOException when deep copy replica."); + fail("Encounter IOException when deep copy replica."); } } modifyThread.setShouldRun(false); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestReplicaMap.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestReplicaMap.java index 3058598bb6c6e..1b647bc002625 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestReplicaMap.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestReplicaMap.java @@ -17,15 +17,14 @@ */ package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertNull; -import static org.junit.Assert.fail; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.fail; import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.server.datanode.FinalizedReplica; -import org.junit.Before; -import org.junit.Test; - +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; /** * Unit test for ReplicasMap class @@ -35,7 +34,7 @@ public class TestReplicaMap { private final String bpid = "BP-TEST"; private final Block block = new Block(1234, 1234, 1234); - @Before + @BeforeEach public void setup() { map.add(bpid, new FinalizedReplica(block, null, null)); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestReservedSpaceCalculator.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestReservedSpaceCalculator.java index 10136863964b9..0b0e74a2b8a86 100644 --- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestReservedSpaceCalculator.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestReservedSpaceCalculator.java @@ -20,8 +20,8 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.DF; import org.apache.hadoop.fs.StorageType; -import org.junit.Before; -import org.junit.Test; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; import org.mockito.Mockito; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DU_RESERVED_KEY; @@ -31,7 +31,8 @@ import static org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.ReservedSpaceCalculator.ReservedSpaceCalculatorAggressive; import static org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.ReservedSpaceCalculator.ReservedSpaceCalculatorConservative; import static org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.ReservedSpaceCalculator.ReservedSpaceCalculatorPercentage; -import static org.junit.Assert.assertEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; import static org.mockito.Mockito.when; /** @@ -43,7 +44,7 @@ public class TestReservedSpaceCalculator { private DF usage; private ReservedSpaceCalculator reserved; - @Before + @BeforeEach public void setUp() { conf = new Configuration(); usage = Mockito.mock(DF.class); @@ -217,13 +218,15 @@ public void testReservedSpacePercentagePerDir() { checkReserved(StorageType.DISK, 10000, 4000, dir3); } - @Test(expected = IllegalStateException.class) + @Test public void testInvalidCalculator() { - conf.set(DFS_DATANODE_DU_RESERVED_CALCULATOR_KEY, "INVALIDTYPE"); - reserved = new ReservedSpaceCalculator.Builder(conf) - .setUsage(usage) - .setStorageType(StorageType.DISK) - .build(); + assertThrows(IllegalStateException.class, () -> { + conf.set(DFS_DATANODE_DU_RESERVED_CALCULATOR_KEY, "INVALIDTYPE"); + reserved = new ReservedSpaceCalculator.Builder(conf) + .setUsage(usage) + .setStorageType(StorageType.DISK) + .build(); + }); } private void checkReserved(StorageType storageType, diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestScrLazyPersistFiles.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestScrLazyPersistFiles.java index f5e5be65607a3..6b22d0422423d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestScrLazyPersistFiles.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestScrLazyPersistFiles.java @@ -28,13 +28,9 @@ import org.apache.hadoop.net.unix.DomainSocket; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.NativeCodeLoader; -import org.junit.Assert; -import org.junit.Assume; -import org.junit.Before; -import org.junit.BeforeClass; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.ExpectedException; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Test; import java.io.File; import java.io.IOException; @@ -43,10 +39,13 @@ import static org.apache.hadoop.fs.StorageType.DEFAULT; import static org.apache.hadoop.fs.StorageType.RAM_DISK; import static org.apache.hadoop.test.PlatformAssumptions.assumeNotWindows; -import static 
org.hamcrest.CoreMatchers.equalTo; -import static org.hamcrest.core.Is.is; -import static org.junit.Assert.assertThat; -import static org.junit.Assert.assertTrue; +import static org.assertj.core.api.Assumptions.assumeThat; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.jupiter.api.Assumptions.assumeTrue; /** * Test Lazy persist behavior with short-circuit reads. These tests @@ -55,25 +54,22 @@ */ public class TestScrLazyPersistFiles extends LazyPersistTestCase { - @BeforeClass + @BeforeAll public static void init() { DomainSocket.disableBindPathValidation(); } - @Before + @BeforeEach public void before() { - Assume.assumeTrue(NativeCodeLoader.isNativeCodeLoaded()); + assumeTrue(NativeCodeLoader.isNativeCodeLoaded()); assumeNotWindows(); - Assume.assumeThat(DomainSocket.getLoadingFailureReason(), equalTo(null)); + assumeThat(DomainSocket.getLoadingFailureReason()).isNull(); final long osPageSize = NativeIO.POSIX.getCacheManipulator().getOperatingSystemPageSize(); Preconditions.checkState(BLOCK_SIZE >= osPageSize); Preconditions.checkState(BLOCK_SIZE % osPageSize == 0); } - @Rule - public ExpectedException exception = ExpectedException.none(); - /** * Read in-memory block with Short Circuit Read * Note: the test uses faked RAM_DISK from physical disk. @@ -97,10 +93,9 @@ public void testRamDiskShortCircuitRead() try { byte[] buf = new byte[BUFFER_LENGTH]; fis.read(0, buf, 0, BUFFER_LENGTH); - Assert.assertEquals(BUFFER_LENGTH, - fis.getReadStatistics().getTotalBytesRead()); - Assert.assertEquals(BUFFER_LENGTH, - fis.getReadStatistics().getTotalShortCircuitBytesRead()); + assertEquals(BUFFER_LENGTH, fis.getReadStatistics().getTotalBytesRead()); + assertEquals(BUFFER_LENGTH, + fis.getReadStatistics().getTotalShortCircuitBytesRead()); } finally { fis.close(); fis = null; @@ -133,10 +128,9 @@ public void tesScrDuringEviction() // Ensure path1 is still readable from the open SCR handle. fis.read(0, buf, 0, BUFFER_LENGTH); - assertThat(fis.getReadStatistics().getTotalBytesRead(), - is((long) 2 * BUFFER_LENGTH)); - assertThat(fis.getReadStatistics().getTotalShortCircuitBytesRead(), - is((long) 2 * BUFFER_LENGTH)); + assertThat(fis.getReadStatistics().getTotalBytesRead()).isEqualTo((long) 2 * BUFFER_LENGTH); + assertThat(fis.getReadStatistics().getTotalShortCircuitBytesRead()) + .isEqualTo((long) 2 * BUFFER_LENGTH); } } @@ -162,7 +156,7 @@ public void testLegacyScrAfterEviction() // subsequent legacy short-circuit reads in the ClientContext. // Assert that it didn't get disabled. ClientContext clientContext = client.getClientContext(); - Assert.assertFalse(clientContext.getDisableLegacyBlockReaderLocal()); + assertFalse(clientContext.getDisableLegacyBlockReaderLocal()); } private void doShortCircuitReadAfterEvictionTest() throws IOException, @@ -224,8 +218,9 @@ public void doShortCircuitReadBlockFileCorruptionTest() throws IOException, // verification catches it. 
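// The change just below swaps the JUnit 4 ExpectedException @Rule
// (exception.expect(ChecksumException.class)) for JUnit 5's assertThrows. A
// self-contained sketch of that migration, using a hypothetical parse call in
// place of the HDFS helpers:
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;

import org.junit.jupiter.api.Test;

public class ExampleExpectedExceptionMigrationTest {
  // JUnit 4 style (removed by this patch):
  //   @Rule
  //   public ExpectedException exception = ExpectedException.none();
  //   ...
  //   exception.expect(NumberFormatException.class);
  //   Integer.parseInt("not a number");

  @Test
  public void exceptionIsAssertedExplicitly() {
    NumberFormatException e = assertThrows(NumberFormatException.class,
        () -> Integer.parseInt("not a number"));
    // Unlike ExpectedException, assertThrows returns the thrown exception,
    // so its details can be verified afterwards.
    assertTrue(e.getMessage().contains("not a number"));
  }
}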
ensureFileReplicasOnStorageType(path1, DEFAULT); cluster.corruptReplica(0, DFSTestUtil.getFirstBlock(fs, path1)); - exception.expect(ChecksumException.class); - DFSTestUtil.readFileBuffer(fs, path1); + assertThrows(ChecksumException.class, () -> { + DFSTestUtil.readFileBuffer(fs, path1); + }); } @Test @@ -260,7 +255,8 @@ public void doShortCircuitReadMetaFileCorruptionTest() throws IOException, // verification catches it. ensureFileReplicasOnStorageType(path1, DEFAULT); cluster.corruptMeta(0, DFSTestUtil.getFirstBlock(fs, path1)); - exception.expect(ChecksumException.class); - DFSTestUtil.readFileBuffer(fs, path1); + assertThrows(ChecksumException.class, () -> { + DFSTestUtil.readFileBuffer(fs, path1); + }); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestSpaceReservation.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestSpaceReservation.java index de4d236617374..ef84c1732d5a9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestSpaceReservation.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestSpaceReservation.java @@ -31,11 +31,11 @@ import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; import static org.apache.hadoop.hdfs.DFSConfigKeys.*; -import static org.hamcrest.core.Is.is; -import static org.junit.Assert.assertThat; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.fail; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.fail; +import static org.assertj.core.api.Assertions.assertThat; import org.apache.hadoop.fs.BlockLocation; import org.apache.hadoop.fs.CommonConfigurationKeysPublic; @@ -52,11 +52,10 @@ import org.apache.hadoop.ipc.RemoteException; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.Daemon; -import org.junit.After; -import org.junit.Before; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.ExpectedException; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.Timeout; import org.mockito.Mockito; import org.slf4j.event.Level; @@ -93,7 +92,7 @@ public class TestSpaceReservation { private static Random rand = new Random(); - @Before + @BeforeEach public void before() { conf = new HdfsConfiguration(); } @@ -141,7 +140,7 @@ private void startCluster(int blockSize, int numDatanodes, long perVolumeCapacit } } - @After + @AfterEach public void shutdownCluster() throws IOException { if (singletonVolumeRef != null) { singletonVolumeRef.close(); @@ -185,14 +184,14 @@ private void createFileAndTestSpaceReservation( int bytesWritten = buffer.length; // Check that space was reserved for a full block minus the bytesWritten. - assertThat(singletonVolume.getReservedForReplicas(), - is((long) fileBlockSize - bytesWritten)); + assertThat(singletonVolume.getReservedForReplicas()) + .isEqualTo((long) fileBlockSize - bytesWritten); out.close(); out = null; // Check that the reserved space has been released since we closed the // file. 
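// The reserved-space checks that follow drop Hamcrest's assertThat(actual, is(expected))
// (its org.junit.Assert.assertThat entry point was removed from the imports above) in
// favour of AssertJ's fluent assertThat(actual).isEqualTo(expected). A small sketch of
// the pattern, with made-up numbers standing in for the volume accounting:
import static org.assertj.core.api.Assertions.assertThat;

import org.junit.jupiter.api.Test;

public class ExampleAssertJMigrationTest {
  @Test
  public void fluentEqualityCheck() {
    long fileBlockSize = 4096L;
    long bytesWritten = 1024L;
    long reservedForReplicas = fileBlockSize - bytesWritten;

    // Hamcrest (old): assertThat(reservedForReplicas, is(fileBlockSize - bytesWritten));
    assertThat(reservedForReplicas).isEqualTo(fileBlockSize - bytesWritten);

    // A description can still accompany the check via as(...), roughly matching the
    // message-bearing assertions used elsewhere in this patch.
    assertThat(reservedForReplicas)
        .as("Wrong amount of reserved replica space")
        .isEqualTo(3072L);
  }
}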
- assertThat(singletonVolume.getReservedForReplicas(), is(0L)); + assertThat(singletonVolume.getReservedForReplicas()).isEqualTo(0L); // Reopen the file for appends and write 1 more byte. out = fs.append(path); @@ -202,8 +201,8 @@ private void createFileAndTestSpaceReservation( // Check that space was again reserved for a full block minus the // bytesWritten so far. - assertThat(singletonVolume.getReservedForReplicas(), - is((long) fileBlockSize - bytesWritten)); + assertThat(singletonVolume.getReservedForReplicas()) + .isEqualTo((long) fileBlockSize - bytesWritten); // Write once again and again verify the available space. This ensures // that the reserved space is progressively adjusted to account for bytes @@ -211,8 +210,8 @@ private void createFileAndTestSpaceReservation( out.write(buffer); out.hsync(); bytesWritten += buffer.length; - assertThat(singletonVolume.getReservedForReplicas(), - is((long) fileBlockSize - bytesWritten)); + assertThat(singletonVolume.getReservedForReplicas()) + .isEqualTo((long) fileBlockSize - bytesWritten); } finally { if (out != null) { out.close(); @@ -220,23 +219,23 @@ private void createFileAndTestSpaceReservation( } } - @Test (timeout=300000) + @Test + @Timeout(value = 300) public void testWithDefaultBlockSize() throws IOException, InterruptedException { createFileAndTestSpaceReservation(GenericTestUtils.getMethodName(), BLOCK_SIZE); } - @Test (timeout=300000) + @Test + @Timeout(value = 300) public void testWithNonDefaultBlockSize() throws IOException, InterruptedException { // Same test as previous one, but with a non-default block size. createFileAndTestSpaceReservation(GenericTestUtils.getMethodName(), BLOCK_SIZE * 2); } - @Rule - public ExpectedException thrown = ExpectedException.none(); - - @Test (timeout=300000) + @Test + @Timeout(value = 300) public void testWithLimitedSpace() throws IOException { // Cluster with just enough space for a full block + meta. startCluster(BLOCK_SIZE, 1, 2 * BLOCK_SIZE - 1); @@ -244,30 +243,30 @@ public void testWithLimitedSpace() throws IOException { Path file1 = new Path("/" + methodName + ".01.dat"); Path file2 = new Path("/" + methodName + ".02.dat"); - // Create two files. - FSDataOutputStream os1 = null, os2 = null; + assertThrows(RemoteException.class, () -> { + // Create two files. + FSDataOutputStream os1 = null, os2 = null; + try { + os1 = fs.create(file1); + os2 = fs.create(file2); + + // Write one byte to the first file. + byte[] data = new byte[1]; + os1.write(data); + os1.hsync(); + + // Try to write one byte to the second file. + // The block allocation must fail. + os2.write(data); + os2.hsync(); + } finally { + if (os1 != null) { + os1.close(); + } - try { - os1 = fs.create(file1); - os2 = fs.create(file2); - - // Write one byte to the first file. - byte[] data = new byte[1]; - os1.write(data); - os1.hsync(); - - // Try to write one byte to the second file. - // The block allocation must fail. - thrown.expect(RemoteException.class); - os2.write(data); - os2.hsync(); - } finally { - if (os1 != null) { - os1.close(); + // os2.close() will fail as no block was allocated. } - - // os2.close() will fail as no block was allocated. 
- } + }); } /** @@ -278,7 +277,8 @@ public void testWithLimitedSpace() throws IOException { * * @throws IOException */ - @Test(timeout=300000) + @Test + @Timeout(value = 300) public void testSpaceReleasedOnUnexpectedEof() throws IOException, InterruptedException, TimeoutException { final short replication = 3; @@ -310,7 +310,8 @@ public Boolean get() { } @SuppressWarnings("unchecked") - @Test(timeout = 30000) + @Test + @Timeout(value = 30) public void testRBWFileCreationError() throws Exception { final short replication = 1; @@ -345,8 +346,9 @@ public void testRBWFileCreationError() throws Exception { // Ensure RBW space reserved is released assertTrue( - "Expected ZERO but got " + fsVolumeImpl.getReservedForReplicas(), - fsVolumeImpl.getReservedForReplicas() == 0); + + fsVolumeImpl.getReservedForReplicas() == 0, + "Expected ZERO but got " + fsVolumeImpl.getReservedForReplicas()); // Reserve some bytes to verify double clearing space should't happen fsVolumeImpl.reserveSpaceForReplica(1000); @@ -366,7 +368,8 @@ public void testRBWFileCreationError() throws Exception { assertTrue(fsVolumeImpl.getReservedForReplicas() == 1000); } - @Test(timeout = 30000) + @Test + @Timeout(value = 30) public void testReservedSpaceInJMXBean() throws Exception { final short replication = 1; @@ -391,7 +394,8 @@ public void testReservedSpaceInJMXBean() throws Exception { } } - @Test(timeout = 300000) + @Test + @Timeout(value = 300) public void testTmpSpaceReserve() throws Exception { final short replication = 2; @@ -425,11 +429,9 @@ public void testTmpSpaceReserve() throws Exception { performReReplication(file, true); - assertEquals("Wrong reserve space for Tmp ", byteCount1, - fsVolumeImpl.getRecentReserved()); + assertEquals(byteCount1, fsVolumeImpl.getRecentReserved(), "Wrong reserve space for Tmp "); - assertEquals("Reserved Tmp space is not released", 0, - fsVolumeImpl.getReservedForReplicas()); + assertEquals(0, fsVolumeImpl.getReservedForReplicas(), "Reserved Tmp space is not released"); } // Test when file creation fails @@ -470,11 +472,10 @@ public void testTmpSpaceReserve() throws Exception { performReReplication(file, false); - assertEquals("Wrong reserve space for Tmp ", byteCount2, - fsVolumeImpl.getRecentReserved()); + assertEquals(byteCount2, fsVolumeImpl.getRecentReserved(), "Wrong reserve space for Tmp "); - assertEquals("Tmp space is not released OR released twice", 1000, - fsVolumeImpl.getReservedForReplicas()); + assertEquals(1000, fsVolumeImpl.getReservedForReplicas(), + "Tmp space is not released OR released twice"); } } @@ -499,7 +500,8 @@ private void performReReplication(Path filePath, boolean waitForSuccess) * @throws IOException * @throws InterruptedException */ - @Test (timeout=600000) + @Test + @Timeout(value = 600) public void stressTest() throws IOException, InterruptedException { final int numWriters = 5; startCluster(SMALL_BLOCK_SIZE, 1, SMALL_BLOCK_SIZE * numWriters * 10); @@ -529,7 +531,7 @@ public void stressTest() throws IOException, InterruptedException { " files and hit " + numFailures + " failures"); // Check no space was leaked. 
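// The timeout migrations in this hunk all apply the same conversion: JUnit 4's
// @Test(timeout = ...) is measured in milliseconds, while JUnit 5's @Timeout defaults
// to seconds, so 300000 becomes @Timeout(value = 300), 30000 becomes 30, and 600000
// becomes 600. A hypothetical example showing the default unit and an explicit one:
import java.util.concurrent.TimeUnit;

import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.Timeout;

public class ExampleTimeoutMigrationTest {
  // JUnit 4 (removed): @Test(timeout = 300000)
  @Test
  @Timeout(value = 300) // interpreted as 300 seconds, the default unit
  public void defaultUnitIsSeconds() throws InterruptedException {
    Thread.sleep(10);
  }

  // Equivalent form with the unit spelled out, should millisecond precision ever be needed.
  @Test
  @Timeout(value = 300_000, unit = TimeUnit.MILLISECONDS)
  public void explicitMilliseconds() throws InterruptedException {
    Thread.sleep(10);
  }
}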
- assertThat(singletonVolume.getReservedForReplicas(), is(0L)); + assertThat(singletonVolume.getReservedForReplicas()).isEqualTo(0L); } private static class Writer extends Daemon { @@ -592,7 +594,8 @@ public int getNumFailures() { } } - @Test(timeout = 30000) + @Test + @Timeout(value = 30) public void testReservedSpaceForAppend() throws Exception { final short replication = 3; startCluster(BLOCK_SIZE, replication, -1); @@ -632,7 +635,8 @@ public void testReservedSpaceForAppend() throws Exception { checkReservedSpace(expectedFile2Reserved); } - @Test(timeout = 30000) + @Test + @Timeout(value = 30) public void testReservedSpaceForPipelineRecovery() throws Exception { final short replication = 3; startCluster(BLOCK_SIZE, replication, -1); @@ -695,7 +699,8 @@ public Boolean get() { } } - @Test(timeout = 60000) + @Test + @Timeout(value = 60) public void testReservedSpaceForLeaseRecovery() throws Exception { final short replication = 3; conf.setInt( @@ -759,7 +764,8 @@ public Boolean get() { * * @throws IOException */ - @Test(timeout = 300000) + @Test + @Timeout(value = 300) public void testReplicaInfoBytesReservedReleasedOnFinalize() throws IOException { short replication = 3; int bufferLength = 4096; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestWriteToReplica.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestWriteToReplica.java index 202fb190f3d64..cf78c187ae291 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestWriteToReplica.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestWriteToReplica.java @@ -17,9 +17,10 @@ */ package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.fail; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; import java.io.File; import java.io.IOException; @@ -48,8 +49,7 @@ import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException; -import org.junit.Assert; -import org.junit.Test; +import org.junit.jupiter.api.Test; /** Test if FSDataset#append, writeToRbw, and writeToTmp */ public class TestWriteToReplica { @@ -187,9 +187,9 @@ private void testAppend(String bpid, FsDatasetSpi dataSet, fvi.onBlockFileDeletion(bpid, -available); blocks[FINALIZED].setNumBytes(expectedLen + 100); dataSet.append(blocks[FINALIZED], newGS, expectedLen); - Assert.fail("Should not have space to append to an RWR replica" + blocks[RWR]); + fail("Should not have space to append to an RWR replica" + blocks[RWR]); } catch (DiskOutOfSpaceException e) { - Assert.assertTrue(e.getMessage().startsWith( + assertTrue(e.getMessage().startsWith( "Insufficient space for appending to ")); } fvi.onBlockFileDeletion(bpid, available); @@ -204,51 +204,51 @@ private void testAppend(String bpid, FsDatasetSpi dataSet, try { dataSet.append(blocks[TEMPORARY], blocks[TEMPORARY].getGenerationStamp()+1, blocks[TEMPORARY].getNumBytes()); - Assert.fail("Should not have appended to a temporary replica " + fail("Should not have appended to a 
temporary replica " + blocks[TEMPORARY]); } catch (ReplicaNotFoundException e) { - Assert.assertEquals(ReplicaNotFoundException.UNFINALIZED_REPLICA + - blocks[TEMPORARY], e.getMessage()); + assertEquals(ReplicaNotFoundException.UNFINALIZED_REPLICA + blocks[TEMPORARY], + e.getMessage()); } try { dataSet.append(blocks[RBW], blocks[RBW].getGenerationStamp()+1, blocks[RBW].getNumBytes()); - Assert.fail("Should not have appended to an RBW replica" + blocks[RBW]); + fail("Should not have appended to an RBW replica" + blocks[RBW]); } catch (ReplicaNotFoundException e) { - Assert.assertEquals(ReplicaNotFoundException.UNFINALIZED_REPLICA + - blocks[RBW], e.getMessage()); + assertEquals(ReplicaNotFoundException.UNFINALIZED_REPLICA + blocks[RBW], + e.getMessage()); } try { dataSet.append(blocks[RWR], blocks[RWR].getGenerationStamp()+1, blocks[RBW].getNumBytes()); - Assert.fail("Should not have appended to an RWR replica" + blocks[RWR]); + fail("Should not have appended to an RWR replica" + blocks[RWR]); } catch (ReplicaNotFoundException e) { - Assert.assertEquals(ReplicaNotFoundException.UNFINALIZED_REPLICA + - blocks[RWR], e.getMessage()); + assertEquals(ReplicaNotFoundException.UNFINALIZED_REPLICA + blocks[RWR], + e.getMessage()); } try { dataSet.append(blocks[RUR], blocks[RUR].getGenerationStamp()+1, blocks[RUR].getNumBytes()); - Assert.fail("Should not have appended to an RUR replica" + blocks[RUR]); + fail("Should not have appended to an RUR replica" + blocks[RUR]); } catch (ReplicaNotFoundException e) { - Assert.assertEquals(ReplicaNotFoundException.UNFINALIZED_REPLICA + - blocks[RUR], e.getMessage()); + assertEquals(ReplicaNotFoundException.UNFINALIZED_REPLICA + blocks[RUR], + e.getMessage()); } try { dataSet.append(blocks[NON_EXISTENT], blocks[NON_EXISTENT].getGenerationStamp(), blocks[NON_EXISTENT].getNumBytes()); - Assert.fail("Should not have appended to a non-existent replica " + + fail("Should not have appended to a non-existent replica " + blocks[NON_EXISTENT]); } catch (ReplicaNotFoundException e) { String expectMessage = ReplicaNotFoundException.NON_EXISTENT_REPLICA + blocks[NON_EXISTENT].getBlockPoolId() + ":" + blocks[NON_EXISTENT].getBlockId(); - Assert.assertEquals(expectMessage, e.getMessage()); + assertEquals(expectMessage, e.getMessage()); } newGS = blocks[FINALIZED].getGenerationStamp()+1; @@ -259,10 +259,10 @@ private void testAppend(String bpid, FsDatasetSpi dataSet, try { dataSet.recoverAppend(blocks[TEMPORARY], blocks[TEMPORARY].getGenerationStamp()+1, blocks[TEMPORARY].getNumBytes()); - Assert.fail("Should not have appended to a temporary replica " + fail("Should not have appended to a temporary replica " + blocks[TEMPORARY]); } catch (ReplicaNotFoundException e) { - Assert.assertTrue(e.getMessage().startsWith( + assertTrue(e.getMessage().startsWith( ReplicaNotFoundException.UNFINALIZED_AND_NONRBW_REPLICA)); } @@ -273,18 +273,18 @@ private void testAppend(String bpid, FsDatasetSpi dataSet, try { dataSet.recoverAppend(blocks[RWR], blocks[RWR].getGenerationStamp()+1, blocks[RBW].getNumBytes()); - Assert.fail("Should not have appended to an RWR replica" + blocks[RWR]); + fail("Should not have appended to an RWR replica" + blocks[RWR]); } catch (ReplicaNotFoundException e) { - Assert.assertTrue(e.getMessage().startsWith( + assertTrue(e.getMessage().startsWith( ReplicaNotFoundException.UNFINALIZED_AND_NONRBW_REPLICA)); } try { dataSet.recoverAppend(blocks[RUR], blocks[RUR].getGenerationStamp()+1, blocks[RUR].getNumBytes()); - Assert.fail("Should not have appended to an RUR 
replica" + blocks[RUR]); + fail("Should not have appended to an RUR replica" + blocks[RUR]); } catch (ReplicaNotFoundException e) { - Assert.assertTrue(e.getMessage().startsWith( + assertTrue(e.getMessage().startsWith( ReplicaNotFoundException.UNFINALIZED_AND_NONRBW_REPLICA)); } @@ -292,10 +292,10 @@ private void testAppend(String bpid, FsDatasetSpi dataSet, dataSet.recoverAppend(blocks[NON_EXISTENT], blocks[NON_EXISTENT].getGenerationStamp(), blocks[NON_EXISTENT].getNumBytes()); - Assert.fail("Should not have appended to a non-existent replica " + + fail("Should not have appended to a non-existent replica " + blocks[NON_EXISTENT]); } catch (ReplicaNotFoundException e) { - Assert.assertTrue(e.getMessage().startsWith( + assertTrue(e.getMessage().startsWith( ReplicaNotFoundException.NON_EXISTENT_REPLICA)); } } @@ -309,10 +309,10 @@ private void testClose(FsDatasetSpi dataSet, ExtendedBlock [] blocks) throws try { dataSet.recoverClose(blocks[TEMPORARY], blocks[TEMPORARY].getGenerationStamp()+1, blocks[TEMPORARY].getNumBytes()); - Assert.fail("Should not have recovered close a temporary replica " + fail("Should not have recovered close a temporary replica " + blocks[TEMPORARY]); } catch (ReplicaNotFoundException e) { - Assert.assertTrue(e.getMessage().startsWith( + assertTrue(e.getMessage().startsWith( ReplicaNotFoundException.UNFINALIZED_AND_NONRBW_REPLICA)); } @@ -323,18 +323,18 @@ private void testClose(FsDatasetSpi dataSet, ExtendedBlock [] blocks) throws try { dataSet.recoverClose(blocks[RWR], blocks[RWR].getGenerationStamp()+1, blocks[RBW].getNumBytes()); - Assert.fail("Should not have recovered close an RWR replica" + blocks[RWR]); + fail("Should not have recovered close an RWR replica" + blocks[RWR]); } catch (ReplicaNotFoundException e) { - Assert.assertTrue(e.getMessage().startsWith( + assertTrue(e.getMessage().startsWith( ReplicaNotFoundException.UNFINALIZED_AND_NONRBW_REPLICA)); } try { dataSet.recoverClose(blocks[RUR], blocks[RUR].getGenerationStamp()+1, blocks[RUR].getNumBytes()); - Assert.fail("Should not have recovered close an RUR replica" + blocks[RUR]); + fail("Should not have recovered close an RUR replica" + blocks[RUR]); } catch (ReplicaNotFoundException e) { - Assert.assertTrue(e.getMessage().startsWith( + assertTrue(e.getMessage().startsWith( ReplicaNotFoundException.UNFINALIZED_AND_NONRBW_REPLICA)); } @@ -342,10 +342,10 @@ private void testClose(FsDatasetSpi dataSet, ExtendedBlock [] blocks) throws dataSet.recoverClose(blocks[NON_EXISTENT], blocks[NON_EXISTENT].getGenerationStamp(), blocks[NON_EXISTENT].getNumBytes()); - Assert.fail("Should not have recovered close a non-existent replica " + + fail("Should not have recovered close a non-existent replica " + blocks[NON_EXISTENT]); } catch (ReplicaNotFoundException e) { - Assert.assertTrue(e.getMessage().startsWith( + assertTrue(e.getMessage().startsWith( ReplicaNotFoundException.NON_EXISTENT_REPLICA)); } } @@ -355,16 +355,16 @@ private void testWriteToRbw(FsDatasetImpl dataSet, ExtendedBlock[] blocks) throw dataSet.recoverRbw(blocks[FINALIZED], blocks[FINALIZED].getGenerationStamp()+1, 0L, blocks[FINALIZED].getNumBytes()); - Assert.fail("Should not have recovered a finalized replica " + + fail("Should not have recovered a finalized replica " + blocks[FINALIZED]); } catch (ReplicaNotFoundException e) { - Assert.assertTrue(e.getMessage().startsWith( + assertTrue(e.getMessage().startsWith( ReplicaNotFoundException.NON_RBW_REPLICA)); } try { dataSet.createRbw(StorageType.DEFAULT, null, blocks[FINALIZED], false); - 
Assert.fail("Should not have created a replica that's already " + + fail("Should not have created a replica that's already " + "finalized " + blocks[FINALIZED]); } catch (ReplicaAlreadyExistsException e) { } @@ -373,16 +373,16 @@ private void testWriteToRbw(FsDatasetImpl dataSet, ExtendedBlock[] blocks) throw dataSet.recoverRbw(blocks[TEMPORARY], blocks[TEMPORARY].getGenerationStamp()+1, 0L, blocks[TEMPORARY].getNumBytes()); - Assert.fail("Should not have recovered a temporary replica " + + fail("Should not have recovered a temporary replica " + blocks[TEMPORARY]); } catch (ReplicaNotFoundException e) { - Assert.assertTrue(e.getMessage().startsWith( + assertTrue(e.getMessage().startsWith( ReplicaNotFoundException.NON_RBW_REPLICA)); } try { dataSet.createRbw(StorageType.DEFAULT, null, blocks[TEMPORARY], false); - Assert.fail("Should not have created a replica that had created as " + + fail("Should not have created a replica that had created as " + "temporary " + blocks[TEMPORARY]); } catch (ReplicaAlreadyExistsException e) { } @@ -392,7 +392,7 @@ private void testWriteToRbw(FsDatasetImpl dataSet, ExtendedBlock[] blocks) throw try { dataSet.createRbw(StorageType.DEFAULT, null, blocks[RBW], false); - Assert.fail("Should not have created a replica that had created as RBW " + + fail("Should not have created a replica that had created as RBW " + blocks[RBW]); } catch (ReplicaAlreadyExistsException e) { } @@ -400,15 +400,15 @@ private void testWriteToRbw(FsDatasetImpl dataSet, ExtendedBlock[] blocks) throw try { dataSet.recoverRbw(blocks[RWR], blocks[RWR].getGenerationStamp()+1, 0L, blocks[RWR].getNumBytes()); - Assert.fail("Should not have recovered a RWR replica " + blocks[RWR]); + fail("Should not have recovered a RWR replica " + blocks[RWR]); } catch (ReplicaNotFoundException e) { - Assert.assertTrue(e.getMessage().startsWith( + assertTrue(e.getMessage().startsWith( ReplicaNotFoundException.NON_RBW_REPLICA)); } try { dataSet.createRbw(StorageType.DEFAULT, null, blocks[RWR], false); - Assert.fail("Should not have created a replica that was waiting to be " + + fail("Should not have created a replica that was waiting to be " + "recovered " + blocks[RWR]); } catch (ReplicaAlreadyExistsException e) { } @@ -416,15 +416,15 @@ private void testWriteToRbw(FsDatasetImpl dataSet, ExtendedBlock[] blocks) throw try { dataSet.recoverRbw(blocks[RUR], blocks[RUR].getGenerationStamp()+1, 0L, blocks[RUR].getNumBytes()); - Assert.fail("Should not have recovered a RUR replica " + blocks[RUR]); + fail("Should not have recovered a RUR replica " + blocks[RUR]); } catch (ReplicaNotFoundException e) { - Assert.assertTrue(e.getMessage().startsWith( + assertTrue(e.getMessage().startsWith( ReplicaNotFoundException.NON_RBW_REPLICA)); } try { dataSet.createRbw(StorageType.DEFAULT, null, blocks[RUR], false); - Assert.fail("Should not have created a replica that was under recovery " + + fail("Should not have created a replica that was under recovery " + blocks[RUR]); } catch (ReplicaAlreadyExistsException e) { } @@ -433,10 +433,10 @@ private void testWriteToRbw(FsDatasetImpl dataSet, ExtendedBlock[] blocks) throw dataSet.recoverRbw(blocks[NON_EXISTENT], blocks[NON_EXISTENT].getGenerationStamp()+1, 0L, blocks[NON_EXISTENT].getNumBytes()); - Assert.fail("Cannot recover a non-existent replica " + + fail("Cannot recover a non-existent replica " + blocks[NON_EXISTENT]); } catch (ReplicaNotFoundException e) { - Assert.assertTrue( + assertTrue( e.getMessage().contains(ReplicaNotFoundException.NON_EXISTENT_REPLICA)); } @@ -447,7 
+447,7 @@ private void testWriteToTemporary(FsDatasetImpl dataSet, ExtendedBlock[] blocks) try { dataSet.createTemporary(StorageType.DEFAULT, null, blocks[FINALIZED], false); - Assert.fail("Should not have created a temporary replica that was " + + fail("Should not have created a temporary replica that was " + "finalized " + blocks[FINALIZED]); } catch (ReplicaAlreadyExistsException e) { } @@ -455,28 +455,28 @@ private void testWriteToTemporary(FsDatasetImpl dataSet, ExtendedBlock[] blocks) try { dataSet.createTemporary(StorageType.DEFAULT, null, blocks[TEMPORARY], false); - Assert.fail("Should not have created a replica that had created as" + + fail("Should not have created a replica that had created as" + "temporary " + blocks[TEMPORARY]); } catch (ReplicaAlreadyExistsException e) { } try { dataSet.createTemporary(StorageType.DEFAULT, null, blocks[RBW], false); - Assert.fail("Should not have created a replica that had created as RBW " + + fail("Should not have created a replica that had created as RBW " + blocks[RBW]); } catch (ReplicaAlreadyExistsException e) { } try { dataSet.createTemporary(StorageType.DEFAULT, null, blocks[RWR], false); - Assert.fail("Should not have created a replica that was waiting to be " + + fail("Should not have created a replica that was waiting to be " + "recovered " + blocks[RWR]); } catch (ReplicaAlreadyExistsException e) { } try { dataSet.createTemporary(StorageType.DEFAULT, null, blocks[RUR], false); - Assert.fail("Should not have created a replica that was under recovery " + + fail("Should not have created a replica that was under recovery " + blocks[RUR]); } catch (ReplicaAlreadyExistsException e) { } @@ -487,12 +487,12 @@ private void testWriteToTemporary(FsDatasetImpl dataSet, ExtendedBlock[] blocks) try { dataSet.createTemporary(StorageType.DEFAULT, null, blocks[NON_EXISTENT], false); - Assert.fail("Should not have created a replica that had already been " + fail("Should not have created a replica that had already been " + "created " + blocks[NON_EXISTENT]); } catch (Exception e) { - Assert.assertTrue( + assertTrue( e.getMessage().contains(blocks[NON_EXISTENT].getBlockName())); - Assert.assertTrue(e instanceof ReplicaAlreadyExistsException); + assertTrue(e instanceof ReplicaAlreadyExistsException); } long newGenStamp = blocks[NON_EXISTENT].getGenerationStamp() * 10; @@ -501,11 +501,11 @@ private void testWriteToTemporary(FsDatasetImpl dataSet, ExtendedBlock[] blocks) ReplicaInPipeline replicaInfo = dataSet.createTemporary(StorageType.DEFAULT, null, blocks[NON_EXISTENT], false).getReplica(); - Assert.assertTrue(replicaInfo.getGenerationStamp() == newGenStamp); - Assert.assertTrue( + assertTrue(replicaInfo.getGenerationStamp() == newGenStamp); + assertTrue( replicaInfo.getBlockId() == blocks[NON_EXISTENT].getBlockId()); } catch (ReplicaAlreadyExistsException e) { - Assert.fail("createTemporary should have allowed the block with newer " + fail("createTemporary should have allowed the block with newer " + " generation stamp to be created " + blocks[NON_EXISTENT]); } } @@ -526,8 +526,8 @@ public void testReplicaMapAfterDatanodeRestart() throws Exception { cluster.waitActive(); NameNode nn1 = cluster.getNameNode(0); NameNode nn2 = cluster.getNameNode(1); - assertNotNull("cannot create nn1", nn1); - assertNotNull("cannot create nn2", nn2); + assertNotNull(nn1, "cannot create nn1"); + assertNotNull(nn2, "cannot create nn2"); // check number of volumes in fsdataset DataNode dn = cluster.getDataNodes().get(0); @@ -537,7 +537,7 @@ public void 
testReplicaMapAfterDatanodeRestart() throws Exception { List volumes = null; try (FsDatasetSpi.FsVolumeReferences referredVols = dataSet.getFsVolumeReferences()) { // number of volumes should be 2 - [data1, data2] - assertEquals("number of volumes is wrong", 2, referredVols.size()); + assertEquals(2, referredVols.size(), "number of volumes is wrong"); volumes = new ArrayList<>(referredVols.size()); for (FsVolumeSpi vol : referredVols) { volumes.add(vol); @@ -547,8 +547,7 @@ public void testReplicaMapAfterDatanodeRestart() throws Exception { cluster.getNamesystem(0).getBlockPoolId(), cluster.getNamesystem(1).getBlockPoolId())); - Assert.assertTrue("Cluster should have 2 block pools", - bpList.size() == 2); + assertTrue(bpList.size() == 2, "Cluster should have 2 block pools"); createReplicas(bpList, volumes, cluster.getFsDatasetTestUtils(dn)); ReplicaMap oldReplicaMap = new ReplicaMap(); @@ -592,7 +591,7 @@ public void testRecoverInconsistentRbw() throws IOException { fsDataset.recoverRbw(blocks[RBW], blocks[RBW].getGenerationStamp(), 0L, rbw.getNumBytes()); // after the recovery, on disk length should equal acknowledged length. - Assert.assertTrue(rbw.getBytesOnDisk() == rbw.getBytesAcked()); + assertTrue(rbw.getBytesOnDisk() == rbw.getBytesAcked()); // reduce on disk length again; this time actually truncate the file to // simulate the data not being present @@ -619,14 +618,14 @@ private void testEqualityOfReplicaMap(ReplicaMap oldReplicaMap, ReplicaMap // replicaInfo from oldReplicaMap. for (String bpid: bpidList) { for (ReplicaInfo info: newReplicaMap.replicas(bpid)) { - assertNotNull("Volume map before restart didn't contain the " - + "blockpool: " + bpid, oldReplicaMap.replicas(bpid)); + assertNotNull(oldReplicaMap.replicas(bpid), + "Volume map before restart didn't contain the " + "blockpool: " + bpid); ReplicaInfo oldReplicaInfo = oldReplicaMap.get(bpid, info.getBlockId()); // Volume map after restart contains a blockpool id which - assertNotNull("Old Replica Map didnt't contain block with blockId: " + - info.getBlockId(), oldReplicaInfo); + assertNotNull(oldReplicaInfo, + "Old Replica Map didnt't contain block with blockId: " + info.getBlockId()); ReplicaState oldState = oldReplicaInfo.getState(); // Since after restart, all the RWR, RBW and RUR blocks gets @@ -649,8 +648,8 @@ private void testEqualityOfReplicaMap(ReplicaMap oldReplicaMap, ReplicaMap for (String bpid: bpidList) { for (ReplicaInfo replicaInfo: oldReplicaMap.replicas(bpid)) { if (replicaInfo.getState() != ReplicaState.TEMPORARY) { - Assert.fail("After datanode restart we lost the block with blockId: " - + replicaInfo.getBlockId()); + fail("After datanode restart we lost the block with blockId: " + + replicaInfo.getBlockId()); } } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/metrics/TestDataNodeOutlierDetectionViaMetrics.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/metrics/TestDataNodeOutlierDetectionViaMetrics.java index 0042bcb042646..5c6cd0d9707ec 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/metrics/TestDataNodeOutlierDetectionViaMetrics.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/metrics/TestDataNodeOutlierDetectionViaMetrics.java @@ -24,10 +24,9 @@ import org.apache.hadoop.hdfs.server.protocol.OutlierMetrics; import org.apache.hadoop.metrics2.lib.MetricsTestHelper; import 
org.apache.hadoop.test.GenericTestUtils; -import org.junit.Before; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.Timeout; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.Timeout; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.slf4j.event.Level; @@ -36,25 +35,19 @@ import java.util.Random; import java.util.concurrent.TimeUnit; -import static org.hamcrest.core.Is.is; -import static org.junit.Assert.assertThat; -import static org.junit.Assert.assertTrue; - +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.assertj.core.api.Assertions.assertThat; /** * Test that the {@link DataNodePeerMetrics} class is able to detect * outliers i.e. slow nodes via the metrics it maintains. + * Set a timeout for every test case. */ +@Timeout(300) public class TestDataNodeOutlierDetectionViaMetrics { public static final Logger LOG = LoggerFactory.getLogger(TestDataNodeOutlierDetectionViaMetrics.class); - /** - * Set a timeout for every test case. - */ - @Rule - public Timeout testTimeout = new Timeout(300_000); - // A few constants to keep the test run time short. private static final int WINDOW_INTERVAL_SECONDS = 3; private static final int ROLLING_AVERAGE_WINDOWS = 10; @@ -66,7 +59,7 @@ public class TestDataNodeOutlierDetectionViaMetrics { private Configuration conf; - @Before + @BeforeEach public void setup() { GenericTestUtils.setLogLevel(DataNodePeerMetrics.LOG, Level.TRACE); GenericTestUtils.setLogLevel(OutlierDetector.LOG, Level.TRACE); @@ -103,7 +96,7 @@ public Boolean get() { final Map outliers = peerMetrics.getOutliers(); LOG.info("Got back outlier nodes: {}", outliers); - assertThat(outliers.size(), is(1)); + assertThat(outliers.size()).isEqualTo(1); assertTrue(outliers.containsKey(slowNodeName)); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/metrics/TestSlowNodeDetector.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/metrics/TestSlowNodeDetector.java index 8a771e42e4fe4..c2de95c8a294b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/metrics/TestSlowNodeDetector.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/metrics/TestSlowNodeDetector.java @@ -23,10 +23,9 @@ import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableSet; import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.test.GenericTestUtils; -import org.junit.Before; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.Timeout; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.Timeout; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.slf4j.event.Level; @@ -37,21 +36,18 @@ import java.util.Map; import java.util.Set; -import static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; /** * Unit tests for {@link OutlierDetector}. + * Set a timeout for every test case. */ +@Timeout(300) public class TestSlowNodeDetector { public static final Logger LOG = LoggerFactory.getLogger(TestSlowNodeDetector.class); - /** - * Set a timeout for every test case. 
- */ - @Rule - public Timeout testTimeout = new Timeout(300_000); - private final static double LOW_THRESHOLD = 1000; private final static long MIN_OUTLIER_DETECTION_PEERS = 3; @@ -235,7 +231,7 @@ public class TestSlowNodeDetector { private OutlierDetector slowNodeDetector; - @Before + @BeforeEach public void setup() { slowNodeDetector = new OutlierDetector(MIN_OUTLIER_DETECTION_PEERS, (long) LOW_THRESHOLD); @@ -251,10 +247,10 @@ public void testOutliersFromTestMatrix() { final Set outliers = slowNodeDetector.getOutliers(entry.getKey()).keySet(); assertTrue( + outliers.equals(entry.getValue()), "Running outlier detection on " + entry.getKey() + " was expected to yield set " + entry.getValue() + ", but " + - " we got set " + outliers, - outliers.equals(entry.getValue())); + " we got set " + outliers); } } @@ -276,9 +272,9 @@ public void testMediansFromTestMatrix() { Math.abs(median - expectedMedian) * 100.0 / expectedMedian; assertTrue( - "Set " + inputList + "; Expected median: " + - expectedMedian + ", got: " + median, - errorPercent < 0.001); + errorPercent < 0.001, + "Set " + inputList + "; Expected median: " + + expectedMedian + ", got: " + median); } } @@ -301,16 +297,16 @@ public void testMadsFromTestMatrix() { Math.abs(mad - expectedMad) * 100.0 / expectedMad; assertTrue( - "Set " + entry.getKey() + "; Expected M.A.D.: " + - expectedMad + ", got: " + mad, - errorPercent < 0.001); + errorPercent < 0.001, + "Set " + entry.getKey() + "; Expected M.A.D.: " + + expectedMad + ", got: " + mad); } else { // For an input list of size 1, the MAD should be 0.0. final Double epsilon = 0.000001; // Allow for some FP math error. assertTrue( - "Set " + entry.getKey() + "; Expected M.A.D.: " + - expectedMad + ", got: " + mad, - mad < epsilon); + mad < epsilon, + "Set " + entry.getKey() + "; Expected M.A.D.: " + + expectedMad + ", got: " + mad); } } } @@ -319,17 +315,21 @@ public void testMadsFromTestMatrix() { * Verify that {@link OutlierDetector#computeMedian(List)} throws when * passed an empty list. */ - @Test(expected=IllegalArgumentException.class) + @Test public void testMedianOfEmptyList() { - OutlierDetector.computeMedian(Collections.emptyList()); + assertThrows(IllegalArgumentException.class, () -> { + OutlierDetector.computeMedian(Collections.emptyList()); + }); } /** * Verify that {@link OutlierDetector#computeMad(List)} throws when * passed an empty list. 
*/ - @Test(expected=IllegalArgumentException.class) + @Test public void testMadOfEmptyList() { - OutlierDetector.computeMedian(Collections.emptyList()); + assertThrows(IllegalArgumentException.class, () -> { + OutlierDetector.computeMedian(Collections.emptyList()); + }); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/web/TestDatanodeHttpXFrame.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/web/TestDatanodeHttpXFrame.java index b6ad5a9a5d6b7..23ea4e0158c32 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/web/TestDatanodeHttpXFrame.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/web/TestDatanodeHttpXFrame.java @@ -23,16 +23,18 @@ import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.server.datanode.DataNode; import org.apache.hadoop.http.HttpServer2; -import org.junit.After; -import org.junit.Assert; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.ExpectedException; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.Test; import java.io.IOException; import java.net.HttpURLConnection; import java.net.URL; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; + /** * Test that X-Frame-Options works correctly with DatanodeHTTPServer. */ @@ -40,10 +42,7 @@ public class TestDatanodeHttpXFrame { private MiniDFSCluster cluster = null; - @Rule - public ExpectedException exception = ExpectedException.none(); - - @After + @AfterEach public void cleanUp() { if (cluster != null) { cluster.shutdown(); @@ -57,8 +56,8 @@ public void testDataNodeXFrameOptionsEnabled() throws Exception { cluster = createCluster(xFrameEnabled, null); HttpURLConnection conn = getConn(cluster); String xfoHeader = conn.getHeaderField("X-FRAME-OPTIONS"); - Assert.assertNotNull("X-FRAME-OPTIONS is absent in the header", xfoHeader); - Assert.assertTrue(xfoHeader.endsWith(HttpServer2.XFrameOption + assertNotNull(xfoHeader, "X-FRAME-OPTIONS is absent in the header"); + assertTrue(xfoHeader.endsWith(HttpServer2.XFrameOption .SAMEORIGIN.toString())); } @@ -68,13 +67,14 @@ public void testNameNodeXFrameOptionsDisabled() throws Exception { cluster = createCluster(xFrameEnabled, null); HttpURLConnection conn = getConn(cluster); String xfoHeader = conn.getHeaderField("X-FRAME-OPTIONS"); - Assert.assertNull("unexpected X-FRAME-OPTION in header", xfoHeader); + assertNull(xfoHeader, "unexpected X-FRAME-OPTION in header"); } @Test public void testDataNodeXFramewithInvalidOptions() throws Exception { - exception.expect(IllegalArgumentException.class); - cluster = createCluster(false, "Hadoop"); + assertThrows(IllegalArgumentException.class, () -> { + cluster = createCluster(false, "Hadoop"); + }); } private static MiniDFSCluster createCluster(boolean enabled, String diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/web/TestHostRestrictingAuthorizationFilterHandler.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/web/TestHostRestrictingAuthorizationFilterHandler.java index a918a081e7f6a..a3bfc06ea595f 100644 --- 
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/web/TestHostRestrictingAuthorizationFilterHandler.java
@@ -28,15 +28,15 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.server.common.HostRestrictingAuthorizationFilter;
 import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 
 import java.net.InetSocketAddress;
 import java.net.SocketAddress;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 public class TestHostRestrictingAuthorizationFilterHandler {
 
@@ -56,11 +56,11 @@ public void testRejectAll() {
         HttpMethod.GET,
         WebHdfsFileSystem.PATH_PREFIX + "/user/myName/fooFile?op=OPEN");
     // we will send back an error so ensure our write returns false
-    assertFalse("Should get error back from handler for rejected request",
-        channel.writeInbound(httpRequest));
+    assertFalse(channel.writeInbound(httpRequest),
+        "Should get error back from handler for rejected request");
     DefaultHttpResponse channelResponse =
         (DefaultHttpResponse) channel.outboundMessages().poll();
-    assertNotNull("Expected response to exist.", channelResponse);
+    assertNotNull(channelResponse, "Expected response to exist.");
     assertEquals(HttpResponseStatus.FORBIDDEN, channelResponse.status());
     assertFalse(channel.isOpen());
   }
@@ -89,12 +89,11 @@ public void testMultipleAcceptedGETsOneChannel() {
         new DefaultFullHttpRequest(HttpVersion.HTTP_1_1,
             HttpMethod.GET,
             WebHdfsFileSystem.PATH_PREFIX + "/allowed/file_three?op=OPEN");
-    assertTrue("Should successfully accept request",
-        channel.writeInbound(allowedHttpRequest));
-    assertTrue("Should successfully accept request, second time",
-        channel.writeInbound(allowedHttpRequest2));
-    assertTrue("Should successfully accept request, third time",
-        channel.writeInbound(allowedHttpRequest3));
+    assertTrue(channel.writeInbound(allowedHttpRequest), "Should successfully accept request");
+    assertTrue(channel.writeInbound(allowedHttpRequest2),
+        "Should successfully accept request, second time");
+    assertTrue(channel.writeInbound(allowedHttpRequest3),
+        "Should successfully accept request, third time");
   }
 
   /*
@@ -125,15 +124,14 @@ public void testMultipleChannels() {
         new DefaultFullHttpRequest(HttpVersion.HTTP_1_1,
             HttpMethod.GET,
             WebHdfsFileSystem.PATH_PREFIX + "/allowed/file_three?op=OPEN");
-    assertTrue("Should successfully accept request",
-        channel1.writeInbound(allowedHttpRequest));
-    assertTrue("Should successfully accept request, second time",
-        channel2.writeInbound(allowedHttpRequest2));
+    assertTrue(channel1.writeInbound(allowedHttpRequest), "Should successfully accept request");
+    assertTrue(channel2.writeInbound(allowedHttpRequest2),
+        "Should successfully accept request, second time");
 
     // verify closing one channel does not affect remaining channels
     channel1.close();
-    assertTrue("Should successfully accept request, third time",
-        channel3.writeInbound(allowedHttpRequest3));
+    assertTrue(channel3.writeInbound(allowedHttpRequest3),
+        "Should successfully accept request, third time");
   }
 
   /*
@@ -148,8 +146,7 @@ public void testAcceptGETFILECHECKSUM() {
         HttpMethod.GET,
         WebHdfsFileSystem.PATH_PREFIX + "/user/myName/fooFile?op"
            + "=GETFILECHECKSUM");
-    assertTrue("Should successfully accept request",
-        channel.writeInbound(httpRequest));
+    assertTrue(channel.writeInbound(httpRequest), "Should successfully accept request");
   }
 
   /*
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/TestDataNodeUGIProvider.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/TestDataNodeUGIProvider.java
index 98465dc9e98ac..329bd1ff10dcc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/TestDataNodeUGIProvider.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/TestDataNodeUGIProvider.java
@@ -19,6 +19,8 @@
 package org.apache.hadoop.hdfs.server.datanode.web.webhdfs;
 
 import static org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod.KERBEROS;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNotEquals;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.spy;
 import static org.mockito.Mockito.verify;
@@ -52,10 +54,8 @@
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.Lists;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
-
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 
 public class TestDataNodeUGIProvider {
   private final URI uri = URI.create(WebHdfsConstants.WEBHDFS_SCHEME + "://"
@@ -65,7 +65,8 @@ public class TestDataNodeUGIProvider {
   private final int LENGTH = 512;
   private final static int EXPIRE_AFTER_ACCESS = 5*1000;
   private Configuration conf;
-  @Before
+
+  @BeforeEach
   public void setUp(){
     conf = WebHdfsTestUtil.createConf();
     conf.setInt(DFSConfigKeys.DFS_WEBHDFS_UGI_EXPIRE_AFTER_ACCESS_KEY,
@@ -107,22 +108,19 @@ public void testUGICacheSecure() throws Exception {
     UserGroupInformation ugi11 = ugiProvider1.ugi();
     UserGroupInformation ugi12 = ugiProvider1.ugi();
 
-    Assert.assertEquals(
-        "With UGI cache, two UGIs returned by the same token should be same",
-        ugi11, ugi12);
+    assertEquals(ugi11, ugi12,
+        "With UGI cache, two UGIs returned by the same token should be same");
 
     DataNodeUGIProvider ugiProvider2 = new DataNodeUGIProvider(
        new ParameterParser(new QueryStringDecoder(URI.create(uri2)), conf));
     UserGroupInformation url21 = ugiProvider2.ugi();
     UserGroupInformation url22 = ugiProvider2.ugi();
 
-    Assert.assertEquals(
-        "With UGI cache, two UGIs returned by the same token should be same",
-        url21, url22);
+    assertEquals(url21, url22,
+        "With UGI cache, two UGIs returned by the same token should be same");
 
-    Assert.assertNotEquals(
-        "With UGI cache, two UGIs for the different token should not be same",
-        ugi11, url22);
+    assertNotEquals(ugi11, url22,
+        "With UGI cache, two UGIs for the different token should not be same");
 
     ugiProvider2.clearCache();
     awaitCacheEmptyDueToExpiration();
@@ -131,12 +129,11 @@ public void testUGICacheSecure() throws Exception {
 
     String msg = "With cache eviction, two UGIs returned"
        + " by the same token should not be same";
-    Assert.assertNotEquals(msg, ugi11, ugi12);
-    Assert.assertNotEquals(msg, url21, url22);
+    assertNotEquals(ugi11, ugi12, msg);
+    assertNotEquals(url21, url22, msg);
 
-    Assert.assertNotEquals(
-        "With UGI cache, two UGIs for the different token should not be same",
-        ugi11, url22);
+    assertNotEquals(ugi11, url22,
+        "With UGI cache, two UGIs for the different token should not be same");
   }
 
   @Test
@@ -158,22 +155,19 @@ public void testUGICacheInSecure() throws Exception {
     UserGroupInformation ugi11 = ugiProvider1.ugi();
     UserGroupInformation ugi12 = ugiProvider1.ugi();
 
-    Assert.assertEquals(
-        "With UGI cache, two UGIs for the same user should be same", ugi11,
-        ugi12);
+    assertEquals(ugi11, ugi12,
+        "With UGI cache, two UGIs for the same user should be same");
 
     DataNodeUGIProvider ugiProvider2 = new DataNodeUGIProvider(
        new ParameterParser(new QueryStringDecoder(URI.create(uri2)), conf));
     UserGroupInformation url21 = ugiProvider2.ugi();
     UserGroupInformation url22 = ugiProvider2.ugi();
 
-    Assert.assertEquals(
-        "With UGI cache, two UGIs for the same user should be same", url21,
-        url22);
+    assertEquals(url21, url22,
+        "With UGI cache, two UGIs for the same user should be same");
 
-    Assert.assertNotEquals(
-        "With UGI cache, two UGIs for the different user should not be same",
-        ugi11, url22);
+    assertNotEquals(ugi11, url22,
+        "With UGI cache, two UGIs for the different user should not be same");
 
     awaitCacheEmptyDueToExpiration();
     ugi12 = ugiProvider1.ugi();
@@ -181,12 +175,11 @@ public void testUGICacheInSecure() throws Exception {
 
     String msg = "With cache eviction, two UGIs returned by"
        + " the same user should not be same";
-    Assert.assertNotEquals(msg, ugi11, ugi12);
-    Assert.assertNotEquals(msg, url21, url22);
+    assertNotEquals(ugi11, ugi12, msg);
+    assertNotEquals(url21, url22, msg);
 
-    Assert.assertNotEquals(
-        "With UGI cache, two UGIs for the different user should not be same",
-        ugi11, url22);
+    assertNotEquals(ugi11, url22,
+        "With UGI cache, two UGIs for the different user should not be same");
   }
 
   @Test
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/TestParameterParser.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/TestParameterParser.java
index 40409985c3f02..8402bba85c959 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/TestParameterParser.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/TestParameterParser.java
@@ -27,10 +27,12 @@
 import org.apache.hadoop.hdfs.web.resources.OffsetParam;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.Assert;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNull;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assertions.fail;
 
 import io.netty.handler.codec.http.QueryStringDecoder;
 
@@ -52,7 +54,7 @@ public void testDeserializeHAToken() throws IOException {
         + DelegationParam.NAME + "=" + token.encodeToUrlString());
     ParameterParser testParser = new ParameterParser(decoder, conf);
     final Token tok2 = testParser.delegationToken();
-    Assert.assertTrue(HAUtilClient.isTokenForLogicalUri(tok2));
+    assertTrue(HAUtilClient.isTokenForLogicalUri(tok2));
   }
 
   @Test
@@ -61,7 +63,7 @@ public void testNullToken() throws IOException {
     QueryStringDecoder decoder = new QueryStringDecoder(
        WebHdfsHandler.WEBHDFS_PREFIX + "/test");
     ParameterParser testParser = new ParameterParser(decoder, conf);
-    Assert.assertNull(testParser.delegationToken());
+    assertNull(testParser.delegationToken());
   }
 
   @Test
@@ -73,7 +75,7 @@ public void testDecodePath() {
     QueryStringDecoder decoder = new QueryStringDecoder(
        WebHdfsHandler.WEBHDFS_PREFIX + ESCAPED_PATH);
     ParameterParser testParser = new ParameterParser(decoder, conf);
-    Assert.assertEquals(EXPECTED_PATH, testParser.path());
+    assertEquals(EXPECTED_PATH, testParser.path());
   }
 
   @Test
@@ -86,8 +88,7 @@ public void testCreateFlag() {
     EnumSet actual = testParser.createFlag();
     EnumSet expected = EnumSet.of(CreateFlag.APPEND,
        CreateFlag.SYNC_BLOCK);
-    Assert.assertEquals(expected.toString(), actual.toString());
-
+    assertEquals(expected.toString(), actual.toString());
 
     final String path1 = "/test1?createflag=append";
     decoder = new QueryStringDecoder(
@@ -96,14 +97,14 @@ public void testCreateFlag() {
 
     actual = testParser.createFlag();
     expected = EnumSet.of(CreateFlag.APPEND);
-    Assert.assertEquals(expected, actual);
+    assertEquals(expected, actual);
 
     final String path2 = "/test1";
     decoder = new QueryStringDecoder(
        WebHdfsHandler.WEBHDFS_PREFIX + path2);
     testParser = new ParameterParser(decoder, conf);
     actual = testParser.createFlag();
-    Assert.assertEquals(0, actual.size());
+    assertEquals(0, actual.size());
 
     final String path3 = "/test1?createflag=create,overwrite";
     decoder = new QueryStringDecoder(
@@ -112,15 +113,14 @@ public void testCreateFlag() {
 
     actual = testParser.createFlag();
     expected = EnumSet.of(CreateFlag.CREATE, CreateFlag
        .OVERWRITE);
-    Assert.assertEquals(expected.toString(), actual.toString());
-
+    assertEquals(expected.toString(), actual.toString());
 
     final String path4 = "/test1?createflag=";
     decoder = new QueryStringDecoder(
        WebHdfsHandler.WEBHDFS_PREFIX + path4);
     testParser = new ParameterParser(decoder, conf);
     actual = testParser.createFlag();
-    Assert.assertEquals(0, actual.size());
+    assertEquals(0, actual.size());
 
     //Incorrect value passed to createflag
     try {
@@ -156,14 +156,14 @@ public void testOffset() throws IOException {
 
     final long X = 42;
     long offset = new OffsetParam(Long.toString(X)).getOffset();
-    Assert.assertEquals("OffsetParam: ", X, offset);
+    assertEquals(X, offset, "OffsetParam: ");
 
     offset = new OffsetParam((String) null).getOffset();
-    Assert.assertEquals("OffsetParam with null should have defaulted to 0", 0, offset);
+    assertEquals(0, offset, "OffsetParam with null should have defaulted to 0");
 
     try {
       offset = new OffsetParam("abc").getValue();
-      Assert.fail("OffsetParam with nondigit value should have thrown IllegalArgumentException");
+      fail("OffsetParam with nondigit value should have thrown IllegalArgumentException");
     } catch (IllegalArgumentException iae) {
       // Ignore
     }