diff --git a/hadoop-hdfs-project/hadoop-hdfs/pom.xml b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
index 10d66d055ba35..b05292c050b3e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
@@ -61,11 +61,27 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
       <artifactId>zookeeper</artifactId>
       <type>test-jar</type>
       <scope>test</scope>
+      <exclusions>
+        <exclusion>
+          <groupId>org.junit.vintage</groupId>
+          <artifactId>junit-vintage-engine</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>junit</groupId>
+          <artifactId>junit</artifactId>
+        </exclusion>
+      </exclusions>
     </dependency>
     <dependency>
       <groupId>io.dropwizard.metrics</groupId>
       <artifactId>metrics-core</artifactId>
       <scope>provided</scope>
+      <exclusions>
+        <exclusion>
+          <groupId>junit</groupId>
+          <artifactId>junit</artifactId>
+        </exclusion>
+      </exclusions>
     </dependency>
     <dependency>
       <groupId>org.xerial.snappy</groupId>
@@ -154,12 +170,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
-    <dependency>
-      <groupId>junit</groupId>
-      <artifactId>junit</artifactId>
-      <scope>test</scope>
-    </dependency>
     <dependency>
       <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-minikdc</artifactId>
@@ -169,11 +179,23 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
       <groupId>org.mockito</groupId>
       <artifactId>mockito-core</artifactId>
       <scope>test</scope>
+      <exclusions>
+        <exclusion>
+          <groupId>junit</groupId>
+          <artifactId>junit</artifactId>
+        </exclusion>
+      </exclusions>
     </dependency>
     <dependency>
       <groupId>org.slf4j</groupId>
       <artifactId>slf4j-log4j12</artifactId>
       <scope>provided</scope>
+      <exclusions>
+        <exclusion>
+          <groupId>junit</groupId>
+          <artifactId>junit</artifactId>
+        </exclusion>
+      </exclusions>
     </dependency>
     <dependency>
       <groupId>io.netty</groupId>
@@ -184,6 +206,16 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
       <groupId>io.netty</groupId>
       <artifactId>netty-all</artifactId>
       <scope>compile</scope>
+      <exclusions>
+        <exclusion>
+          <groupId>org.junit.vintage</groupId>
+          <artifactId>junit-vintage-engine</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>junit</groupId>
+          <artifactId>junit</artifactId>
+        </exclusion>
+      </exclusions>
     </dependency>
     <dependency>
       <groupId>org.apache.hadoop</groupId>
@@ -209,11 +241,27 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
     <dependency>
       <groupId>com.fasterxml.jackson.core</groupId>
       <artifactId>jackson-databind</artifactId>
+      <exclusions>
+        <exclusion>
+          <groupId>junit</groupId>
+          <artifactId>junit</artifactId>
+        </exclusion>
+      </exclusions>
     </dependency>
     <dependency>
       <groupId>org.apache.curator</groupId>
       <artifactId>curator-test</artifactId>
       <scope>test</scope>
+      <exclusions>
+        <exclusion>
+          <groupId>org.junit.vintage</groupId>
+          <artifactId>junit-vintage-engine</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>junit</groupId>
+          <artifactId>junit</artifactId>
+        </exclusion>
+      </exclusions>
     </dependency>
     <dependency>
       <groupId>org.assertj</groupId>
@@ -447,6 +495,23 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
+      <plugin>
+        <groupId>org.openrewrite.maven</groupId>
+        <artifactId>rewrite-maven-plugin</artifactId>
+        <version>4.9.0</version>
+        <configuration>
+          <activeRecipes>
+            <recipe>org.openrewrite.java.testing.junit5.JUnit5BestPractices</recipe>
+          </activeRecipes>
+        </configuration>
+        <dependencies>
+          <dependency>
+            <groupId>org.openrewrite.recipe</groupId>
+            <artifactId>rewrite-testing-frameworks</artifactId>
+            <version>1.7.1</version>
+          </dependency>
+        </dependencies>
+      </plugin>
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/TestGenericRefresh.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/TestGenericRefresh.java
index dcd91c7d848bb..ad168b6b51a27 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/TestGenericRefresh.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/TestGenericRefresh.java
@@ -18,10 +18,8 @@
package org.apache.hadoop;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
@@ -31,11 +29,7 @@
import org.apache.hadoop.ipc.RefreshRegistry;
import org.apache.hadoop.ipc.RefreshResponse;
-import org.junit.Test;
-import org.junit.Before;
-import org.junit.After;
-import org.junit.BeforeClass;
-import org.junit.AfterClass;
+import org.junit.jupiter.api.*;
import org.mockito.Mockito;
/**
@@ -51,7 +45,7 @@ public class TestGenericRefresh {
private static RefreshHandler firstHandler;
private static RefreshHandler secondHandler;
- @BeforeClass
+ @BeforeAll
public static void setUpBeforeClass() throws Exception {
config = new Configuration();
config.set("hadoop.security.authorization", "true");
@@ -61,14 +55,14 @@ public static void setUpBeforeClass() throws Exception {
cluster.waitActive();
}
- @AfterClass
+ @AfterAll
public static void tearDownBeforeClass() throws Exception {
if (cluster != null) {
cluster.shutdown();
}
}
- @Before
+ @BeforeEach
public void setUp() throws Exception {
// Register Handlers, first one just sends an ok response
firstHandler = Mockito.mock(RefreshHandler.class);
@@ -85,7 +79,7 @@ public void setUp() throws Exception {
RefreshRegistry.defaultRegistry().register("secondHandler", secondHandler);
}
- @After
+ @AfterEach
public void tearDown() throws Exception {
RefreshRegistry.defaultRegistry().unregisterAll("firstHandler");
RefreshRegistry.defaultRegistry().unregisterAll("secondHandler");
@@ -96,7 +90,7 @@ public void testInvalidCommand() throws Exception {
DFSAdmin admin = new DFSAdmin(config);
String [] args = new String[]{"-refresh", "nn"};
int exitCode = admin.run(args);
- assertEquals("DFSAdmin should fail due to bad args", -1, exitCode);
+ assertEquals(-1, exitCode, "DFSAdmin should fail due to bad args");
}
@Test
@@ -105,7 +99,7 @@ public void testInvalidIdentifier() throws Exception {
String [] args = new String[]{"-refresh", "localhost:" +
cluster.getNameNodePort(), "unregisteredIdentity"};
int exitCode = admin.run(args);
- assertEquals("DFSAdmin should fail due to no handler registered", -1, exitCode);
+ assertEquals(-1, exitCode, "DFSAdmin should fail due to no handler registered");
}
@Test
@@ -114,7 +108,7 @@ public void testValidIdentifier() throws Exception {
String[] args = new String[]{"-refresh",
"localhost:" + cluster.getNameNodePort(), "firstHandler"};
int exitCode = admin.run(args);
- assertEquals("DFSAdmin should succeed", 0, exitCode);
+ assertEquals(0, exitCode, "DFSAdmin should succeed");
Mockito.verify(firstHandler).handleRefresh("firstHandler", new String[]{});
// Second handler was never called
@@ -128,11 +122,11 @@ public void testVariableArgs() throws Exception {
String[] args = new String[]{"-refresh", "localhost:" +
cluster.getNameNodePort(), "secondHandler", "one"};
int exitCode = admin.run(args);
- assertEquals("DFSAdmin should return 2", 2, exitCode);
+ assertEquals(2, exitCode, "DFSAdmin should return 2");
exitCode = admin.run(new String[]{"-refresh", "localhost:" +
cluster.getNameNodePort(), "secondHandler", "one", "two"});
- assertEquals("DFSAdmin should now return 3", 3, exitCode);
+ assertEquals(3, exitCode, "DFSAdmin should now return 3");
Mockito.verify(secondHandler).handleRefresh("secondHandler", new String[]{"one"});
Mockito.verify(secondHandler).handleRefresh("secondHandler", new String[]{"one", "two"});
@@ -147,7 +141,7 @@ public void testUnregistration() throws Exception {
String[] args = new String[]{"-refresh", "localhost:" +
cluster.getNameNodePort(), "firstHandler"};
int exitCode = admin.run(args);
- assertEquals("DFSAdmin should return -1", -1, exitCode);
+ assertEquals(-1, exitCode, "DFSAdmin should return -1");
}
@Test
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/TestRefreshCallQueue.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/TestRefreshCallQueue.java
index e21a5a307308a..c0fc178a46d86 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/TestRefreshCallQueue.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/TestRefreshCallQueue.java
@@ -18,10 +18,7 @@
package org.apache.hadoop;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.*;
import java.io.IOException;
import java.net.BindException;
@@ -40,8 +37,8 @@
import org.apache.hadoop.ipc.FairCallQueue;
import org.apache.hadoop.metrics2.MetricsException;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
-import org.junit.After;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Test;
public class TestRefreshCallQueue {
private MiniDFSCluster cluster;
@@ -77,7 +74,7 @@ private void setUp(Class<?> queueClass) throws IOException {
}
}
- @After
+ @AfterEach
public void tearDown() throws IOException {
if (cluster != null) {
cluster.shutdown();
@@ -115,9 +112,9 @@ public void testRefresh() throws Exception {
mockQueuePuts = 0;
setUp(MockCallQueue.class);
- assertTrue("Mock queue should have been constructed",
- mockQueueConstructions > 0);
- assertTrue("Puts are routed through MockQueue", canPutInMockQueue());
+ assertTrue(
+ mockQueueConstructions > 0, "Mock queue should have been constructed");
+ assertTrue(canPutInMockQueue(), "Puts are routed through MockQueue");
int lastMockQueueConstructions = mockQueueConstructions;
// Replace queue with the queue specified in core-site.xml, which would be
@@ -125,13 +122,13 @@ public void testRefresh() throws Exception {
DFSAdmin admin = new DFSAdmin(config);
String [] args = new String[]{"-refreshCallQueue"};
int exitCode = admin.run(args);
- assertEquals("DFSAdmin should return 0", 0, exitCode);
+ assertEquals(0, exitCode, "DFSAdmin should return 0");
- assertEquals("Mock queue should have no additional constructions",
- lastMockQueueConstructions, mockQueueConstructions);
+ assertEquals(
+ lastMockQueueConstructions, mockQueueConstructions, "Mock queue should have no additional constructions");
try {
- assertFalse("Puts are routed through LBQ instead of MockQueue",
- canPutInMockQueue());
+ assertFalse(
+ canPutInMockQueue(), "Puts are routed through LBQ instead of MockQueue");
} catch (IOException ioe) {
fail("Could not put into queue at all");
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestAclCLI.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestAclCLI.java
index 9cf2180ff590c..10dfa93a16852 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestAclCLI.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestAclCLI.java
@@ -22,9 +22,9 @@
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
public class TestAclCLI extends CLITestHelperDFS {
private MiniDFSCluster cluster = null;
@@ -38,7 +38,7 @@ protected void initConf() {
DFSConfigKeys.DFS_NAMENODE_POSIX_ACL_INHERITANCE_ENABLED_KEY, false);
}
- @Before
+ @BeforeEach
@Override
public void setUp() throws Exception {
super.setUp();
@@ -49,7 +49,7 @@ public void setUp() throws Exception {
username = System.getProperty("user.name");
}
- @After
+ @AfterEach
@Override
public void tearDown() throws Exception {
super.tearDown();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestAclCLIWithPosixAclInheritance.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestAclCLIWithPosixAclInheritance.java
index ec31766576d2c..f05c8d445de72 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestAclCLIWithPosixAclInheritance.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestAclCLIWithPosixAclInheritance.java
@@ -17,9 +17,9 @@
*/
package org.apache.hadoop.cli;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_POSIX_ACL_INHERITANCE_ENABLED_KEY;
+import org.junit.jupiter.api.Test;
-import org.junit.Test;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_POSIX_ACL_INHERITANCE_ENABLED_KEY;
/**
* Test ACL CLI with POSIX ACL inheritance enabled.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestCacheAdminCLI.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestCacheAdminCLI.java
index 2f8dfa5b36bdf..fb16c38c05fca 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestCacheAdminCLI.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestCacheAdminCLI.java
@@ -18,7 +18,7 @@
package org.apache.hadoop.cli;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertTrue;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -37,9 +37,9 @@
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.tools.CacheAdmin;
import org.apache.hadoop.security.authorize.PolicyProvider;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
import org.xml.sax.SAXException;
public class TestCacheAdminCLI extends CLITestHelper {
@@ -51,7 +51,7 @@ public class TestCacheAdminCLI extends CLITestHelper {
protected FileSystem fs = null;
protected String namenode = null;
- @Before
+ @BeforeEach
@Override
public void setUp() throws Exception {
super.setUp();
@@ -68,11 +68,11 @@ public void setUp() throws Exception {
username = System.getProperty("user.name");
fs = dfsCluster.getFileSystem();
- assertTrue("Not a HDFS: "+fs.getUri(),
- fs instanceof DistributedFileSystem);
+ assertTrue(
+ fs instanceof DistributedFileSystem, "Not a HDFS: " + fs.getUri());
}
- @After
+ @AfterEach
@Override
public void tearDown() throws Exception {
if (fs != null) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestCryptoAdminCLI.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestCryptoAdminCLI.java
index afc668c5f4b85..ee9e2e95546e7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestCryptoAdminCLI.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestCryptoAdminCLI.java
@@ -23,7 +23,7 @@
import java.security.NoSuchAlgorithmException;
import java.util.UUID;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertTrue;
import org.apache.hadoop.cli.util.CLICommand;
import org.apache.hadoop.cli.util.CLICommandCryptoAdmin;
@@ -45,9 +45,9 @@
import org.apache.hadoop.hdfs.tools.CryptoAdmin;
import org.apache.hadoop.security.authorize.PolicyProvider;
import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
import org.xml.sax.SAXException;
public class TestCryptoAdminCLI extends CLITestHelperDFS {
@@ -56,7 +56,7 @@ public class TestCryptoAdminCLI extends CLITestHelperDFS {
protected String namenode = null;
private static File tmpDir;
- @Before
+ @BeforeEach
@Override
public void setUp() throws Exception {
super.setUp();
@@ -78,11 +78,11 @@ public void setUp() throws Exception {
username = System.getProperty("user.name");
fs = dfsCluster.getFileSystem();
- assertTrue("Not an HDFS: " + fs.getUri(),
- fs instanceof DistributedFileSystem);
+ assertTrue(
+ fs instanceof DistributedFileSystem, "Not an HDFS: " + fs.getUri());
}
- @After
+ @AfterEach
@Override
public void tearDown() throws Exception {
if (fs != null) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestDeleteCLI.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestDeleteCLI.java
index 4c27f79e16a83..3172bf25323da 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestDeleteCLI.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestDeleteCLI.java
@@ -18,7 +18,7 @@
package org.apache.hadoop.cli;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertTrue;
import org.apache.hadoop.cli.util.CLICommand;
import org.apache.hadoop.cli.util.CommandExecutor.Result;
@@ -27,16 +27,16 @@
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
public class TestDeleteCLI extends CLITestHelperDFS {
protected MiniDFSCluster dfsCluster = null;
protected FileSystem fs = null;
protected String namenode = null;
- @Before
+ @BeforeEach
@Override
public void setUp() throws Exception {
super.setUp();
@@ -49,11 +49,11 @@ public void setUp() throws Exception {
namenode = conf.get(DFSConfigKeys.FS_DEFAULT_NAME_KEY, "file:///");
fs = dfsCluster.getFileSystem();
- assertTrue("Not an HDFS: " + fs.getUri(),
- fs instanceof DistributedFileSystem);
+ assertTrue(
+ fs instanceof DistributedFileSystem, "Not an HDFS: " + fs.getUri());
}
- @After
+ @AfterEach
@Override
public void tearDown() throws Exception {
if (fs != null) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestErasureCodingCLI.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestErasureCodingCLI.java
index 566755db996f6..c09d184e35d3d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestErasureCodingCLI.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestErasureCodingCLI.java
@@ -24,10 +24,10 @@
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.junit.After;
-import org.junit.Before;
import org.junit.Rule;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
import org.junit.rules.Timeout;
import org.xml.sax.SAXException;
@@ -40,7 +40,7 @@ public class TestErasureCodingCLI extends CLITestHelper {
@Rule
public Timeout globalTimeout = new Timeout(300000);
- @Before
+ @BeforeEach
@Override
public void setUp() throws Exception {
super.setUp();
@@ -62,7 +62,7 @@ protected String getTestFile() {
return "testErasureCodingConf.xml";
}
- @After
+ @AfterEach
@Override
public void tearDown() throws Exception {
if (fs != null) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestHDFSCLI.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestHDFSCLI.java
index e0e78941c7384..f8ef3a3c876e6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestHDFSCLI.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestHDFSCLI.java
@@ -18,7 +18,7 @@
package org.apache.hadoop.cli;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertTrue;
import org.apache.hadoop.cli.util.CLICommand;
import org.apache.hadoop.cli.util.CommandExecutor.Result;
@@ -28,9 +28,9 @@
import org.apache.hadoop.hdfs.HDFSPolicyProvider;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.security.authorize.PolicyProvider;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
public class TestHDFSCLI extends CLITestHelperDFS {
@@ -38,7 +38,7 @@ public class TestHDFSCLI extends CLITestHelperDFS {
protected FileSystem fs = null;
protected String namenode = null;
- @Before
+ @BeforeEach
@Override
public void setUp() throws Exception {
super.setUp();
@@ -63,8 +63,8 @@ public void setUp() throws Exception {
username = System.getProperty("user.name");
fs = dfsCluster.getFileSystem();
- assertTrue("Not a HDFS: "+fs.getUri(),
- fs instanceof DistributedFileSystem);
+ assertTrue(
+ fs instanceof DistributedFileSystem, "Not a HDFS: " + fs.getUri());
}
@Override
@@ -72,7 +72,7 @@ protected String getTestFile() {
return "testHDFSConf.xml";
}
- @After
+ @AfterEach
@Override
public void tearDown() throws Exception {
if (fs != null) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestXAttrCLI.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestXAttrCLI.java
index d83baf3a97162..793d9de3f94ee 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestXAttrCLI.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestXAttrCLI.java
@@ -18,7 +18,7 @@
package org.apache.hadoop.cli;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertTrue;
import org.apache.hadoop.cli.util.CLICommand;
import org.apache.hadoop.cli.util.CommandExecutor.Result;
@@ -28,16 +28,16 @@
import org.apache.hadoop.hdfs.HDFSPolicyProvider;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.security.authorize.PolicyProvider;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
public class TestXAttrCLI extends CLITestHelperDFS {
protected MiniDFSCluster dfsCluster = null;
protected FileSystem fs = null;
protected String namenode = null;
- @Before
+ @BeforeEach
@Override
public void setUp() throws Exception {
super.setUp();
@@ -53,8 +53,8 @@ public void setUp() throws Exception {
username = System.getProperty("user.name");
fs = dfsCluster.getFileSystem();
- assertTrue("Not a HDFS: "+fs.getUri(),
- fs instanceof DistributedFileSystem);
+ assertTrue(
+ fs instanceof DistributedFileSystem, "Not a HDFS: " + fs.getUri());
}
@Override
@@ -62,7 +62,7 @@ protected String getTestFile() {
return "testXAttrConf.xml";
}
- @After
+ @AfterEach
@Override
public void tearDown() throws Exception {
if (fs != null) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestEnhancedByteBufferAccess.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestEnhancedByteBufferAccess.java
index 99b1ddbbc1130..4f658d5062d68 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestEnhancedByteBufferAccess.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestEnhancedByteBufferAccess.java
@@ -65,12 +65,11 @@
import org.apache.hadoop.net.unix.TemporarySocketDirectory;
import org.apache.hadoop.security.token.SecretManager.InvalidToken;
import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.Assume;
-import org.junit.BeforeClass;
import org.junit.Test;
-
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.Assumptions;
+import org.junit.jupiter.api.BeforeAll;
import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
import java.util.function.Supplier;
@@ -85,7 +84,7 @@ public class TestEnhancedByteBufferAccess {
static private CacheManipulator prevCacheManipulator;
- @BeforeClass
+ @BeforeAll
public static void init() {
sockDir = new TemporarySocketDirectory();
DomainSocket.disableBindPathValidation();
@@ -99,7 +98,7 @@ public void mlock(String identifier,
});
}
- @AfterClass
+ @AfterAll
public static void teardown() {
// Restore the original CacheManipulator
NativeIO.POSIX.setCacheManipulator(prevCacheManipulator);
@@ -116,8 +115,8 @@ private static byte[] byteBufferToArray(ByteBuffer buf) {
(int) NativeIO.POSIX.getCacheManipulator().getOperatingSystemPageSize();
public static HdfsConfiguration initZeroCopyTest() {
- Assume.assumeTrue(NativeIO.isAvailable());
- Assume.assumeTrue(SystemUtils.IS_OS_UNIX);
+ Assumptions.assumeTrue(NativeIO.isAvailable());
+ Assumptions.assumeTrue(SystemUtils.IS_OS_UNIX);
HdfsConfiguration conf = new HdfsConfiguration();
conf.setBoolean(HdfsClientConfigKeys.Read.ShortCircuit.KEY, true);
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
@@ -152,10 +151,10 @@ public void testZeroCopyReads() throws Exception {
try {
DFSTestUtil.waitReplication(fs, TEST_PATH, (short)1);
} catch (InterruptedException e) {
- Assert.fail("unexpected InterruptedException during " +
+ Assertions.fail("unexpected InterruptedException during " +
"waitReplication: " + e);
} catch (TimeoutException e) {
- Assert.fail("unexpected TimeoutException during " +
+ Assertions.fail("unexpected TimeoutException during " +
"waitReplication: " + e);
}
fsIn = fs.open(TEST_PATH);
@@ -165,13 +164,13 @@ public void testZeroCopyReads() throws Exception {
fsIn = fs.open(TEST_PATH);
ByteBuffer result = fsIn.read(null, BLOCK_SIZE,
EnumSet.of(ReadOption.SKIP_CHECKSUMS));
- Assert.assertEquals(BLOCK_SIZE, result.remaining());
+ Assertions.assertEquals(BLOCK_SIZE, result.remaining());
HdfsDataInputStream dfsIn = (HdfsDataInputStream)fsIn;
- Assert.assertEquals(BLOCK_SIZE,
+ Assertions.assertEquals(BLOCK_SIZE,
dfsIn.getReadStatistics().getTotalBytesRead());
- Assert.assertEquals(BLOCK_SIZE,
+ Assertions.assertEquals(BLOCK_SIZE,
dfsIn.getReadStatistics().getTotalZeroCopyBytesRead());
- Assert.assertArrayEquals(Arrays.copyOfRange(original, 0, BLOCK_SIZE),
+ Assertions.assertArrayEquals(Arrays.copyOfRange(original, 0, BLOCK_SIZE),
byteBufferToArray(result));
fsIn.releaseBuffer(result);
} finally {
@@ -198,10 +197,10 @@ public void testShortZeroCopyReads() throws Exception {
try {
DFSTestUtil.waitReplication(fs, TEST_PATH, (short)1);
} catch (InterruptedException e) {
- Assert.fail("unexpected InterruptedException during " +
+ Assertions.fail("unexpected InterruptedException during " +
"waitReplication: " + e);
} catch (TimeoutException e) {
- Assert.fail("unexpected TimeoutException during " +
+ Assertions.fail("unexpected TimeoutException during " +
"waitReplication: " + e);
}
fsIn = fs.open(TEST_PATH);
@@ -214,20 +213,20 @@ public void testShortZeroCopyReads() throws Exception {
HdfsDataInputStream dfsIn = (HdfsDataInputStream)fsIn;
ByteBuffer result =
dfsIn.read(null, 2 * BLOCK_SIZE, EnumSet.of(ReadOption.SKIP_CHECKSUMS));
- Assert.assertEquals(BLOCK_SIZE, result.remaining());
- Assert.assertEquals(BLOCK_SIZE,
+ Assertions.assertEquals(BLOCK_SIZE, result.remaining());
+ Assertions.assertEquals(BLOCK_SIZE,
dfsIn.getReadStatistics().getTotalBytesRead());
- Assert.assertEquals(BLOCK_SIZE,
+ Assertions.assertEquals(BLOCK_SIZE,
dfsIn.getReadStatistics().getTotalZeroCopyBytesRead());
- Assert.assertArrayEquals(Arrays.copyOfRange(original, 0, BLOCK_SIZE),
+ Assertions.assertArrayEquals(Arrays.copyOfRange(original, 0, BLOCK_SIZE),
byteBufferToArray(result));
dfsIn.releaseBuffer(result);
// Try to read (1 + ${BLOCK_SIZE}), but only get ${BLOCK_SIZE} because of the block size.
result =
dfsIn.read(null, 1 + BLOCK_SIZE, EnumSet.of(ReadOption.SKIP_CHECKSUMS));
- Assert.assertEquals(BLOCK_SIZE, result.remaining());
- Assert.assertArrayEquals(Arrays.copyOfRange(original, BLOCK_SIZE, 2 * BLOCK_SIZE),
+ Assertions.assertEquals(BLOCK_SIZE, result.remaining());
+ Assertions.assertArrayEquals(Arrays.copyOfRange(original, BLOCK_SIZE, 2 * BLOCK_SIZE),
byteBufferToArray(result));
dfsIn.releaseBuffer(result);
} finally {
@@ -255,10 +254,10 @@ public void testZeroCopyReadsNoFallback() throws Exception {
try {
DFSTestUtil.waitReplication(fs, TEST_PATH, (short)1);
} catch (InterruptedException e) {
- Assert.fail("unexpected InterruptedException during " +
+ Assertions.fail("unexpected InterruptedException during " +
"waitReplication: " + e);
} catch (TimeoutException e) {
- Assert.fail("unexpected TimeoutException during " +
+ Assertions.fail("unexpected TimeoutException during " +
"waitReplication: " + e);
}
fsIn = fs.open(TEST_PATH);
@@ -270,17 +269,17 @@ public void testZeroCopyReadsNoFallback() throws Exception {
ByteBuffer result;
try {
result = dfsIn.read(null, BLOCK_SIZE + 1, EnumSet.noneOf(ReadOption.class));
- Assert.fail("expected UnsupportedOperationException");
+ Assertions.fail("expected UnsupportedOperationException");
} catch (UnsupportedOperationException e) {
// expected
}
result = dfsIn.read(null, BLOCK_SIZE, EnumSet.of(ReadOption.SKIP_CHECKSUMS));
- Assert.assertEquals(BLOCK_SIZE, result.remaining());
- Assert.assertEquals(BLOCK_SIZE,
+ Assertions.assertEquals(BLOCK_SIZE, result.remaining());
+ Assertions.assertEquals(BLOCK_SIZE,
dfsIn.getReadStatistics().getTotalBytesRead());
- Assert.assertEquals(BLOCK_SIZE,
+ Assertions.assertEquals(BLOCK_SIZE,
dfsIn.getReadStatistics().getTotalZeroCopyBytesRead());
- Assert.assertArrayEquals(Arrays.copyOfRange(original, 0, BLOCK_SIZE),
+ Assertions.assertArrayEquals(Arrays.copyOfRange(original, 0, BLOCK_SIZE),
byteBufferToArray(result));
} finally {
if (fsIn != null) fsIn.close();
@@ -311,16 +310,16 @@ public void visit(int numOutstandingMmaps,
LinkedMap evictable,
LinkedMap evictableMmapped) {
if (expectedNumOutstandingMmaps >= 0) {
- Assert.assertEquals(expectedNumOutstandingMmaps, numOutstandingMmaps);
+ Assertions.assertEquals(expectedNumOutstandingMmaps, numOutstandingMmaps);
}
if (expectedNumReplicas >= 0) {
- Assert.assertEquals(expectedNumReplicas, replicas.size());
+ Assertions.assertEquals(expectedNumReplicas, replicas.size());
}
if (expectedNumEvictable >= 0) {
- Assert.assertEquals(expectedNumEvictable, evictable.size());
+ Assertions.assertEquals(expectedNumEvictable, evictable.size());
}
if (expectedNumMmapedEvictable >= 0) {
- Assert.assertEquals(expectedNumMmapedEvictable, evictableMmapped.size());
+ Assertions.assertEquals(expectedNumMmapedEvictable, evictableMmapped.size());
}
}
}
@@ -346,10 +345,10 @@ public void testZeroCopyMmapCache() throws Exception {
try {
DFSTestUtil.waitReplication(fs, TEST_PATH, (short)1);
} catch (InterruptedException e) {
- Assert.fail("unexpected InterruptedException during " +
+ Assertions.fail("unexpected InterruptedException during " +
"waitReplication: " + e);
} catch (TimeoutException e) {
- Assert.fail("unexpected TimeoutException during " +
+ Assertions.fail("unexpected TimeoutException during " +
"waitReplication: " + e);
}
fsIn = fs.open(TEST_PATH);
@@ -378,10 +377,10 @@ public void visit(int numOutstandingMmaps,
LinkedMap evictableMmapped) {
ShortCircuitReplica replica = replicas.get(
new ExtendedBlockId(firstBlock.getBlockId(), firstBlock.getBlockPoolId()));
- Assert.assertNotNull(replica);
- Assert.assertTrue(replica.hasMmap());
+ Assertions.assertNotNull(replica);
+ Assertions.assertTrue(replica.hasMmap());
// The replica should not yet be evictable, since we have it open.
- Assert.assertNull(replica.getEvictableTimeNs());
+ Assertions.assertNull(replica.getEvictableTimeNs());
}
});
@@ -449,10 +448,10 @@ public void testHdfsFallbackReads() throws Exception {
try {
DFSTestUtil.waitReplication(fs, TEST_PATH, (short)1);
} catch (InterruptedException e) {
- Assert.fail("unexpected InterruptedException during " +
+ Assertions.fail("unexpected InterruptedException during " +
"waitReplication: " + e);
} catch (TimeoutException e) {
- Assert.fail("unexpected TimeoutException during " +
+ Assertions.fail("unexpected TimeoutException during " +
"waitReplication: " + e);
}
fsIn = fs.open(TEST_PATH);
@@ -493,22 +492,22 @@ private static void testFallbackImpl(InputStream stream,
stream instanceof ByteBufferReadable);
ByteBuffer result = ByteBufferUtil.fallbackRead(stream, bufferPool, 10);
- Assert.assertEquals(10, result.remaining());
- Assert.assertArrayEquals(Arrays.copyOfRange(original, 0, 10),
+ Assertions.assertEquals(10, result.remaining());
+ Assertions.assertArrayEquals(Arrays.copyOfRange(original, 0, 10),
byteBufferToArray(result));
result = ByteBufferUtil.fallbackRead(stream, bufferPool, 5000);
- Assert.assertEquals(5000, result.remaining());
- Assert.assertArrayEquals(Arrays.copyOfRange(original, 10, 5010),
+ Assertions.assertEquals(5000, result.remaining());
+ Assertions.assertArrayEquals(Arrays.copyOfRange(original, 10, 5010),
byteBufferToArray(result));
result = ByteBufferUtil.fallbackRead(stream, bufferPool, 9999999);
- Assert.assertEquals(11375, result.remaining());
- Assert.assertArrayEquals(Arrays.copyOfRange(original, 5010, 16385),
+ Assertions.assertEquals(11375, result.remaining());
+ Assertions.assertArrayEquals(Arrays.copyOfRange(original, 5010, 16385),
byteBufferToArray(result));
result = ByteBufferUtil.fallbackRead(stream, bufferPool, 10);
- Assert.assertNull(result);
+ Assertions.assertNull(result);
}
/**
@@ -533,10 +532,10 @@ public void testFallbackRead() throws Exception {
try {
DFSTestUtil.waitReplication(fs, TEST_PATH, (short)1);
} catch (InterruptedException e) {
- Assert.fail("unexpected InterruptedException during " +
+ Assertions.fail("unexpected InterruptedException during " +
"waitReplication: " + e);
} catch (TimeoutException e) {
- Assert.fail("unexpected TimeoutException during " +
+ Assertions.fail("unexpected TimeoutException during " +
"waitReplication: " + e);
}
fsIn = fs.open(TEST_PATH);
@@ -618,7 +617,7 @@ public void testZeroCopyReadOfCachedData() throws Exception {
try {
result = fsIn.read(null, TEST_FILE_LENGTH / 2,
EnumSet.noneOf(ReadOption.class));
- Assert.fail("expected UnsupportedOperationException");
+ Assertions.fail("expected UnsupportedOperationException");
} catch (UnsupportedOperationException e) {
// expected
}
@@ -637,9 +636,9 @@ public void testZeroCopyReadOfCachedData() throws Exception {
result = fsIn.read(null, TEST_FILE_LENGTH,
EnumSet.noneOf(ReadOption.class));
} catch (UnsupportedOperationException e) {
- Assert.fail("expected to be able to read cached file via zero-copy");
+ Assertions.fail("expected to be able to read cached file via zero-copy");
}
- Assert.assertArrayEquals(Arrays.copyOfRange(original, 0,
+ Assertions.assertArrayEquals(Arrays.copyOfRange(original, 0,
BLOCK_SIZE), byteBufferToArray(result));
// Test that files opened after the cache operation has finished
// still get the benefits of zero-copy (regression test for HDFS-6086)
@@ -648,9 +647,9 @@ public void testZeroCopyReadOfCachedData() throws Exception {
result2 = fsIn2.read(null, TEST_FILE_LENGTH,
EnumSet.noneOf(ReadOption.class));
} catch (UnsupportedOperationException e) {
- Assert.fail("expected to be able to read cached file via zero-copy");
+ Assertions.fail("expected to be able to read cached file via zero-copy");
}
- Assert.assertArrayEquals(Arrays.copyOfRange(original, 0,
+ Assertions.assertArrayEquals(Arrays.copyOfRange(original, 0,
BLOCK_SIZE), byteBufferToArray(result2));
fsIn2.releaseBuffer(result2);
fsIn2.close();
@@ -688,10 +687,10 @@ public void visit(int numOutstandingMmaps,
Map<ExtendedBlockId, InvalidToken> failedLoads,
LinkedMap evictable,
LinkedMap evictableMmapped) {
- Assert.assertEquals(expectedOutstandingMmaps, numOutstandingMmaps);
+ Assertions.assertEquals(expectedOutstandingMmaps, numOutstandingMmaps);
ShortCircuitReplica replica =
replicas.get(ExtendedBlockId.fromExtendedBlock(block));
- Assert.assertNotNull(replica);
+ Assertions.assertNotNull(replica);
Slot slot = replica.getSlot();
if ((expectedIsAnchorable != slot.isAnchorable()) ||
(expectedIsAnchored != slot.isAnchored())) {
@@ -734,7 +733,7 @@ public void testClientMmapDisable() throws Exception {
fsIn = fs.open(TEST_PATH);
try {
fsIn.read(null, 1, EnumSet.of(ReadOption.SKIP_CHECKSUMS));
- Assert.fail("expected zero-copy read to fail when client mmaps " +
+ Assertions.fail("expected zero-copy read to fail when client mmaps " +
"were disabled.");
} catch (UnsupportedOperationException e) {
}
@@ -764,7 +763,7 @@ public void testClientMmapDisable() throws Exception {
// Test EOF behavior
IOUtils.skipFully(fsIn, TEST_FILE_LENGTH - 1);
buf = fsIn.read(null, 1, EnumSet.of(ReadOption.SKIP_CHECKSUMS));
- Assert.assertEquals(null, buf);
+ Assertions.assertEquals(null, buf);
} finally {
if (fsIn != null) fsIn.close();
if (fs != null) fs.close();
@@ -774,7 +773,7 @@ public void testClientMmapDisable() throws Exception {
@Test
public void test2GBMmapLimit() throws Exception {
- Assume.assumeTrue(BlockReaderTestUtil.shouldTestLargeFiles());
+ Assumptions.assumeTrue(BlockReaderTestUtil.shouldTestLargeFiles());
HdfsConfiguration conf = initZeroCopyTest();
final long TEST_FILE_LENGTH = 2469605888L;
conf.set(DFSConfigKeys.DFS_CHECKSUM_TYPE_KEY, "NULL");
@@ -795,20 +794,20 @@ public void test2GBMmapLimit() throws Exception {
fsIn = fs.open(TEST_PATH);
buf1 = fsIn.read(null, 1, EnumSet.of(ReadOption.SKIP_CHECKSUMS));
- Assert.assertEquals(1, buf1.remaining());
+ Assertions.assertEquals(1, buf1.remaining());
fsIn.releaseBuffer(buf1);
buf1 = null;
fsIn.seek(2147483640L);
buf1 = fsIn.read(null, 1024, EnumSet.of(ReadOption.SKIP_CHECKSUMS));
- Assert.assertEquals(7, buf1.remaining());
- Assert.assertEquals(Integer.MAX_VALUE, buf1.limit());
+ Assertions.assertEquals(7, buf1.remaining());
+ Assertions.assertEquals(Integer.MAX_VALUE, buf1.limit());
fsIn.releaseBuffer(buf1);
buf1 = null;
- Assert.assertEquals(2147483647L, fsIn.getPos());
+ Assertions.assertEquals(2147483647L, fsIn.getPos());
try {
buf1 = fsIn.read(null, 1024,
EnumSet.of(ReadOption.SKIP_CHECKSUMS));
- Assert.fail("expected UnsupportedOperationException");
+ Assertions.fail("expected UnsupportedOperationException");
} catch (UnsupportedOperationException e) {
// expected; can't read past 2GB boundary.
}
@@ -825,13 +824,13 @@ public void test2GBMmapLimit() throws Exception {
fsIn2 = fs.open(TEST_PATH2);
fsIn2.seek(2147483640L);
buf2 = fsIn2.read(null, 1024, EnumSet.of(ReadOption.SKIP_CHECKSUMS));
- Assert.assertEquals(8, buf2.remaining());
- Assert.assertEquals(2147483648L, fsIn2.getPos());
+ Assertions.assertEquals(8, buf2.remaining());
+ Assertions.assertEquals(2147483648L, fsIn2.getPos());
fsIn2.releaseBuffer(buf2);
buf2 = null;
buf2 = fsIn2.read(null, 1024, EnumSet.of(ReadOption.SKIP_CHECKSUMS));
- Assert.assertEquals(1024, buf2.remaining());
- Assert.assertEquals(2147484672L, fsIn2.getPos());
+ Assertions.assertEquals(1024, buf2.remaining());
+ Assertions.assertEquals(2147484672L, fsIn2.getPos());
fsIn2.releaseBuffer(buf2);
buf2 = null;
} finally {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestFcHdfsCreateMkdir.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestFcHdfsCreateMkdir.java
index 0d3e6ff8379ff..2d2cb4d0afa47 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestFcHdfsCreateMkdir.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestFcHdfsCreateMkdir.java
@@ -27,10 +27,10 @@
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.security.UserGroupInformation;
-import org.junit.After;
-import org.junit.AfterClass;
-import org.junit.Before;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.BeforeEach;
public class TestFcHdfsCreateMkdir extends
FileContextCreateMkdirBaseTest {
@@ -44,7 +44,7 @@ protected FileContextTestHelper createFileContextHelper() {
}
- @BeforeClass
+ @BeforeAll
public static void clusterSetupAtBegining()
throws IOException, LoginException, URISyntaxException {
Configuration conf = new HdfsConfiguration();
@@ -56,7 +56,7 @@ public static void clusterSetupAtBegining()
}
- @AfterClass
+ @AfterAll
public static void ClusterShutdownAtEnd() throws Exception {
if (cluster != null) {
cluster.shutdown();
@@ -64,13 +64,13 @@ public static void ClusterShutdownAtEnd() throws Exception {
}
@Override
- @Before
+ @BeforeEach
public void setUp() throws Exception {
super.setUp();
}
@Override
- @After
+ @AfterEach
public void tearDown() throws Exception {
super.tearDown();
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestFcHdfsPermission.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestFcHdfsPermission.java
index 10ae1ef4b1e5e..5ff51a0a2af19 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestFcHdfsPermission.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestFcHdfsPermission.java
@@ -27,10 +27,10 @@
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.security.UserGroupInformation;
-import org.junit.After;
-import org.junit.AfterClass;
-import org.junit.Before;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.BeforeEach;
public class TestFcHdfsPermission extends FileContextPermissionBase {
@@ -51,7 +51,7 @@ protected FileContext getFileContext() {
return fc;
}
- @BeforeClass
+ @BeforeAll
public static void clusterSetupAtBegining()
throws IOException, LoginException, URISyntaxException {
Configuration conf = new HdfsConfiguration();
@@ -63,7 +63,7 @@ public static void clusterSetupAtBegining()
}
- @AfterClass
+ @AfterAll
public static void ClusterShutdownAtEnd() throws Exception {
if (cluster != null) {
cluster.shutdown();
@@ -71,13 +71,13 @@ public static void ClusterShutdownAtEnd() throws Exception {
}
@Override
- @Before
+ @BeforeEach
public void setUp() throws Exception {
super.setUp();
}
@Override
- @After
+ @AfterEach
public void tearDown() throws Exception {
super.tearDown();
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestFcHdfsSetUMask.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestFcHdfsSetUMask.java
index eef22356778bf..cbd84897fcd2c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestFcHdfsSetUMask.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestFcHdfsSetUMask.java
@@ -30,12 +30,13 @@
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.security.UserGroupInformation;
import static org.apache.hadoop.fs.FileContextTestHelper.*;
-import org.junit.After;
-import org.junit.AfterClass;
import org.junit.Assert;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
public class TestFcHdfsSetUMask {
@@ -78,7 +79,7 @@ public class TestFcHdfsSetUMask {
private static final FsPermission WIDE_OPEN_TEST_UMASK = FsPermission
.createImmutable((short) (0777 ^ 0777));
- @BeforeClass
+ @BeforeAll
public static void clusterSetupAtBegining()
throws IOException, LoginException, URISyntaxException {
Configuration conf = new HdfsConfiguration();
@@ -91,20 +92,20 @@ public static void clusterSetupAtBegining()
fc.mkdir(defaultWorkingDirectory, FileContext.DEFAULT_PERM, true);
}
- @AfterClass
+ @AfterAll
public static void ClusterShutdownAtEnd() throws Exception {
if (cluster != null) {
cluster.shutdown();
}
}
- @Before
+ @BeforeEach
public void setUp() throws Exception {
fc.setUMask(WIDE_OPEN_TEST_UMASK);
fc.mkdir(fileContextTestHelper.getTestRootPath(fc), FileContext.DEFAULT_PERM, true);
}
- @After
+ @AfterEach
public void tearDown() throws Exception {
fc.delete(fileContextTestHelper.getTestRootPath(fc), true);
}
@@ -194,8 +195,8 @@ public void testMkdirWithExistingDir(FsPermission umask,
fc.setUMask(umask);
fc.mkdir(f, FileContext.DEFAULT_PERM, true);
Assert.assertTrue(isDir(fc, f));
- Assert.assertEquals("permissions on directory are wrong",
- expectedPerms, fc.getFileStatus(f).getPermission());
+ Assertions.assertEquals(
+ expectedPerms, fc.getFileStatus(f).getPermission(), "permissions on directory are wrong");
}
public void testMkdirRecursiveWithNonExistingDir(FsPermission umask,
@@ -205,11 +206,11 @@ public void testMkdirRecursiveWithNonExistingDir(FsPermission umask,
fc.setUMask(umask);
fc.mkdir(f, FileContext.DEFAULT_PERM, true);
Assert.assertTrue(isDir(fc, f));
- Assert.assertEquals("permissions on directory are wrong",
- expectedPerms, fc.getFileStatus(f).getPermission());
+ Assertions.assertEquals(
+ expectedPerms, fc.getFileStatus(f).getPermission(), "permissions on directory are wrong");
Path fParent = fileContextTestHelper.getTestRootPath(fc, "NonExistant2");
- Assert.assertEquals("permissions on parent directory are wrong",
- expectedParentPerms, fc.getFileStatus(fParent).getPermission());
+ Assertions.assertEquals(
+ expectedParentPerms, fc.getFileStatus(fParent).getPermission(), "permissions on parent directory are wrong");
}
@@ -219,8 +220,8 @@ public void testCreateRecursiveWithExistingDir(FsPermission umask,
fc.setUMask(umask);
createFile(fc, f);
Assert.assertTrue(isFile(fc, f));
- Assert.assertEquals("permissions on file are wrong",
- expectedPerms , fc.getFileStatus(f).getPermission());
+ Assertions.assertEquals(
+ expectedPerms, fc.getFileStatus(f).getPermission(), "permissions on file are wrong");
}
@@ -233,10 +234,10 @@ public void testCreateRecursiveWithNonExistingDir(FsPermission umask,
fc.setUMask(umask);
createFile(fc, f);
Assert.assertTrue(isFile(fc, f));
- Assert.assertEquals("permissions on file are wrong",
- expectedFilePerms, fc.getFileStatus(f).getPermission());
- Assert.assertEquals("permissions on parent directory are wrong",
- expectedDirPerms, fc.getFileStatus(fParent).getPermission());
+ Assertions.assertEquals(
+ expectedFilePerms, fc.getFileStatus(f).getPermission(), "permissions on file are wrong");
+ Assertions.assertEquals(
+ expectedDirPerms, fc.getFileStatus(fParent).getPermission(), "permissions on parent directory are wrong");
}
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestGlobPaths.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestGlobPaths.java
index f8adf017332d1..9006052f1d4c8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestGlobPaths.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestGlobPaths.java
@@ -17,15 +17,6 @@
*/
package org.apache.hadoop.fs;
-import static org.apache.hadoop.test.PlatformAssumptions.assumeNotWindows;
-import static org.junit.Assert.*;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.UUID;
-import java.util.regex.Pattern;
-
-import org.apache.hadoop.thirdparty.com.google.common.collect.Ordering;
import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.permission.FsPermission;
@@ -34,7 +25,23 @@
import org.apache.hadoop.hdfs.server.namenode.INodeId;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.UserGroupInformation;
-import org.junit.*;
+import org.apache.hadoop.thirdparty.com.google.common.collect.Ordering;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Disabled;
+import org.junit.jupiter.api.Test;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.UUID;
+import java.util.regex.Pattern;
+
+import static org.apache.hadoop.test.PlatformAssumptions.assumeNotWindows;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertNull;
+import static org.junit.jupiter.api.Assertions.assertTrue;
public class TestGlobPaths {
@@ -65,7 +72,7 @@ public boolean accept(Path path) {
static private String USER_DIR;
private final Path[] path = new Path[NUM_OF_PATHS];
- @BeforeClass
+ @BeforeAll
public static void setUp() throws Exception {
final Configuration conf = new HdfsConfiguration();
dfsCluster = new MiniDFSCluster.Builder(conf).build();
@@ -81,7 +88,7 @@ public static void setUp() throws Exception {
USER_DIR = fs.getHomeDirectory().toUri().getPath().toString();
}
- @AfterClass
+ @AfterAll
public static void tearDown() throws Exception {
if(dfsCluster!=null) {
dfsCluster.shutdown();
@@ -102,8 +109,8 @@ public void testCRInPathGlob() throws IOException {
fs.createNewFile(fNormal);
fs.createNewFile(fWithCR);
statuses = fs.globStatus(new Path(d1, "f1*"));
- assertEquals("Expected both normal and CR-carrying files in result: ",
- 2, statuses.length);
+ assertEquals(
+ 2, statuses.length, "Expected both normal and CR-carrying files in result: ");
cleanupDFS();
}
@@ -892,14 +899,14 @@ void run() throws Exception {
// Test simple glob
FileStatus[] statuses = wrap.globStatus(new Path(USER_DIR + "/alpha/*"),
new AcceptAllPathFilter());
- Assert.assertEquals(1, statuses.length);
- Assert.assertEquals(USER_DIR + "/alpha/beta", statuses[0].getPath()
+ Assertions.assertEquals(1, statuses.length);
+ Assertions.assertEquals(USER_DIR + "/alpha/beta", statuses[0].getPath()
.toUri().getPath());
// Test glob through symlink
statuses = wrap.globStatus(new Path(USER_DIR + "/alphaLink/*"),
new AcceptAllPathFilter());
- Assert.assertEquals(1, statuses.length);
- Assert.assertEquals(USER_DIR + "/alphaLink/beta", statuses[0].getPath()
+ Assertions.assertEquals(1, statuses.length);
+ Assertions.assertEquals(USER_DIR + "/alphaLink/beta", statuses[0].getPath()
.toUri().getPath());
// If the terminal path component in a globbed path is a symlink,
// we don't dereference that link.
@@ -907,20 +914,20 @@ void run() throws Exception {
+ "/alphaLink/betaLink"), false);
statuses = wrap.globStatus(new Path(USER_DIR + "/alpha/betaLi*"),
new AcceptAllPathFilter());
- Assert.assertEquals(1, statuses.length);
- Assert.assertEquals(USER_DIR + "/alpha/betaLink", statuses[0].getPath()
+ Assertions.assertEquals(1, statuses.length);
+ Assertions.assertEquals(USER_DIR + "/alpha/betaLink", statuses[0].getPath()
.toUri().getPath());
// todo: test symlink-to-symlink-to-dir, etc.
}
}
- @Ignore
+ @Disabled
@Test
public void testGlobWithSymlinksOnFS() throws Exception {
testOnFileSystem(new TestGlobWithSymlinks(false));
}
- @Ignore
+ @Disabled
@Test
public void testGlobWithSymlinksOnFC() throws Exception {
testOnFileContext(new TestGlobWithSymlinks(true));
@@ -951,20 +958,20 @@ void run() throws Exception {
// Test glob through symlink to a symlink to a directory
FileStatus statuses[] = wrap.globStatus(new Path(USER_DIR
+ "/alphaLinkLink"), new AcceptAllPathFilter());
- Assert.assertEquals(1, statuses.length);
- Assert.assertEquals(USER_DIR + "/alphaLinkLink", statuses[0].getPath()
+ Assertions.assertEquals(1, statuses.length);
+ Assertions.assertEquals(USER_DIR + "/alphaLinkLink", statuses[0].getPath()
.toUri().getPath());
statuses = wrap.globStatus(new Path(USER_DIR + "/alphaLinkLink/*"),
new AcceptAllPathFilter());
- Assert.assertEquals(1, statuses.length);
- Assert.assertEquals(USER_DIR + "/alphaLinkLink/beta", statuses[0]
+ Assertions.assertEquals(1, statuses.length);
+ Assertions.assertEquals(USER_DIR + "/alphaLinkLink/beta", statuses[0]
.getPath().toUri().getPath());
// Test glob of dangling symlink (theta does not actually exist)
wrap.createSymlink(new Path(USER_DIR + "theta"), new Path(USER_DIR
+ "/alpha/kappa"), false);
statuses = wrap.globStatus(new Path(USER_DIR + "/alpha/kappa/kappa"),
new AcceptAllPathFilter());
- Assert.assertNull(statuses);
+ Assertions.assertNull(statuses);
// Test glob of symlinks
wrap.createFile(USER_DIR + "/alpha/beta/gamma");
wrap.createSymlink(new Path(USER_DIR + "gamma"), new Path(USER_DIR
@@ -975,12 +982,12 @@ void run() throws Exception {
USER_DIR + "/alpha/beta/gammaLinkLinkLink"), false);
statuses = wrap.globStatus(new Path(USER_DIR
+ "/alpha/*/gammaLinkLinkLink"), new AcceptAllPathFilter());
- Assert.assertEquals(1, statuses.length);
- Assert.assertEquals(USER_DIR + "/alpha/beta/gammaLinkLinkLink",
+ Assertions.assertEquals(1, statuses.length);
+ Assertions.assertEquals(USER_DIR + "/alpha/beta/gammaLinkLinkLink",
statuses[0].getPath().toUri().getPath());
statuses = wrap.globStatus(new Path(USER_DIR + "/alpha/beta/*"),
new AcceptAllPathFilter());
- Assert.assertEquals(USER_DIR + "/alpha/beta/gamma;" + USER_DIR
+ Assertions.assertEquals(USER_DIR + "/alpha/beta/gamma;" + USER_DIR
+ "/alpha/beta/gammaLink;" + USER_DIR + "/alpha/beta/gammaLinkLink;"
+ USER_DIR + "/alpha/beta/gammaLinkLinkLink",
TestPath.mergeStatuses(statuses));
@@ -992,17 +999,17 @@ void run() throws Exception {
statuses = wrap.globStatus(
new Path(USER_DIR + "/tweedledee/unobtainium"),
new AcceptAllPathFilter());
- Assert.assertNull(statuses);
+ Assertions.assertNull(statuses);
}
}
- @Ignore
+ @Disabled
@Test
public void testGlobWithSymlinksToSymlinksOnFS() throws Exception {
testOnFileSystem(new TestGlobWithSymlinksToSymlinks(false));
}
- @Ignore
+ @Disabled
@Test
public void testGlobWithSymlinksToSymlinksOnFC() throws Exception {
testOnFileContext(new TestGlobWithSymlinksToSymlinks(true));
@@ -1032,31 +1039,31 @@ void run() throws Exception {
// PathFilter
FileStatus statuses[] = wrap.globStatus(
new Path(USER_DIR + "/alpha/beta"), new AcceptPathsEndingInZ());
- Assert.assertNull(statuses);
+ Assertions.assertNull(statuses);
statuses = wrap.globStatus(new Path(USER_DIR + "/alphaLinkz/betaz"),
new AcceptPathsEndingInZ());
- Assert.assertEquals(1, statuses.length);
- Assert.assertEquals(USER_DIR + "/alphaLinkz/betaz", statuses[0].getPath()
+ Assertions.assertEquals(1, statuses.length);
+ Assertions.assertEquals(USER_DIR + "/alphaLinkz/betaz", statuses[0].getPath()
.toUri().getPath());
statuses = wrap.globStatus(new Path(USER_DIR + "/*/*"),
new AcceptPathsEndingInZ());
- Assert.assertEquals(USER_DIR + "/alpha/betaz;" + USER_DIR
+ Assertions.assertEquals(USER_DIR + "/alpha/betaz;" + USER_DIR
+ "/alphaLinkz/betaz", TestPath.mergeStatuses(statuses));
statuses = wrap.globStatus(new Path(USER_DIR + "/*/*"),
new AcceptAllPathFilter());
- Assert.assertEquals(USER_DIR + "/alpha/beta;" + USER_DIR
+ Assertions.assertEquals(USER_DIR + "/alpha/beta;" + USER_DIR
+ "/alpha/betaz;" + USER_DIR + "/alphaLinkz/beta;" + USER_DIR
+ "/alphaLinkz/betaz", TestPath.mergeStatuses(statuses));
}
}
- @Ignore
+ @Disabled
@Test
public void testGlobSymlinksWithCustomPathFilterOnFS() throws Exception {
testOnFileSystem(new TestGlobSymlinksWithCustomPathFilter(false));
}
- @Ignore
+ @Disabled
@Test
public void testGlobSymlinksWithCustomPathFilterOnFC() throws Exception {
testOnFileContext(new TestGlobSymlinksWithCustomPathFilter(true));
@@ -1078,22 +1085,22 @@ void run() throws Exception {
+ "/alphaLink"), false);
FileStatus statuses[] = wrap.globStatus(
new Path(USER_DIR + "/alphaLink"), new AcceptAllPathFilter());
- Assert.assertEquals(1, statuses.length);
+ Assertions.assertEquals(1, statuses.length);
Path path = statuses[0].getPath();
- Assert.assertEquals(USER_DIR + "/alpha", path.toUri().getPath());
- Assert.assertEquals("hdfs", path.toUri().getScheme());
+ Assertions.assertEquals(USER_DIR + "/alpha", path.toUri().getPath());
+ Assertions.assertEquals("hdfs", path.toUri().getScheme());
// FileContext can list a file:/// URI.
// Since everyone should have the root directory, we list that.
statuses = fc.util().globStatus(new Path("file:///"),
new AcceptAllPathFilter());
- Assert.assertEquals(1, statuses.length);
+ Assertions.assertEquals(1, statuses.length);
Path filePath = statuses[0].getPath();
- Assert.assertEquals("file", filePath.toUri().getScheme());
- Assert.assertEquals("/", filePath.toUri().getPath());
+ Assertions.assertEquals("file", filePath.toUri().getScheme());
+ Assertions.assertEquals("/", filePath.toUri().getPath());
// The FileSystem should have scheme 'hdfs'
- Assert.assertEquals("hdfs", fs.getScheme());
+ Assertions.assertEquals("hdfs", fs.getScheme());
}
}
@@ -1176,16 +1183,15 @@ void run() throws Exception {
try {
wrap.globStatus(new Path("/no*/*"),
new AcceptAllPathFilter());
- Assert.fail("expected to get an AccessControlException when " +
+ Assertions.fail("expected to get an AccessControlException when " +
"globbing through a directory we don't have permissions " +
"to list.");
} catch (AccessControlException ioe) {
}
- Assert.assertEquals("/norestrictions/val",
- TestPath.mergeStatuses(wrap.globStatus(
- new Path("/norestrictions/*"),
- new AcceptAllPathFilter())));
+ Assertions.assertEquals("/norestrictions/val", TestPath.mergeStatuses(
+ wrap.globStatus(new Path("/norestrictions/*"),
+ new AcceptAllPathFilter())));
}
}
@@ -1209,9 +1215,8 @@ private class TestReservedHdfsPaths extends FSTestWrapperGlobTest {
void run() throws Exception {
String reservedRoot = "/.reserved/.inodes/" + INodeId.ROOT_INODE_ID;
- Assert.assertEquals(reservedRoot,
- TestPath.mergeStatuses(wrap.
- globStatus(new Path(reservedRoot), new AcceptAllPathFilter())));
+ Assertions.assertEquals(reservedRoot, TestPath.mergeStatuses(
+ wrap.globStatus(new Path(reservedRoot), new AcceptAllPathFilter())));
}
}
@@ -1240,8 +1245,8 @@ void run() throws Exception {
privWrap.setOwner(new Path("/"), newOwner, null);
FileStatus[] status =
wrap.globStatus(rootPath, new AcceptAllPathFilter());
- Assert.assertEquals(1, status.length);
- Assert.assertEquals(newOwner, status[0].getOwner());
+ Assertions.assertEquals(1, status.length);
+ Assertions.assertEquals(newOwner, status[0].getOwner());
privWrap.setOwner(new Path("/"), oldRootStatus.getOwner(), null);
}
}
@@ -1273,8 +1278,8 @@ void run() throws Exception {
FileStatus[] statuses =
wrap.globStatus(new Path("/filed*/alpha"),
new AcceptAllPathFilter());
- Assert.assertEquals(1, statuses.length);
- Assert.assertEquals("/filed_away/alpha", statuses[0].getPath()
+ Assertions.assertEquals(1, statuses.length);
+ Assertions.assertEquals("/filed_away/alpha", statuses[0].getPath()
.toUri().getPath());
privWrap.mkdir(new Path("/filed_away/alphabet"),
new FsPermission((short)0777), true);
@@ -1282,8 +1287,8 @@ void run() throws Exception {
new FsPermission((short)0777), true);
statuses = wrap.globStatus(new Path("/filed*/alph*/*b*"),
new AcceptAllPathFilter());
- Assert.assertEquals(1, statuses.length);
- Assert.assertEquals("/filed_away/alphabet/abc", statuses[0].getPath()
+ Assertions.assertEquals(1, statuses.length);
+ Assertions.assertEquals("/filed_away/alphabet/abc", statuses[0].getPath()
.toUri().getPath());
} finally {
privWrap.delete(new Path("/filed"), true);
@@ -1308,12 +1313,12 @@ public void testLocalFilesystem() throws Exception {
FileSystem fs = FileSystem.getLocal(conf);
String localTmp = System.getProperty("java.io.tmpdir");
Path base = new Path(new Path(localTmp), UUID.randomUUID().toString());
- Assert.assertTrue(fs.mkdirs(base));
- Assert.assertTrue(fs.mkdirs(new Path(base, "e")));
- Assert.assertTrue(fs.mkdirs(new Path(base, "c")));
- Assert.assertTrue(fs.mkdirs(new Path(base, "a")));
- Assert.assertTrue(fs.mkdirs(new Path(base, "d")));
- Assert.assertTrue(fs.mkdirs(new Path(base, "b")));
+ Assertions.assertTrue(fs.mkdirs(base));
+ Assertions.assertTrue(fs.mkdirs(new Path(base, "e")));
+ Assertions.assertTrue(fs.mkdirs(new Path(base, "c")));
+ Assertions.assertTrue(fs.mkdirs(new Path(base, "a")));
+ Assertions.assertTrue(fs.mkdirs(new Path(base, "d")));
+ Assertions.assertTrue(fs.mkdirs(new Path(base, "b")));
fs.deleteOnExit(base);
FileStatus[] status = fs.globStatus(new Path(base, "*"));
ArrayList list = new ArrayList();
@@ -1321,7 +1326,7 @@ public void testLocalFilesystem() throws Exception {
list.add(f.getPath().toString());
}
boolean sorted = Ordering.natural().isOrdered(list);
- Assert.assertTrue(sorted);
+ Assertions.assertTrue(sorted);
}
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestHDFSFileContextMainOperations.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestHDFSFileContextMainOperations.java
index 8c37351f41f36..ed4a1d0d1de49 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestHDFSFileContextMainOperations.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestHDFSFileContextMainOperations.java
@@ -18,15 +18,6 @@
package org.apache.hadoop.fs;
-import static org.apache.hadoop.fs.FileContextTestHelper.exists;
-import static org.junit.Assert.fail;
-
-import java.io.IOException;
-import java.net.URI;
-import java.net.URISyntaxException;
-
-import javax.security.auth.login.LoginException;
-
import org.apache.hadoop.fs.Options.Rename;
import org.apache.hadoop.hdfs.AppendTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
@@ -36,12 +27,21 @@
import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.security.UserGroupInformation;
-import org.junit.After;
-import org.junit.AfterClass;
import org.junit.Assert;
-import org.junit.Before;
-import org.junit.BeforeClass;
import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.BeforeEach;
+
+import javax.security.auth.login.LoginException;
+import java.io.IOException;
+import java.net.URI;
+import java.net.URISyntaxException;
+
+import static org.apache.hadoop.fs.FileContextTestHelper.exists;
+import static org.junit.jupiter.api.Assertions.fail;
public class TestHDFSFileContextMainOperations extends
FileContextMainOperationsBaseTest {
@@ -54,7 +54,7 @@ protected FileContextTestHelper createFileContextHelper() {
return new FileContextTestHelper("/tmp/TestHDFSFileContextMainOperations");
}
- @BeforeClass
+ @BeforeAll
public static void clusterSetupAtBegining() throws IOException,
LoginException, URISyntaxException {
cluster = new MiniDFSCluster.Builder(CONF).numDataNodes(2).build();
@@ -80,7 +80,7 @@ private static void restartCluster() throws IOException, LoginException {
fc.mkdir(defaultWorkingDirectory, FileContext.DEFAULT_PERM, true);
}
- @AfterClass
+ @AfterAll
public static void ClusterShutdownAtEnd() throws Exception {
if (cluster != null) {
cluster.shutdown();
@@ -89,13 +89,13 @@ public static void ClusterShutdownAtEnd() throws Exception {
}
@Override
- @Before
+ @BeforeEach
public void setUp() throws Exception {
super.setUp();
}
@Override
- @After
+ @AfterEach
public void tearDown() throws Exception {
super.tearDown();
}
@@ -134,16 +134,16 @@ public void testTruncate() throws Exception {
boolean isReady = fc.truncate(file, newLength);
- Assert.assertTrue("Recovery is not expected.", isReady);
+ Assertions.assertTrue(isReady, "Recovery is not expected.");
FileStatus fileStatus = fc.getFileStatus(file);
- Assert.assertEquals(fileStatus.getLen(), newLength);
+ Assertions.assertEquals(fileStatus.getLen(), newLength);
AppendTestUtil.checkFullFile(fs, file, newLength, data, file.toString());
ContentSummary cs = fs.getContentSummary(dir);
- Assert.assertEquals("Bad disk space usage", cs.getSpaceConsumed(),
- newLength * repl);
- Assert.assertTrue(fs.delete(dir, true));
+ Assertions.assertEquals(cs.getSpaceConsumed(),
+ newLength * repl, "Bad disk space usage");
+ Assertions.assertTrue(fs.delete(dir, true));
}
@Test
@@ -280,8 +280,8 @@ public void testEditsLogOldRename() throws Exception {
fs = cluster.getFileSystem();
src1 = getTestRootPath(fc, "testEditsLogOldRename/srcdir/src1");
dst1 = getTestRootPath(fc, "testEditsLogOldRename/dstdir/dst1");
- Assert.assertFalse(fs.exists(src1)); // ensure src1 is already renamed
- Assert.assertTrue(fs.exists(dst1)); // ensure rename dst exists
+ Assertions.assertFalse(fs.exists(src1)); // ensure src1 is already renamed
+ Assertions.assertTrue(fs.exists(dst1)); // ensure rename dst exists
}
/**
@@ -309,8 +309,8 @@ public void testEditsLogRename() throws Exception {
fs = cluster.getFileSystem();
src1 = getTestRootPath(fc, "testEditsLogRename/srcdir/src1");
dst1 = getTestRootPath(fc, "testEditsLogRename/dstdir/dst1");
- Assert.assertFalse(fs.exists(src1)); // ensure src1 is already renamed
- Assert.assertTrue(fs.exists(dst1)); // ensure rename dst exists
+ Assertions.assertFalse(fs.exists(src1)); // ensure src1 is already renamed
+ Assertions.assertTrue(fs.exists(dst1)); // ensure rename dst exists
}
@Test
@@ -324,7 +324,7 @@ public void testIsValidNameInvalidNames() {
for (String invalidName: invalidNames) {
Assert.assertFalse(invalidName + " is not valid",
- fc.getDefaultFileSystem().isValidName(invalidName));
+ fc.getDefaultFileSystem().isValidName(invalidName));
}
}
@@ -332,9 +332,9 @@ private void oldRename(Path src, Path dst, boolean renameSucceeds,
boolean exception) throws Exception {
DistributedFileSystem fs = cluster.getFileSystem();
try {
- Assert.assertEquals(renameSucceeds, fs.rename(src, dst));
+ Assertions.assertEquals(renameSucceeds, fs.rename(src, dst));
} catch (Exception ex) {
- Assert.assertTrue(exception);
+ Assertions.assertTrue(exception);
}
Assert.assertEquals(renameSucceeds, !exists(fc, src));
Assert.assertEquals(renameSucceeds, exists(fc, dst));
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestHdfsNativeCodeLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestHdfsNativeCodeLoader.java
index 4ecca5e55a1db..da030d376c522 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestHdfsNativeCodeLoader.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestHdfsNativeCodeLoader.java
@@ -17,12 +17,12 @@
*/
package org.apache.hadoop.fs;
-import org.junit.Test;
-import static org.junit.Assert.*;
+import static org.junit.jupiter.api.Assertions.fail;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.util.NativeCodeLoader;
+import org.junit.jupiter.api.Test;
public class TestHdfsNativeCodeLoader {
static final Logger LOG =
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestResolveHdfsSymlink.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestResolveHdfsSymlink.java
index 05060af4a90d8..bf4d4be9e0f23 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestResolveHdfsSymlink.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestResolveHdfsSymlink.java
@@ -19,14 +19,14 @@
package org.apache.hadoop.fs;
import java.io.File;
-import static org.junit.Assert.fail;
-
import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.EnumSet;
import java.util.List;
import java.util.Set;
+import static org.junit.jupiter.api.Assertions.fail;
+
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DFSConfigKeys;
@@ -39,10 +39,10 @@
import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdentifier;
import org.apache.hadoop.test.PathUtils;
import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
/**
* Tests whether FileContext can resolve an hdfs path that has a symlink to
@@ -53,7 +53,7 @@ public class TestResolveHdfsSymlink {
private static final FileContextTestHelper helper = new FileContextTestHelper();
private static MiniDFSCluster cluster = null;
- @BeforeClass
+ @BeforeAll
public static void setUp() throws IOException {
Configuration conf = new HdfsConfiguration();
conf.setBoolean(
@@ -63,7 +63,7 @@ public static void setUp() throws IOException {
}
- @AfterClass
+ @AfterAll
public static void tearDown() {
if (cluster != null) {
cluster.shutdown();
@@ -100,11 +100,11 @@ public void testFcResolveAfs() throws IOException, InterruptedException {
Set afsList = fcHdfs
.resolveAbstractFileSystems(alphaHdfsPathViaLink);
- Assert.assertEquals(2, afsList.size());
+ Assertions.assertEquals(2, afsList.size());
for (AbstractFileSystem afs : afsList) {
if ((!afs.equals(fcHdfs.getDefaultFileSystem()))
&& (!afs.equals(fcLocal.getDefaultFileSystem()))) {
- Assert.fail("Failed to resolve AFS correctly");
+ Assertions.fail("Failed to resolve AFS correctly");
}
}
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestSWebHdfsFileContextMainOperations.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestSWebHdfsFileContextMainOperations.java
index 84bd98bf67c78..c1f0c74df0f7a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestSWebHdfsFileContextMainOperations.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestSWebHdfsFileContextMainOperations.java
@@ -25,8 +25,8 @@
import org.apache.hadoop.security.ssl.KeyStoreTestUtil;
import org.apache.hadoop.security.ssl.SSLFactory;
import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
import javax.security.auth.login.LoginException;
import java.io.File;
@@ -57,7 +57,7 @@ public class TestSWebHdfsFileContextMainOperations
protected static final byte[] data = getFileData(numBlocks,
getDefaultBlockSize());
- @BeforeClass
+ @BeforeAll
public static void clusterSetupAtBeginning()
throws IOException, LoginException, URISyntaxException {
@@ -104,7 +104,7 @@ public URI getWebhdfsUrl() {
return webhdfsUrl;
}
- @AfterClass
+ @AfterAll
public static void ClusterShutdownAtEnd() throws Exception {
if (cluster != null) {
cluster.shutdown();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestSymlinkHdfs.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestSymlinkHdfs.java
index fd81a1e23fbcf..2c60bf7115738 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestSymlinkHdfs.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestSymlinkHdfs.java
@@ -17,8 +17,8 @@
*/
package org.apache.hadoop.fs;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.fail;
import java.io.IOException;
import java.net.URI;
@@ -38,9 +38,9 @@
import org.apache.hadoop.hdfs.web.WebHdfsTestUtil;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
import org.slf4j.event.Level;
/**
@@ -84,7 +84,7 @@ protected IOException unwrapException(IOException e) {
return e;
}
- @BeforeClass
+ @BeforeAll
public static void beforeClassSetup() throws Exception {
Configuration conf = new HdfsConfiguration();
conf.set(FsPermission.UMASK_LABEL, "000");
@@ -94,7 +94,7 @@ public static void beforeClassSetup() throws Exception {
dfs = cluster.getFileSystem();
}
- @AfterClass
+ @AfterAll
public static void afterClassTeardown() throws Exception {
if (cluster != null) {
cluster.shutdown();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestSymlinkHdfsDisable.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestSymlinkHdfsDisable.java
index e5a513394a9bf..421c36392195c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestSymlinkHdfsDisable.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestSymlinkHdfsDisable.java
@@ -17,7 +17,7 @@
*/
package org.apache.hadoop.fs;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.fail;
import java.io.IOException;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestSymlinkHdfsFileContext.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestSymlinkHdfsFileContext.java
index 2a3e4c363719d..d5f9c99afe068 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestSymlinkHdfsFileContext.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestSymlinkHdfsFileContext.java
@@ -17,18 +17,18 @@
*/
package org.apache.hadoop.fs;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.fail;
import java.io.IOException;
-import org.junit.BeforeClass;
import org.junit.Test;
+import org.junit.jupiter.api.BeforeAll;
public class TestSymlinkHdfsFileContext extends TestSymlinkHdfs {
private static FileContext fc;
- @BeforeClass
+ @BeforeAll
public static void testSetup() throws Exception {
fc = FileContext.getFileContext(cluster.getURI(0));
wrapper = new FileContextTestWrapper(fc, "/tmp/TestSymlinkHdfsFileContext");
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestSymlinkHdfsFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestSymlinkHdfsFileSystem.java
index fba9c42858d61..3edaf4bb0bf3e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestSymlinkHdfsFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestSymlinkHdfsFileSystem.java
@@ -17,28 +17,27 @@
*/
package org.apache.hadoop.fs;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertTrue;
import java.io.IOException;
-import org.junit.BeforeClass;
-import org.junit.Ignore;
import org.junit.Test;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Disabled;
public class TestSymlinkHdfsFileSystem extends TestSymlinkHdfs {
- @BeforeClass
+ @BeforeAll
public static void testSetup() throws Exception {
wrapper = new FileSystemTestWrapper(dfs, "/tmp/TestSymlinkHdfsFileSystem");
}
@Override
- @Ignore("FileSystem adds missing authority in absolute URIs")
+ @Disabled("FileSystem adds missing authority in absolute URIs")
@Test(timeout=10000)
public void testCreateWithPartQualPathFails() throws IOException {}
- @Ignore("FileSystem#create creates parent directories," +
+ @Disabled("FileSystem#create creates parent directories," +
" so dangling links to directories are created")
@Override
@Test(timeout=10000)
@@ -56,7 +55,7 @@ public void testRecoverLease() throws IOException {
wrapper.createSymlink(file, link, false);
// Attempt recoverLease through a symlink
boolean closed = dfs.recoverLease(link);
- assertTrue("Expected recoverLease to return true", closed);
+ assertTrue(closed, "Expected recoverLease to return true");
}
@Test(timeout=10000)
@@ -69,7 +68,7 @@ public void testIsFileClosed() throws IOException {
wrapper.createSymlink(file, link, false);
// Attempt recoverLease through a symlink
boolean closed = dfs.isFileClosed(link);
- assertTrue("Expected isFileClosed to return true", closed);
+ assertTrue(closed, "Expected isFileClosed to return true");
}
@Test(timeout=10000)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestUnbuffer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestUnbuffer.java
index feb77f8457c9c..18319a8227943 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestUnbuffer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestUnbuffer.java
@@ -26,9 +26,9 @@
import org.apache.hadoop.hdfs.PeerCache;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
import org.apache.hadoop.io.IOUtils;
-import org.junit.Assert;
import org.junit.Rule;
-import org.junit.Test;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.Test;
import org.junit.rules.ExpectedException;
import org.mockito.Mockito;
@@ -72,19 +72,19 @@ public void testUnbufferClosesSockets() throws Exception {
// Read a byte. This will trigger the creation of a block reader.
stream.seek(2);
int b = stream.read();
- Assert.assertTrue(-1 != b);
+ Assertions.assertTrue(-1 != b);
// The Peer cache should start off empty.
PeerCache cache = dfs.getClient().getClientContext().getPeerCache();
- Assert.assertEquals(0, cache.size());
+ Assertions.assertEquals(0, cache.size());
// Unbuffer should clear the block reader and return the socket to the
// cache.
stream.unbuffer();
stream.seek(2);
- Assert.assertEquals(1, cache.size());
+ Assertions.assertEquals(1, cache.size());
int b2 = stream.read();
- Assert.assertEquals(b, b2);
+ Assertions.assertEquals(b, b2);
} finally {
if (stream != null) {
IOUtils.cleanupWithLogger(null, stream);
@@ -117,7 +117,7 @@ public void testOpenManyFilesViaTcp() throws Exception {
for (int i = 0; i < NUM_OPENS; i++) {
streams[i] = dfs.open(TEST_PATH);
LOG.info("opening file " + i + "...");
- Assert.assertTrue(-1 != streams[i].read());
+ Assertions.assertTrue(-1 != streams[i].read());
streams[i].unbuffer();
}
} finally {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestUrlStreamHandler.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestUrlStreamHandler.java
index 5a04f67846b15..231ffb96dda67 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestUrlStreamHandler.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestUrlStreamHandler.java
@@ -17,9 +17,7 @@
*/
package org.apache.hadoop.fs;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNull;
+import static org.junit.jupiter.api.Assertions.*;
import java.io.File;
import java.io.IOException;
@@ -34,8 +32,8 @@
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.test.PathUtils;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
/**
* Test of the URL stream handler.
@@ -48,7 +46,7 @@ public class TestUrlStreamHandler {
private static final FsUrlStreamHandlerFactory HANDLER_FACTORY
= new FsUrlStreamHandlerFactory();
- @BeforeClass
+ @BeforeAll
public static void setupHandler() {
// Setup our own factory
@@ -166,20 +164,20 @@ public void testFileUrls() throws IOException, URISyntaxException {
@Test
public void testHttpDefaultHandler() throws Throwable {
- assertNull("Handler for HTTP is the Hadoop one",
- HANDLER_FACTORY.createURLStreamHandler("http"));
+ assertNull(
+ HANDLER_FACTORY.createURLStreamHandler("http"), "Handler for HTTP is the Hadoop one");
}
@Test
public void testHttpsDefaultHandler() throws Throwable {
- assertNull("Handler for HTTPS is the Hadoop one",
- HANDLER_FACTORY.createURLStreamHandler("https"));
+ assertNull(
+ HANDLER_FACTORY.createURLStreamHandler("https"), "Handler for HTTPS is the Hadoop one");
}
@Test
public void testUnknownProtocol() throws Throwable {
- assertNull("Unknown protocols are not handled",
- HANDLER_FACTORY.createURLStreamHandler("gopher"));
+ assertNull(
+ HANDLER_FACTORY.createURLStreamHandler("gopher"), "Unknown protocols are not handled");
}
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestWebHdfsFileContextMainOperations.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestWebHdfsFileContextMainOperations.java
index 7544835c7a3dd..9314798fe8b1c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestWebHdfsFileContextMainOperations.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestWebHdfsFileContextMainOperations.java
@@ -24,10 +24,10 @@
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.AfterClass;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
import javax.security.auth.login.LoginException;
import java.io.IOException;
@@ -38,8 +38,8 @@
import static org.apache.hadoop.fs.CreateFlag.CREATE;
import static org.apache.hadoop.fs.FileContextTestHelper.getDefaultBlockSize;
import static org.apache.hadoop.fs.FileContextTestHelper.getFileData;
-import static org.junit.Assert.assertArrayEquals;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertArrayEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
/**
* Test of FileContext apis on Webhdfs.
@@ -71,7 +71,7 @@ public URI getWebhdfsUrl() {
return webhdfsUrl;
}
- @BeforeClass
+ @BeforeAll
public static void clusterSetupAtBeginning()
throws IOException, LoginException, URISyntaxException {
@@ -85,7 +85,7 @@ public static void clusterSetupAtBeginning()
fc.mkdir(defaultWorkingDirectory, FileContext.DEFAULT_PERM, true);
}
- @Before
+ @BeforeEach
public void setUp() throws Exception {
URI webhdfsUrlReal = getWebhdfsUrl();
Path testBuildData = new Path(
@@ -153,7 +153,7 @@ public void testSetVerifyChecksum() throws IOException {
assertArrayEquals(data, bb);
}
- @AfterClass
+ @AfterAll
public static void ClusterShutdownAtEnd() throws Exception {
if (cluster != null) {
cluster.shutdown();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/HDFSContract.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/HDFSContract.java
index 74b9a35adfcdb..e4e481f9a8044 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/HDFSContract.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/HDFSContract.java
@@ -27,7 +27,7 @@
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.junit.Assert;
+import org.junit.jupiter.api.Assertions;
import java.io.IOException;
@@ -73,14 +73,14 @@ public static MiniDFSCluster getCluster() {
@Override
public void init() throws IOException {
super.init();
- Assert.assertTrue("contract options not loaded",
- isSupported(ContractOptions.IS_CASE_SENSITIVE, false));
+ Assertions.assertTrue(
+ isSupported(ContractOptions.IS_CASE_SENSITIVE, false), "contract options not loaded");
}
@Override
public FileSystem getTestFileSystem() throws IOException {
- //assumes cluster is not null
- Assert.assertNotNull("cluster not created", cluster);
+ //assumes cluster is not null
+ Assertions.assertNotNull(cluster, "cluster not created");
return cluster.getFileSystem();
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractAppend.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractAppend.java
index 897354c1386f3..ed3cead8a2812 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractAppend.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractAppend.java
@@ -17,19 +17,19 @@
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.contract.AbstractContractAppendTest;
import org.apache.hadoop.fs.contract.AbstractFSContract;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
import java.io.IOException;
public class TestHDFSContractAppend extends AbstractContractAppendTest {
- @BeforeClass
+ @BeforeAll
public static void createCluster() throws IOException {
HDFSContract.createCluster();
}
- @AfterClass
+ @AfterAll
public static void teardownCluster() throws IOException {
HDFSContract.destroyCluster();
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractConcat.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractConcat.java
index 05587ce7e40fa..ddb5cc2560c78 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractConcat.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractConcat.java
@@ -21,8 +21,8 @@
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.contract.AbstractContractConcatTest;
import org.apache.hadoop.fs.contract.AbstractFSContract;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
import java.io.IOException;
@@ -31,14 +31,14 @@
*/
public class TestHDFSContractConcat extends AbstractContractConcatTest {
- @BeforeClass
+ @BeforeAll
public static void createCluster() throws IOException {
HDFSContract.createCluster();
// perform a simple operation on the cluster to verify it is up
HDFSContract.getCluster().getFileSystem().getDefaultBlockSize();
}
- @AfterClass
+ @AfterAll
public static void teardownCluster() throws IOException {
HDFSContract.destroyCluster();
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractCreate.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractCreate.java
index b209bf130e2be..8c9cbaf395854 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractCreate.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractCreate.java
@@ -21,19 +21,19 @@
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.contract.AbstractContractCreateTest;
import org.apache.hadoop.fs.contract.AbstractFSContract;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
import java.io.IOException;
public class TestHDFSContractCreate extends AbstractContractCreateTest {
- @BeforeClass
+ @BeforeAll
public static void createCluster() throws IOException {
HDFSContract.createCluster();
}
- @AfterClass
+ @AfterAll
public static void teardownCluster() throws IOException {
HDFSContract.destroyCluster();
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractDelete.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractDelete.java
index 4dc4af05addac..47fbf30d42e3f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractDelete.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractDelete.java
@@ -21,8 +21,8 @@
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.contract.AbstractContractDeleteTest;
import org.apache.hadoop.fs.contract.AbstractFSContract;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
import java.io.IOException;
@@ -31,12 +31,12 @@
*/
public class TestHDFSContractDelete extends AbstractContractDeleteTest {
- @BeforeClass
+ @BeforeAll
public static void createCluster() throws IOException {
HDFSContract.createCluster();
}
- @AfterClass
+ @AfterAll
public static void teardownCluster() throws IOException {
HDFSContract.destroyCluster();
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractGetFileStatus.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractGetFileStatus.java
index d81d3c200fee2..81b04407e76aa 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractGetFileStatus.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractGetFileStatus.java
@@ -21,20 +21,20 @@
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.contract.AbstractContractGetFileStatusTest;
import org.apache.hadoop.fs.contract.AbstractFSContract;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
import java.io.IOException;
public class TestHDFSContractGetFileStatus extends
AbstractContractGetFileStatusTest {
- @BeforeClass
+ @BeforeAll
public static void createCluster() throws IOException {
HDFSContract.createCluster();
}
- @AfterClass
+ @AfterAll
public static void teardownCluster() throws IOException {
HDFSContract.destroyCluster();
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractMkdir.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractMkdir.java
index 053429dec803f..fae15f4a3c31a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractMkdir.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractMkdir.java
@@ -21,8 +21,8 @@
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.contract.AbstractContractMkdirTest;
import org.apache.hadoop.fs.contract.AbstractFSContract;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
import java.io.IOException;
@@ -31,12 +31,12 @@
*/
public class TestHDFSContractMkdir extends AbstractContractMkdirTest {
- @BeforeClass
+ @BeforeAll
public static void createCluster() throws IOException {
HDFSContract.createCluster();
}
- @AfterClass
+ @AfterAll
public static void teardownCluster() throws IOException {
HDFSContract.destroyCluster();
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractMultipartUploader.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractMultipartUploader.java
index 0efb33f5db200..6f532da38e942 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractMultipartUploader.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractMultipartUploader.java
@@ -19,14 +19,14 @@
import java.io.IOException;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.BeforeAll;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.contract.AbstractContractMultipartUploaderTest;
import org.apache.hadoop.fs.contract.AbstractFSContract;
+import org.junit.jupiter.api.AfterAll;
/**
* Test MultipartUploader tests on HDFS.
@@ -37,12 +37,12 @@ public class TestHDFSContractMultipartUploader extends
protected static final Logger LOG =
LoggerFactory.getLogger(TestHDFSContractMultipartUploader.class);
- @BeforeClass
+ @BeforeAll
public static void createCluster() throws IOException {
HDFSContract.createCluster();
}
- @AfterClass
+ @AfterAll
public static void teardownCluster() throws IOException {
HDFSContract.destroyCluster();
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractOpen.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractOpen.java
index 0d9e8103208ee..3a11f1b85b9ed 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractOpen.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractOpen.java
@@ -21,8 +21,8 @@
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.contract.AbstractContractOpenTest;
import org.apache.hadoop.fs.contract.AbstractFSContract;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
import java.io.IOException;
@@ -31,12 +31,12 @@
*/
public class TestHDFSContractOpen extends AbstractContractOpenTest {
- @BeforeClass
+ @BeforeAll
public static void createCluster() throws IOException {
HDFSContract.createCluster();
}
- @AfterClass
+ @AfterAll
public static void teardownCluster() throws IOException {
HDFSContract.destroyCluster();
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractPathHandle.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractPathHandle.java
index c65a60b18b195..46a78726ce776 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractPathHandle.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractPathHandle.java
@@ -21,8 +21,8 @@
import org.apache.hadoop.fs.Options;
import org.apache.hadoop.fs.contract.AbstractContractPathHandleTest;
import org.apache.hadoop.fs.contract.AbstractFSContract;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
import java.io.IOException;
@@ -35,15 +35,15 @@ public class TestHDFSContractPathHandle
public TestHDFSContractPathHandle(String testname, Options.HandleOpt[] opts,
boolean serialized) {
- super(testname, opts, serialized);
+ super(testname, opts, serialized);
}
- @BeforeClass
+ @BeforeAll
public static void createCluster() throws IOException {
HDFSContract.createCluster();
}
- @AfterClass
+ @AfterAll
public static void teardownCluster() throws IOException {
HDFSContract.destroyCluster();
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractRename.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractRename.java
index 706b0cf826494..f96dfae62296b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractRename.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractRename.java
@@ -21,19 +21,19 @@
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.contract.AbstractContractRenameTest;
import org.apache.hadoop.fs.contract.AbstractFSContract;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
import java.io.IOException;
public class TestHDFSContractRename extends AbstractContractRenameTest {
- @BeforeClass
+ @BeforeAll
public static void createCluster() throws IOException {
HDFSContract.createCluster();
}
- @AfterClass
+ @AfterAll
public static void teardownCluster() throws IOException {
HDFSContract.destroyCluster();
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractRootDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractRootDirectory.java
index fc1851db5fb08..dc9fdd7989a6b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractRootDirectory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractRootDirectory.java
@@ -21,8 +21,8 @@
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.contract.AbstractContractRootDirectoryTest;
import org.apache.hadoop.fs.contract.AbstractFSContract;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
import java.io.IOException;
@@ -32,12 +32,12 @@
public class TestHDFSContractRootDirectory extends
AbstractContractRootDirectoryTest {
- @BeforeClass
+ @BeforeAll
public static void createCluster() throws IOException {
HDFSContract.createCluster();
}
- @AfterClass
+ @AfterAll
public static void teardownCluster() throws IOException {
HDFSContract.destroyCluster();
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractSeek.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractSeek.java
index 259ffce824c57..b12d6aa7039a0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractSeek.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractSeek.java
@@ -21,8 +21,8 @@
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.contract.AbstractContractSeekTest;
import org.apache.hadoop.fs.contract.AbstractFSContract;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
import java.io.IOException;
@@ -31,12 +31,12 @@
*/
public class TestHDFSContractSeek extends AbstractContractSeekTest {
- @BeforeClass
+ @BeforeAll
public static void createCluster() throws IOException {
HDFSContract.createCluster();
}
- @AfterClass
+ @AfterAll
public static void teardownCluster() throws IOException {
HDFSContract.destroyCluster();
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractSetTimes.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractSetTimes.java
index 4899189b01477..8ae002a3525c3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractSetTimes.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractSetTimes.java
@@ -21,19 +21,19 @@
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.contract.AbstractContractSetTimesTest;
import org.apache.hadoop.fs.contract.AbstractFSContract;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
import java.io.IOException;
public class TestHDFSContractSetTimes extends AbstractContractSetTimesTest {
- @BeforeClass
+ @BeforeAll
public static void createCluster() throws IOException {
HDFSContract.createCluster();
}
- @AfterClass
+ @AfterAll
public static void teardownCluster() throws IOException {
HDFSContract.destroyCluster();
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractUnbuffer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractUnbuffer.java
index 54b8bf1c700e6..7ee5996d10e57 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractUnbuffer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractUnbuffer.java
@@ -21,20 +21,19 @@
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.contract.AbstractContractUnbufferTest;
import org.apache.hadoop.fs.contract.AbstractFSContract;
-
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
import java.io.IOException;
public class TestHDFSContractUnbuffer extends AbstractContractUnbufferTest {
- @BeforeClass
+ @BeforeAll
public static void createCluster() throws IOException {
HDFSContract.createCluster();
}
- @AfterClass
+ @AfterAll
public static void teardownCluster() throws IOException {
HDFSContract.destroyCluster();
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/loadGenerator/TestLoadGenerator.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/loadGenerator/TestLoadGenerator.java
index 6c7bac31ea860..dcca1b8974c20 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/loadGenerator/TestLoadGenerator.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/loadGenerator/TestLoadGenerator.java
@@ -17,7 +17,7 @@
*/
package org.apache.hadoop.fs.loadGenerator;
-import static org.junit.Assert.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
import java.io.BufferedReader;
import java.io.File;
@@ -33,7 +33,8 @@
import org.apache.hadoop.util.Time;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
+
/**
* This class tests if a balancer schedules tasks correctly.
*/
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/permission/TestStickyBit.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/permission/TestStickyBit.java
index 96d15e59a52ab..7c7d17b9e2035 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/permission/TestStickyBit.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/permission/TestStickyBit.java
@@ -22,10 +22,7 @@
import static org.apache.hadoop.fs.permission.AclEntryType.USER;
import static org.apache.hadoop.fs.permission.FsAction.ALL;
import static org.apache.hadoop.hdfs.server.namenode.AclTestHelpers.aclEntry;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.*;
import java.io.IOException;
import java.util.Arrays;
@@ -45,10 +42,10 @@
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.AfterClass;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -66,7 +63,7 @@ public class TestStickyBit {
private static FileSystem hdfsAsUser1;
private static FileSystem hdfsAsUser2;
- @BeforeClass
+ @BeforeAll
public static void init() throws Exception {
conf = new HdfsConfiguration();
conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, true);
@@ -85,7 +82,7 @@ private static void initCluster(boolean format) throws Exception {
assertTrue(hdfsAsUser2 instanceof DistributedFileSystem);
}
- @Before
+ @BeforeEach
public void setup() throws Exception {
if (hdfs != null) {
for (FileStatus stat: hdfs.listStatus(new Path("/"))) {
@@ -94,7 +91,7 @@ public void setup() throws Exception {
}
}
- @AfterClass
+ @AfterAll
public static void shutdown() throws Exception {
IOUtils.cleanupWithLogger(null, hdfs, hdfsAsUser1, hdfsAsUser2);
if (cluster != null) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/shell/TestHdfsTextCommand.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/shell/TestHdfsTextCommand.java
index 23de658529684..2e85b33e9a8b8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/shell/TestHdfsTextCommand.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/shell/TestHdfsTextCommand.java
@@ -18,8 +18,8 @@
package org.apache.hadoop.fs.shell;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
import java.io.IOException;
import java.io.InputStream;
@@ -33,9 +33,9 @@
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
/**
@@ -48,7 +48,7 @@ public class TestHdfsTextCommand {
private static MiniDFSCluster cluster;
private static FileSystem fs;
- @Before
+ @BeforeEach
public void setUp() throws IOException{
Configuration conf = new HdfsConfiguration();
cluster = new MiniDFSCluster.Builder(conf).build();
@@ -56,7 +56,7 @@ public void setUp() throws IOException{
fs = cluster.getFileSystem();
}
- @After
+ @AfterEach
public void tearDown() throws IOException{
if(fs != null){
fs.close();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestNNStartupWhenViewFSOverloadSchemeEnabled.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestNNStartupWhenViewFSOverloadSchemeEnabled.java
index bbdbd5a0b0d40..c71b7d326a8c5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestNNStartupWhenViewFSOverloadSchemeEnabled.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestNNStartupWhenViewFSOverloadSchemeEnabled.java
@@ -24,9 +24,9 @@
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.MiniDFSNNTopology;
-import org.junit.After;
-import org.junit.BeforeClass;
import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeAll;
/**
* Tests that the NN startup is successful with ViewFSOverloadScheme.
@@ -37,7 +37,7 @@ public class TestNNStartupWhenViewFSOverloadSchemeEnabled {
private static final String HDFS_SCHEME = "hdfs";
private static final Configuration CONF = new Configuration();
- @BeforeClass
+ @BeforeAll
public static void setUp() {
CONF.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
CONF.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
@@ -78,7 +78,7 @@ public void testNameNodeAndDataNodeStartup() throws Exception {
cluster.waitActive();
}
- @After
+ @AfterEach
public void shutdownCluster() {
if (cluster != null) {
cluster.shutdown();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFSOverloadSchemeWithMountTableConfigInHDFS.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFSOverloadSchemeWithMountTableConfigInHDFS.java
index 5e2f42b77a3a7..f0221bf79f383 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFSOverloadSchemeWithMountTableConfigInHDFS.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFSOverloadSchemeWithMountTableConfigInHDFS.java
@@ -25,7 +25,7 @@
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
-import org.junit.Before;
+import org.junit.jupiter.api.BeforeEach;
/**
* Tests ViewFileSystemOverloadScheme with configured mount links.
@@ -35,7 +35,7 @@ public class TestViewFSOverloadSchemeWithMountTableConfigInHDFS
private Path oldVersionMountTablePath;
private Path newVersionMountTablePath;
- @Before
+ @BeforeEach
@Override
public void setUp() throws IOException {
super.setUp();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemAtHdfsRoot.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemAtHdfsRoot.java
index 9b2953c61cd00..1e7cfbded2b4e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemAtHdfsRoot.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemAtHdfsRoot.java
@@ -29,9 +29,9 @@
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.junit.AfterClass;
-import org.junit.Before;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.BeforeEach;
/**
* Make sure that ViewFileSystem works when the root of an FS is mounted to a
@@ -48,7 +48,7 @@ protected FileSystemTestHelper createFileSystemHelper() {
return new FileSystemTestHelper("/tmp/TestViewFileSystemAtHdfsRoot");
}
- @BeforeClass
+ @BeforeAll
public static void clusterSetupAtBegining() throws IOException,
LoginException, URISyntaxException {
SupportsBlocks = true;
@@ -63,7 +63,7 @@ public static void clusterSetupAtBegining() throws IOException,
fHdfs = cluster.getFileSystem();
}
- @AfterClass
+ @AfterAll
public static void clusterShutdownAtEnd() throws Exception {
if (cluster != null) {
cluster.shutdown();
@@ -71,7 +71,7 @@ public static void clusterShutdownAtEnd() throws Exception {
}
@Override
- @Before
+ @BeforeEach
public void setUp() throws Exception {
fsTarget = fHdfs;
super.setUp();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemHdfs.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemHdfs.java
index fcb52577d9991..f59060ec3071f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemHdfs.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemHdfs.java
@@ -41,7 +41,6 @@
import org.apache.hadoop.fs.FsConstants;
import org.apache.hadoop.fs.FsShell;
import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.contract.ContractTestUtils;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
@@ -56,12 +55,9 @@
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY;
-import org.junit.After;
-import org.junit.AfterClass;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Test;
-import static org.junit.Assert.*;
+import org.junit.jupiter.api.*;
+
+import static org.junit.jupiter.api.Assertions.*;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -85,7 +81,7 @@ protected FileSystemTestHelper createFileSystemHelper() {
return new FileSystemTestHelper("/tmp/TestViewFileSystemHdfs");
}
- @BeforeClass
+ @BeforeAll
public static void clusterSetupAtBegining() throws IOException,
LoginException, URISyntaxException {
@@ -129,7 +125,7 @@ public static void clusterSetupAtBegining() throws IOException,
}
- @AfterClass
+ @AfterAll
public static void ClusterShutdownAtEnd() throws Exception {
if (cluster != null) {
cluster.shutdown();
@@ -137,7 +133,7 @@ public static void ClusterShutdownAtEnd() throws Exception {
}
@Override
- @Before
+ @BeforeEach
public void setUp() throws Exception {
// create the test root on local_fs
fsTarget = fHdfs;
@@ -147,7 +143,7 @@ public void setUp() throws Exception {
}
@Override
- @After
+ @AfterEach
public void tearDown() throws Exception {
super.tearDown();
}
@@ -204,13 +200,13 @@ public void testTrashRootsAfterEncryptionZoneDeletion() throws Exception {
//Verify file deletion within EZ
DFSTestUtil.verifyDelete(shell, fsTarget, encFile, true);
- assertTrue("ViewFileSystem trash roots should include EZ file trash",
- (fsView.getTrashRoots(true).size() == 1));
+ assertEquals(1, fsView.getTrashRoots(true).size(),
+ "ViewFileSystem trash roots should include EZ file trash");
//Verify deletion of EZ
DFSTestUtil.verifyDelete(shell, fsTarget, zone, true);
- assertTrue("ViewFileSystem trash roots should include EZ zone trash",
- (fsView.getTrashRoots(true).size() == 2));
+ assertEquals(2, fsView.getTrashRoots(true).size(),
+ "ViewFileSystem trash roots should include EZ zone trash");
}
@Test
@@ -253,15 +249,15 @@ public void testFileChecksum() throws IOException {
viewFs.getFileChecksum(mountDataFilePath);
FileChecksum fileChecksumViaTargetFs =
fsTarget.getFileChecksum(fsTargetFilePath);
- assertTrue("File checksum not matching!",
- fileChecksumViaViewFs.equals(fileChecksumViaTargetFs));
+ assertTrue(fileChecksumViaViewFs.equals(fileChecksumViaTargetFs),
+ "File checksum not matching!");
fileChecksumViaViewFs =
viewFs.getFileChecksum(mountDataFilePath, fileLength / 2);
fileChecksumViaTargetFs =
fsTarget.getFileChecksum(fsTargetFilePath, fileLength / 2);
- assertTrue("File checksum not matching!",
- fileChecksumViaViewFs.equals(fileChecksumViaTargetFs));
+ assertTrue(fileChecksumViaViewFs.equals(fileChecksumViaTargetFs),
+ "File checksum not matching!");
}
//Rename should fail on across different fileSystems
@@ -276,7 +272,7 @@ public void testRenameAccorssFilesystem() throws IOException {
fsView.create(filePath);
try {
fsView.rename(filePath, hdfFilepath);
- ContractTestUtils.fail("Should thrown IOE on Renames across filesytems");
+ Assertions.fail("Should thrown IOE on Renames across filesytems");
} catch (IOException e) {
GenericTestUtils
.assertExceptionContains("Renames across Mount points not supported",
@@ -325,7 +321,7 @@ private void testNflyRepair(NflyFSystem.NflyKey repairKey)
// 1. test mkdirs
final Path testDir = new Path("testdir1/sub1/sub3");
final Path testDir_tmp = new Path("testdir1/sub1/sub3_temp");
- assertTrue(testDir + ": Failed to create!", nfly.mkdirs(testDir));
+ assertTrue(nfly.mkdirs(testDir), testDir + ": Failed to create!");
// Test renames
assertTrue(nfly.rename(testDir, testDir_tmp));
@@ -333,7 +329,7 @@ private void testNflyRepair(NflyFSystem.NflyKey repairKey)
for (final URI testUri : testUris) {
final FileSystem fs = FileSystem.get(testUri, testConf);
- assertTrue(testDir + " should exist!", fs.exists(testDir));
+ assertTrue(fs.exists(testDir), testDir + " should exist!");
}
// 2. test write
@@ -349,7 +345,7 @@ private void testNflyRepair(NflyFSystem.NflyKey repairKey)
final FileSystem fs = FileSystem.get(testUri, testConf);
final FSDataInputStream fsdis = fs.open(testFile);
try {
- assertEquals("Wrong file content", testString, fsdis.readUTF());
+ assertEquals(testString, fsdis.readUTF(), "Wrong file content");
} finally {
fsdis.close();
}
@@ -364,7 +360,7 @@ private void testNflyRepair(NflyFSystem.NflyKey repairKey)
FSDataInputStream fsDis = null;
try {
fsDis = nfly.open(testFile);
- assertEquals("Wrong file content", testString, fsDis.readUTF());
+ assertEquals(testString, fsDis.readUTF(), "Wrong file content");
} finally {
IOUtils.cleanupWithLogger(LOG, fsDis);
cluster.restartNameNode(i);
@@ -378,7 +374,7 @@ private void testNflyRepair(NflyFSystem.NflyKey repairKey)
FSDataInputStream fsDis = null;
try {
fsDis = nfly.open(testFile);
- assertEquals("Wrong file content", testString, fsDis.readUTF());
+ assertEquals(testString, fsDis.readUTF(), "Wrong file content");
assertTrue(fs1.exists(testFile));
} finally {
IOUtils.cleanupWithLogger(LOG, fsDis);
@@ -393,18 +389,18 @@ private void testNflyRepair(NflyFSystem.NflyKey repairKey)
for (final URI testUri : testUris) {
final FileSystem fs = FileSystem.get(testUri, conf);
fs.setTimes(testFile, 1L, 1L);
- assertEquals(testUri + "Set mtime failed!", 1L,
- fs.getFileStatus(testFile).getModificationTime());
- assertEquals("nfly file status wrong", expectedMtime,
- nfly.getFileStatus(testFile).getModificationTime());
+ assertEquals(1L,
+ fs.getFileStatus(testFile).getModificationTime(), testUri + "Set mtime failed!");
+ assertEquals(expectedMtime,
+ nfly.getFileStatus(testFile).getModificationTime(), "nfly file status wrong");
FSDataInputStream fsDis2 = null;
try {
fsDis2 = nfly.open(testFile);
- assertEquals("Wrong file content", testString, fsDis2.readUTF());
- // repair is done, now trying via normal fs
- //
- assertEquals("Repair most recent failed!", expectedMtime,
- fs.getFileStatus(testFile).getModificationTime());
+ assertEquals(testString, fsDis2.readUTF(), "Wrong file content");
+ // repair is done, now trying via normal fs
+ //
+ assertEquals(expectedMtime,
+ fs.getFileStatus(testFile).getModificationTime(), "Repair most recent failed!");
} finally {
IOUtils.cleanupWithLogger(LOG, fsDis2);
}
@@ -476,7 +472,7 @@ public Object run() throws IOException {
FileSystem otherfs = map.get("user1");
otherfs.mkdirs(user1Path);
String owner = otherfs.getFileStatus(user1Path).getOwner();
- assertEquals("The owner did not match ", owner, userUgi.getShortUserName());
+ assertEquals(owner, userUgi.getShortUserName(), "The owner did not match ");
otherfs.delete(user1Path, false);
}
}
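
The hunks above all apply the same rule: org.junit.jupiter.api.Assertions takes the optional failure message as the last parameter, whereas org.junit.Assert took it first, so each converted call simply moves the message to the tail. A minimal sketch of the resulting shape (the class and values below are illustrative, not taken from this patch):

    import static org.junit.jupiter.api.Assertions.assertEquals;
    import static org.junit.jupiter.api.Assertions.assertTrue;

    import org.junit.jupiter.api.Test;

    class AssertionOrderExample {
      @Test
      void messageIsLastParameter() {
        int trashRoots = 2;
        // JUnit 4: assertEquals("message", expected, actual)
        // JUnit 5: assertEquals(expected, actual, "message")
        assertEquals(2, trashRoots, "trash roots should include the EZ zone trash");
        // assertTrue follows the same rule: condition first, message last.
        assertTrue(trashRoots > 0, "expected at least one trash root");
      }
    }
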
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemLinkFallback.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemLinkFallback.java
index e7317608147be..d97b4d00716dc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemLinkFallback.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemLinkFallback.java
@@ -18,11 +18,7 @@
package org.apache.hadoop.fs.viewfs;
import static org.apache.hadoop.test.LambdaTestUtils.intercept;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.*;
import java.io.File;
import java.io.IOException;
@@ -45,11 +41,11 @@
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.MiniDFSNNTopology;
import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.BeforeClass;
import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.BeforeEach;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -79,7 +75,7 @@ protected FileSystemTestHelper createFileSystemHelper() {
return new FileSystemTestHelper(TEST_BASE_PATH);
}
- @BeforeClass
+ @BeforeAll
public static void clusterSetupAtBeginning() throws IOException,
LoginException, URISyntaxException {
SupportsBlocks = true;
@@ -100,7 +96,7 @@ public static void clusterSetupAtBeginning() throws IOException,
Constants.CONFIG_VIEWFS_DEFAULT_MOUNT_TABLE, "/", null, null);
}
- @AfterClass
+ @AfterAll
public static void clusterShutdownAtEnd() throws Exception {
if (cluster != null) {
cluster.shutdown();
@@ -108,7 +104,7 @@ public static void clusterShutdownAtEnd() throws Exception {
}
@Override
- @Before
+ @BeforeEach
public void setUp() throws Exception {
fsTarget = fsDefault;
super.setUp();
@@ -179,10 +175,10 @@ public void testConfLinkFallback() throws Exception {
FileStatus baseFileRelStat = vfs.getFileStatus(new Path(viewFsUri.toString()
+ testBaseFileRelative.toUri().toString()));
LOG.info("BaseFileRelStat: " + baseFileRelStat);
- Assert.assertEquals("Unexpected file length for " + testBaseFile,
- 1, baseFileStat.getLen());
- Assert.assertEquals("Unexpected file length for " + testBaseFileRelative,
- baseFileStat.getLen(), baseFileRelStat.getLen());
+ Assertions.assertEquals(1, baseFileStat.getLen(),
+ "Unexpected file length for " + testBaseFile);
+ Assertions.assertEquals(baseFileStat.getLen(), baseFileRelStat.getLen(),
+ "Unexpected file length for " + testBaseFileRelative);
FileStatus level2FileStat = vfs.getFileStatus(new Path(viewFsUri.toString()
+ testLevel2File.toUri().toString()));
LOG.info("Level2FileStat: " + level2FileStat);
@@ -228,8 +224,8 @@ public void testConfLinkFallbackWithRegularLinks() throws Exception {
FileStatus baseFileStat = vfs.getFileStatus(
new Path(viewFsUri.toString() + testBaseFile.toUri().toString()));
LOG.info("BaseFileStat: " + baseFileStat);
- Assert.assertEquals("Unexpected file length for " + testBaseFile,
- 0, baseFileStat.getLen());
+ Assertions.assertEquals(
+ 0, baseFileStat.getLen(), "Unexpected file length for " + testBaseFile);
FileStatus level2FileStat = vfs.getFileStatus(new Path(viewFsUri.toString()
+ testLevel2File.toUri().toString()));
LOG.info("Level2FileStat: " + level2FileStat);
@@ -240,8 +236,8 @@ public void testConfLinkFallbackWithRegularLinks() throws Exception {
FileStatus level2FileStatAfterWrite = vfs.getFileStatus(
new Path(viewFsUri.toString() + testLevel2File.toUri().toString()));
- Assert.assertTrue("Unexpected file length for " + testLevel2File,
- level2FileStatAfterWrite.getLen() > level2FileStat.getLen());
+ Assertions.assertTrue(
+ level2FileStatAfterWrite.getLen() > level2FileStat.getLen(),
+ "Unexpected file length for " + testLevel2File);
vfs.close();
}
@@ -265,8 +261,8 @@ public void testConfLinkFallbackWithMountPoint() throws Exception {
FileSystem.get(viewFsUri, conf);
fail("Shouldn't allow linkMergeSlash to take extra mount points!");
} catch (IOException e) {
- assertTrue("Unexpected error: " + e.getMessage(),
- e.getMessage().contains(expectedErrorMsg));
+ assertTrue(e.getMessage().contains(expectedErrorMsg),
+ "Unexpected error: " + e.getMessage());
}
}
@@ -299,13 +295,13 @@ public void testListingWithFallbackLink() throws Exception {
afterFallback.add(stat.getPath());
}
afterFallback.removeAll(beforeFallback);
- assertTrue("Listing didn't include fallback link",
- afterFallback.size() == 1);
+ assertTrue(
+ afterFallback.size() == 1, "Listing didn't include fallback link");
Path[] fallbackArray = new Path[afterFallback.size()];
afterFallback.toArray(fallbackArray);
Path expected = new Path(viewFsUri.toString(), "dir1");
- assertEquals("Path did not match",
- expected, fallbackArray[0]);
+ assertEquals(
+ expected, fallbackArray[0], "Path did not match");
// Create a directory using the returned fallback path and verify
Path childDir = new Path(fallbackArray[0], "child");
@@ -349,13 +345,13 @@ public void testListingWithFallbackLinkWithSameMountDirectories()
afterFallback.add(stat.getPath());
}
afterFallback.removeAll(beforeFallback);
- assertEquals("The same directory name in fallback link should be shaded",
- 1, afterFallback.size());
+ assertEquals(1, afterFallback.size(),
+ "The same directory name in fallback link should be shaded");
Path[] fallbackArray = new Path[afterFallback.size()];
// Only user1 should be listed as fallback link
Path expected = new Path(viewFsDefaultClusterUri.toString(), "user1");
- assertEquals("Path did not match",
- expected, afterFallback.toArray(fallbackArray)[0]);
+ assertEquals(
+ expected, afterFallback.toArray(fallbackArray)[0], "Path did not match");
// Create a directory using the returned fallback path and verify
Path childDir = new Path(fallbackArray[0], "child");
@@ -430,8 +426,8 @@ public void testListingWithFallbackLinkWithSameMountDirectoryTree()
}
//viewfs://default/user1/hive/warehouse
afterFallback.removeAll(beforeFallback);
- assertEquals("The same directory name in fallback link should be shaded",
- 1, afterFallback.size());
+ assertEquals(1, afterFallback.size(),
+ "The same directory name in fallback link should be shaded");
}
}
@@ -502,8 +498,8 @@ public void testLSOnLinkParentWithFallbackLinkWithSameMountDirectoryTree()
}
}
afterFallback.removeAll(beforeFallback);
- assertEquals("Just to make sure paths are same.", 0,
- afterFallback.size());
+ assertEquals(0,
+ afterFallback.size(), "Just to make sure paths are same.");
}
}
@@ -559,14 +555,14 @@ public void testLSOnRootWithFallbackLinkWithSameMountDirectories()
assertEquals(FsPermission.valueOf("-rwxr--rw-"),
stat.getPermission());
} else {
- assertEquals("Path is: " + stat.getPath(),
- FsPermission.valueOf("-rwxr--r--"), stat.getPermission());
+ assertEquals(FsPermission.valueOf("-rwxr--r--"), stat.getPermission(),
+ "Path is: " + stat.getPath());
}
}
afterFallback.removeAll(beforeFallback);
assertEquals(1, afterFallback.size());
- assertEquals("/user2 dir from fallback should be listed.", "user2",
- afterFallback.iterator().next().getName());
+ assertEquals("user2",
+ afterFallback.iterator().next().getName(), "/user2 dir from fallback should be listed.");
}
}
@@ -908,7 +904,7 @@ public void testCreateFileSameAsInternalDirPath() throws Exception {
assertFalse(fsTarget.exists(Path.mergePaths(fallbackTarget, vfsTestDir)));
try {
vfs.create(vfsTestDir);
- Assert.fail("Should fail to create file as this is an internal dir.");
+ Assertions.fail("Should fail to create file as this is an internal dir.");
} catch (NotInMountpointException e){
// This tree is part of internal tree. The above exception will be
// thrown from getDefaultReplication, getDefaultBlockSize APIs which was
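
The same lifecycle renames recur in every test class in this patch: @BeforeClass/@AfterClass become @BeforeAll/@AfterAll (still static), and @Before/@After become @BeforeEach/@AfterEach. A compact sketch of the mapping, using a hypothetical fixture in place of the MiniDFSCluster setup above:

    import static org.junit.jupiter.api.Assertions.assertEquals;

    import org.junit.jupiter.api.AfterAll;
    import org.junit.jupiter.api.AfterEach;
    import org.junit.jupiter.api.BeforeAll;
    import org.junit.jupiter.api.BeforeEach;
    import org.junit.jupiter.api.Test;

    class LifecycleMappingExample {
      private static String sharedCluster;  // stands in for a shared MiniDFSCluster
      private String perTestState;

      @BeforeAll   // was @BeforeClass; must remain static
      static void startCluster() { sharedCluster = "up"; }

      @AfterAll    // was @AfterClass
      static void stopCluster() { sharedCluster = null; }

      @BeforeEach  // was @Before
      void setUp() { perTestState = "ready"; }

      @AfterEach   // was @After
      void tearDown() { perTestState = null; }

      @Test
      void fixturesAreAvailable() {
        assertEquals("up", sharedCluster);
        assertEquals("ready", perTestState);
      }
    }
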
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemLinkMergeSlash.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemLinkMergeSlash.java
index 606743f582dfc..0c668dc05da69 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemLinkMergeSlash.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemLinkMergeSlash.java
@@ -34,17 +34,11 @@
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.MiniDFSNNTopology;
import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.*;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.*;
import javax.security.auth.login.LoginException;
@@ -74,7 +68,7 @@ protected FileSystemTestHelper createFileSystemHelper() {
return new FileSystemTestHelper(TEST_TEMP_PATH);
}
- @BeforeClass
+ @BeforeAll
public static void clusterSetupAtBeginning() throws IOException,
LoginException, URISyntaxException {
SupportsBlocks = true;
@@ -93,7 +87,7 @@ public static void clusterSetupAtBeginning() throws IOException,
fsDefault = FS_HDFS[FS_INDEX_DEFAULT];
}
- @AfterClass
+ @AfterAll
public static void clusterShutdownAtEnd() throws Exception {
if (cluster != null) {
cluster.shutdown();
@@ -101,7 +95,7 @@ public static void clusterShutdownAtEnd() throws Exception {
}
@Override
- @Before
+ @BeforeEach
public void setUp() throws Exception {
fsTarget = fsDefault;
super.setUp();
@@ -191,9 +185,9 @@ public void testConfLinkMergeSlashWithRegularLinks() throws Exception {
fail("Shouldn't allow both merge slash link and regular link on same "
+ "mount table.");
} catch (IOException e) {
- assertTrue("Unexpected error message: " + e.getMessage(),
- e.getMessage().contains(expectedErrorMsg1) || e.getMessage()
- .contains(expectedErrorMsg2));
+ assertTrue(e.getMessage().contains(expectedErrorMsg1)
+ || e.getMessage().contains(expectedErrorMsg2),
+ "Unexpected error message: " + e.getMessage());
}
}
@@ -226,9 +220,9 @@ public void testChildFileSystems() throws Exception {
LINK_MERGE_SLASH_CLUSTER_1_NAME, "/", null, null);
FileSystem fs = FileSystem.get(viewFsUri, conf);
FileSystem[] childFs = fs.getChildFileSystems();
- Assert.assertEquals("Unexpected number of child filesystems!",
- 1, childFs.length);
- Assert.assertEquals("Unexpected child filesystem!",
- DistributedFileSystem.class, childFs[0].getClass());
+ Assertions.assertEquals(1, childFs.length,
+ "Unexpected number of child filesystems!");
+ Assertions.assertEquals(DistributedFileSystem.class, childFs[0].getClass(),
+ "Unexpected child filesystem!");
}
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemLinkRegex.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemLinkRegex.java
index d3afa47f7554b..4e2f34fb675f8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemLinkRegex.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemLinkRegex.java
@@ -36,16 +36,12 @@
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.MiniDFSNNTopology;
import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.*;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import static org.apache.hadoop.fs.viewfs.RegexMountPoint.INTERCEPTOR_INTERNAL_SEP;
-import static org.junit.Assert.assertSame;
+import static org.junit.jupiter.api.Assertions.assertSame;
/**
* Test linkRegex node type for view file system.
@@ -73,7 +69,7 @@ protected FileSystemTestHelper createFileSystemHelper() {
return new FileSystemTestHelper(TEST_BASE_PATH);
}
- @BeforeClass
+ @BeforeAll
public static void clusterSetupAtBeginning() throws IOException {
SupportsBlocks = true;
clusterConfig = ViewFileSystemTestSetup.createConfig();
@@ -91,7 +87,7 @@ public static void clusterSetupAtBeginning() throws IOException {
fsDefault = FS_HDFS[FS_INDEX_DEFAULT];
}
- @AfterClass
+ @AfterAll
public static void clusterShutdownAtEnd() throws Exception {
if (cluster != null) {
cluster.shutdown();
@@ -99,7 +95,7 @@ public static void clusterShutdownAtEnd() throws Exception {
}
@Override
- @Before
+ @BeforeEach
public void setUp() throws Exception {
fsTarget = fsDefault;
super.setUp();
@@ -157,7 +153,7 @@ public String linkInterceptorSettings(
private void createDirWithChildren(
FileSystem fileSystem, Path dir, List childrenFiles)
throws IOException {
- Assert.assertTrue(fileSystem.mkdirs(dir));
+ Assertions.assertTrue(fileSystem.mkdirs(dir));
int index = 0;
for (Path childFile : childrenFiles) {
createFile(fileSystem, childFile, index, true);
@@ -224,11 +220,11 @@ private void testRegexMountpoint(
URI viewFsUri = new URI(
FsConstants.VIEWFS_SCHEME, CLUSTER_NAME, "/", null, null);
try (FileSystem vfs = FileSystem.get(viewFsUri, config)) {
- Assert.assertEquals(expectedResolveResult.toString(),
+ Assertions.assertEquals(expectedResolveResult.toString(),
vfs.resolvePath(dirPathBeforeMountPoint).toString());
- Assert.assertTrue(
+ Assertions.assertTrue(
vfs.getFileStatus(dirPathBeforeMountPoint).isDirectory());
- Assert.assertEquals(
+ Assertions.assertEquals(
childrenFilesCnt, vfs.listStatus(dirPathBeforeMountPoint).length);
// Test Inner cache, the resolved result's filesystem should be the same.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemOverloadSchemeHdfsFileSystemContract.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemOverloadSchemeHdfsFileSystemContract.java
index dcfa051c3902d..714a08c79c290 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemOverloadSchemeHdfsFileSystemContract.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemOverloadSchemeHdfsFileSystemContract.java
@@ -19,8 +19,6 @@
import static org.apache.hadoop.fs.viewfs.Constants.CONFIG_VIEWFS_IGNORE_PORT_IN_MOUNT_TABLE_NAME;
import static org.apache.hadoop.fs.viewfs.Constants.CONFIG_VIEWFS_IGNORE_PORT_IN_MOUNT_TABLE_NAME_DEFAULT;
-import static org.junit.Assume.assumeTrue;
-
import java.io.File;
import java.io.IOException;
import java.net.URI;
@@ -39,11 +37,12 @@
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.AfterClass;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Ignore;
import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.Assumptions;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Disabled;
/**
* Tests ViewFileSystemOverloadScheme with file system contract tests.
@@ -55,7 +54,7 @@ public class TestViewFileSystemOverloadSchemeHdfsFileSystemContract
private static String defaultWorkingDirectory;
private static Configuration conf = new HdfsConfiguration();
- @BeforeClass
+ @BeforeAll
public static void init() throws IOException {
final File basedir = GenericTestUtils.getRandomizedTestDir();
conf.set(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY,
@@ -67,7 +66,7 @@ public static void init() throws IOException {
"/user/" + UserGroupInformation.getCurrentUser().getShortUserName();
}
- @Before
+ @BeforeEach
public void setUp() throws Exception {
conf.set(String.format("fs.%s.impl", "hdfs"),
ViewFileSystemOverloadScheme.class.getName());
@@ -89,7 +88,7 @@ public void setUp() throws Exception {
fs = FileSystem.get(conf);
}
- @AfterClass
+ @AfterAll
public static void tearDownAfter() throws Exception {
if (cluster != null) {
cluster.shutdown();
@@ -117,7 +116,7 @@ public void testRenameRootDirForbidden() throws Exception {
@Override
@Test
public void testListStatusRootDir() throws Throwable {
- assumeTrue(rootDirTestEnabled());
+ Assumptions.assumeTrue(rootDirTestEnabled());
Path dir = path("/");
Path child = path("/FileSystemContractBaseTest");
try (FileSystem dfs = ((ViewFileSystemOverloadScheme) fs).getRawFileSystem(
@@ -129,7 +128,7 @@ public void testListStatusRootDir() throws Throwable {
}
@Override
- @Ignore // This test same as above in this case.
+ @Disabled // This test same as above in this case.
public void testLSRootDir() throws Throwable {
}
}
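
Beyond the lifecycle annotations, this file covers the two remaining renames the conversion performs: org.junit.Assume.assumeTrue becomes org.junit.jupiter.api.Assumptions.assumeTrue, and @Ignore becomes @Disabled. A small illustration of both, with a made-up rootDirTestEnabled() flag standing in for the real precondition:

    import org.junit.jupiter.api.Assumptions;
    import org.junit.jupiter.api.Disabled;
    import org.junit.jupiter.api.Test;

    class AssumptionExample {
      private boolean rootDirTestEnabled() { return false; }  // hypothetical precondition

      @Test
      void skippedWhenPreconditionDoesNotHold() {
        // Aborts (rather than fails) the test when the assumption is false.
        Assumptions.assumeTrue(rootDirTestEnabled());
        // body only runs when the assumption holds
      }

      @Disabled("covered by another test")  // was @Ignore; the reason string is optional
      @Test
      void notRunAtAll() {
      }
    }
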
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemOverloadSchemeWithHdfsScheme.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemOverloadSchemeWithHdfsScheme.java
index 650a4722798e8..e252bcbb01908 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemOverloadSchemeWithHdfsScheme.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemOverloadSchemeWithHdfsScheme.java
@@ -39,16 +39,16 @@
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.test.LambdaTestUtils;
import org.apache.hadoop.test.PathUtils;
-import org.junit.After;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.BeforeClass;
import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.BeforeEach;
import static org.apache.hadoop.fs.viewfs.Constants.CONFIG_VIEWFS_IGNORE_PORT_IN_MOUNT_TABLE_NAME;
import static org.apache.hadoop.fs.viewfs.Constants.CONFIG_VIEWFS_IGNORE_PORT_IN_MOUNT_TABLE_NAME_DEFAULT;
-import static org.junit.Assert.*;
+import static org.junit.jupiter.api.Assertions.*;
/**
@@ -67,7 +67,7 @@ public class TestViewFileSystemOverloadSchemeWithHdfsScheme {
private static final String HDFS_USER_FOLDER = "/HDFSUser";
private static final String LOCAL_FOLDER = "/local";
- @BeforeClass
+ @BeforeAll
public static void init() throws IOException {
cluster =
new MiniDFSCluster.Builder(new Configuration()).numDataNodes(2).build();
@@ -77,7 +77,7 @@ public static void init() throws IOException {
/**
* Sets up the configurations and starts the MiniDFSCluster.
*/
- @Before
+ @BeforeEach
public void setUp() throws IOException {
Configuration config = getNewConf();
config.setInt(
@@ -91,10 +91,10 @@ public void setUp() throws IOException {
URI.create(config.get(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY));
localTargetDir = new File(TEST_ROOT_DIR, "/root/");
localTargetDir.mkdirs();
- Assert.assertEquals(HDFS_SCHEME, defaultFSURI.getScheme()); // hdfs scheme.
+ Assertions.assertEquals(HDFS_SCHEME, defaultFSURI.getScheme()); // hdfs scheme.
}
- @After
+ @AfterEach
public void cleanUp() throws IOException {
if (cluster != null) {
FileSystem fs = new DistributedFileSystem();
@@ -102,7 +102,7 @@ public void cleanUp() throws IOException {
try {
FileStatus[] statuses = fs.listStatus(new Path("/"));
for (FileStatus st : statuses) {
- Assert.assertTrue(fs.delete(st.getPath(), true));
+ Assertions.assertTrue(fs.delete(st.getPath(), true));
}
} finally {
fs.close();
@@ -111,7 +111,7 @@ public void cleanUp() throws IOException {
}
}
- @AfterClass
+ @AfterAll
public static void tearDown() throws IOException {
if (cluster != null) {
FileSystem.closeAll();
@@ -154,7 +154,7 @@ public void testMountLinkWithLocalAndHDFS() throws Exception {
try (FileSystem fs
= FileSystem.get(conf)) {
- Assert.assertEquals(2, fs.getChildFileSystems().length);
+ Assertions.assertEquals(2, fs.getChildFileSystems().length);
fs.createNewFile(hdfsFile); // /HDFSUser/testfile
fs.mkdirs(localDir); // /local/test
}
@@ -162,20 +162,20 @@ public void testMountLinkWithLocalAndHDFS() throws Exception {
// Initialize HDFS and test files exist in ls or not
try (DistributedFileSystem dfs = new DistributedFileSystem()) {
dfs.initialize(defaultFSURI, conf);
- Assert.assertTrue(dfs.exists(
+ Assertions.assertTrue(dfs.exists(
new Path(Path.getPathWithoutSchemeAndAuthority(hdfsTargetPath),
hdfsFile.getName()))); // should be in hdfs.
- Assert.assertFalse(dfs.exists(
+ Assertions.assertFalse(dfs.exists(
new Path(Path.getPathWithoutSchemeAndAuthority(localTragetPath),
localDir.getName()))); // should not be in local fs.
}
try (RawLocalFileSystem lfs = new RawLocalFileSystem()) {
lfs.initialize(localTragetPath.toUri(), conf);
- Assert.assertFalse(lfs.exists(
+ Assertions.assertFalse(lfs.exists(
new Path(Path.getPathWithoutSchemeAndAuthority(hdfsTargetPath),
hdfsFile.getName()))); // should not be in hdfs.
- Assert.assertTrue(lfs.exists(
+ Assertions.assertTrue(lfs.exists(
new Path(Path.getPathWithoutSchemeAndAuthority(localTragetPath),
localDir.getName()))); // should be in local fs.
}
@@ -210,7 +210,7 @@ public void testMountLinkWithNonExistentLink(boolean expectFsInitFailure)
});
} else {
try (FileSystem fs = FileSystem.get(conf)) {
- Assert.assertEquals("hdfs", fs.getScheme());
+ Assertions.assertEquals("hdfs", fs.getScheme());
}
}
}
@@ -241,14 +241,14 @@ public void testListStatusOnRootShouldListAllMountLinks() throws Exception {
try (FileSystem fs = FileSystem.get(conf)) {
fs.mkdirs(hdfsTargetPath);
FileStatus[] ls = fs.listStatus(new Path("/"));
- Assert.assertEquals(2, ls.length);
+ Assertions.assertEquals(2, ls.length);
String lsPath1 =
Path.getPathWithoutSchemeAndAuthority(ls[0].getPath()).toString();
String lsPath2 =
Path.getPathWithoutSchemeAndAuthority(ls[1].getPath()).toString();
- Assert.assertTrue(
+ Assertions.assertTrue(
HDFS_USER_FOLDER.equals(lsPath1) || LOCAL_FOLDER.equals(lsPath1));
- Assert.assertTrue(
+ Assertions.assertTrue(
HDFS_USER_FOLDER.equals(lsPath2) || LOCAL_FOLDER.equals(lsPath2));
}
}
@@ -270,7 +270,7 @@ public void testListStatusOnNonMountedPath() throws Exception {
try (FileSystem fs = FileSystem.get(conf)) {
fs.listStatus(new Path("/nonMount"));
- Assert.fail("It should fail as no mount link with /nonMount");
+ Assertions.fail("It should fail as no mount link with /nonMount");
}
}
@@ -349,8 +349,8 @@ public void testWithLinkFallBack() throws Exception {
try (FileSystem fs = FileSystem.get(conf)) {
fs.createNewFile(new Path("/nonMount/myfile"));
FileStatus[] ls = fs.listStatus(new Path("/nonMount"));
- Assert.assertEquals(1, ls.length);
- Assert.assertEquals(
+ Assertions.assertEquals(1, ls.length);
+ Assertions.assertEquals(
Path.getPathWithoutSchemeAndAuthority(ls[0].getPath()).getName(),
"myfile");
}
@@ -376,7 +376,7 @@ public void testCreateOnRoot(boolean fallbackExist) throws Exception {
localTargetDir.toURI().toString()}, conf);
try (FileSystem fs = FileSystem.get(conf)) {
if (fallbackExist) {
- Assert.assertTrue(fs.createNewFile(new Path("/newFileOnRoot")));
+ Assertions.assertTrue(fs.createNewFile(new Path("/newFileOnRoot")));
} else {
LambdaTestUtils.intercept(NotInMountpointException.class, () -> {
fs.createNewFile(new Path("/newFileOnRoot"));
@@ -422,7 +422,7 @@ public void testInvalidOverloadSchemeTargetFS() throws Exception {
try (FileSystem fs = FileSystem.get(conf)) {
fs.createNewFile(new Path("/onRootWhenFallBack"));
- Assert.fail("OverloadScheme target fs should be valid.");
+ Assertions.fail("OverloadScheme target fs should be valid.");
}
}
@@ -446,7 +446,7 @@ public void testViewFsOverloadSchemeWhenInnerCacheDisabled()
try (FileSystem fs = FileSystem.get(conf)) {
Path testFile = new Path(HDFS_USER_FOLDER + "/testFile");
fs.createNewFile(testFile);
- Assert.assertTrue(fs.exists(testFile));
+ Assertions.assertTrue(fs.exists(testFile));
}
}
@@ -470,13 +470,13 @@ public void testViewFsOverloadSchemeWithInnerCache()
// 1. Only 1 hdfs child file system should be there with cache.
try (FileSystem vfs = FileSystem.get(conf)) {
- Assert.assertEquals(1, vfs.getChildFileSystems().length);
+ Assertions.assertEquals(1, vfs.getChildFileSystems().length);
}
// 2. Two hdfs file systems should be there if no cache.
conf.setBoolean(Constants.CONFIG_VIEWFS_ENABLE_INNER_CACHE, false);
try (FileSystem vfs = FileSystem.get(conf)) {
- Assert.assertEquals(isFallBackExist(conf) ? 3 : 2,
+ Assertions.assertEquals(isFallBackExist(conf) ? 3 : 2,
vfs.getChildFileSystems().length);
}
}
@@ -509,7 +509,7 @@ public void testViewFsOverloadSchemeWithNoInnerCacheAndHdfsTargets()
conf.setBoolean(Constants.CONFIG_VIEWFS_ENABLE_INNER_CACHE, false);
// Two hdfs file systems should be there if no cache.
try (FileSystem vfs = FileSystem.get(conf)) {
- Assert.assertEquals(isFallBackExist(conf) ? 3 : 2,
+ Assertions.assertEquals(isFallBackExist(conf) ? 3 : 2,
vfs.getChildFileSystems().length);
}
}
@@ -537,7 +537,7 @@ public void testViewFsOverloadSchemeWithNoInnerCacheAndLocalSchemeTargets()
// cache should work.
conf.setBoolean(Constants.CONFIG_VIEWFS_ENABLE_INNER_CACHE, false);
try (FileSystem vfs = FileSystem.get(conf)) {
- Assert.assertEquals(isFallBackExist(conf) ? 2 : 1,
+ Assertions.assertEquals(isFallBackExist(conf) ? 2 : 1,
vfs.getChildFileSystems().length);
}
}
@@ -561,7 +561,7 @@ public void testNflyRename() throws Exception {
final Path testDir = new Path("/nflyroot/testdir1/sub1/sub3");
final Path testDirTmp = new Path("/nflyroot/testdir1/sub1/sub3_temp");
- assertTrue(testDir + ": Failed to create!", nfly.mkdirs(testDir));
+ assertTrue(nfly.mkdirs(testDir), testDir + ": Failed to create!");
// Test renames
assertTrue(nfly.rename(testDir, testDirTmp));
@@ -570,7 +570,7 @@ public void testNflyRename() throws Exception {
final URI[] testUris = new URI[] {uri1, uri2 };
for (final URI testUri : testUris) {
final FileSystem fs = FileSystem.get(testUri, conf);
- assertTrue(testDir + " should exist!", fs.exists(testDir));
+ assertTrue(fs.exists(testDir), testDir + " should exist!");
}
}
@@ -688,7 +688,7 @@ private void writeString(final FileSystem nfly, final String testString,
private void readString(final FileSystem nfly, final Path testFile,
final String testString, final URI testUri) throws IOException {
try (FSDataInputStream fsDis = nfly.open(testFile)) {
- assertEquals("Wrong file content", testString, fsDis.readUTF());
+ assertEquals(testString, fsDis.readUTF(), "Wrong file content");
}
}
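
Many tests in this file keep the JUnit 4-era pattern of calling fail() inside a try block and catching the expected IOException; the conversion only swaps Assert.fail for Assertions.fail. Jupiter also provides assertThrows, which this patch does not adopt; purely for reference, it would look roughly like this against a hypothetical listing call:

    import static org.junit.jupiter.api.Assertions.assertThrows;
    import static org.junit.jupiter.api.Assertions.assertTrue;

    import java.io.FileNotFoundException;
    import java.io.IOException;
    import org.junit.jupiter.api.Test;

    class AssertThrowsExample {
      // Hypothetical stand-in for fs.listStatus(new Path("/nonMount")).
      private void listNonMountedPath() throws IOException {
        throw new FileNotFoundException("no mount link with /nonMount");
      }

      @Test
      void listingNonMountedPathFails() {
        // assertThrows returns the thrown exception so the message can still be inspected.
        IOException e = assertThrows(IOException.class, this::listNonMountedPath);
        assertTrue(e.getMessage().contains("/nonMount"));
      }
    }
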
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemWithAcls.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemWithAcls.java
index 10b6f17ad2843..bb3eb305abf5d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemWithAcls.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemWithAcls.java
@@ -28,11 +28,7 @@
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.MiniDFSNNTopology;
import org.apache.hadoop.util.Lists;
-import org.junit.After;
-import org.junit.AfterClass;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.*;
import java.io.IOException;
import java.util.List;
@@ -42,8 +38,8 @@
import static org.apache.hadoop.fs.permission.AclEntryType.*;
import static org.apache.hadoop.fs.permission.FsAction.*;
import static org.apache.hadoop.hdfs.server.namenode.AclTestHelpers.aclEntry;
-import static org.junit.Assert.assertArrayEquals;
-import static org.junit.Assert.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertArrayEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
/**
* Verify ACL through ViewFileSystem functionality.
@@ -61,7 +57,7 @@ public class TestViewFileSystemWithAcls {
private FileSystemTestHelper fileSystemTestHelper =
new FileSystemTestHelper("/tmp/TestViewFileSystemWithAcls");
- @BeforeClass
+ @BeforeAll
public static void clusterSetupAtBeginning() throws IOException {
clusterConf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
cluster = new MiniDFSCluster.Builder(clusterConf)
@@ -74,14 +70,14 @@ public static void clusterSetupAtBeginning() throws IOException {
fHdfs2 = cluster.getFileSystem(1);
}
- @AfterClass
+ @AfterAll
public static void ClusterShutdownAtEnd() throws Exception {
if (cluster != null) {
cluster.shutdown();
}
}
- @Before
+ @BeforeEach
public void setUp() throws Exception {
fsTarget = fHdfs;
fsTarget2 = fHdfs2;
@@ -105,7 +101,7 @@ private void setupMountPoints() {
ConfigUtil.addLink(fsViewConf, mountOnNn2.toString(), targetTestRoot2.toUri());
}
- @After
+ @AfterEach
public void tearDown() throws Exception {
fsTarget.delete(fileSystemTestHelper.getTestRootPath(fsTarget), true);
fsTarget2.delete(fileSystemTestHelper.getTestRootPath(fsTarget2), true);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemWithTruncate.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemWithTruncate.java
index 2f44b46aa3056..43a2288f8f49c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemWithTruncate.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemWithTruncate.java
@@ -30,13 +30,13 @@
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.MiniDFSNNTopology;
import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.After;
-import org.junit.AfterClass;
-import org.junit.Before;
-import org.junit.BeforeClass;
import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.BeforeEach;
-import static org.junit.Assert.*;
+import static org.junit.jupiter.api.Assertions.assertEquals;
/**
* Verify truncate through ViewFileSystem functionality.
@@ -53,7 +53,7 @@ public class TestViewFileSystemWithTruncate {
private FileSystemTestHelper fileSystemTestHelper =
new FileSystemTestHelper("/tmp/TestViewFileSystemWithXAttrs");
- @BeforeClass
+ @BeforeAll
public static void clusterSetupAtBeginning() throws IOException {
cluster = new MiniDFSCluster.Builder(clusterConf)
.nnTopology(MiniDFSNNTopology.simpleFederatedTopology(2))
@@ -63,14 +63,14 @@ public static void clusterSetupAtBeginning() throws IOException {
fHdfs = cluster.getFileSystem(0);
}
- @AfterClass
+ @AfterAll
public static void clusterShutdownAtEnd() throws Exception {
if (cluster != null) {
cluster.shutdown();
}
}
- @Before
+ @BeforeEach
public void setUp() throws Exception {
fsTarget = fHdfs;
targetTestRoot = fileSystemTestHelper.getAbsoluteTestRootPath(fsTarget);
@@ -89,7 +89,7 @@ private void setupMountPoints() {
.addLink(fsViewConf, mountOnNn1.toString(), targetTestRoot.toUri());
}
- @After
+ @AfterEach
public void tearDown() throws Exception {
fsTarget.delete(fileSystemTestHelper.getTestRootPath(fsTarget), true);
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemWithXAttrs.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemWithXAttrs.java
index b487188c4e1fa..89a000f6cdb96 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemWithXAttrs.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemWithXAttrs.java
@@ -24,16 +24,12 @@
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.MiniDFSNNTopology;
-import org.junit.After;
-import org.junit.AfterClass;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.*;
import java.io.IOException;
-import static org.junit.Assert.assertArrayEquals;
-import static org.junit.Assert.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertArrayEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
/**
* Verify XAttrs through ViewFileSystem functionality.
@@ -57,7 +53,7 @@ public class TestViewFileSystemWithXAttrs {
protected static final String name2 = "user.a2";
protected static final byte[] value2 = {0x37, 0x38, 0x39};
- @BeforeClass
+ @BeforeAll
public static void clusterSetupAtBeginning() throws IOException {
cluster = new MiniDFSCluster.Builder(clusterConf)
.nnTopology(MiniDFSNNTopology.simpleFederatedTopology(2))
@@ -69,14 +65,14 @@ public static void clusterSetupAtBeginning() throws IOException {
fHdfs2 = cluster.getFileSystem(1);
}
- @AfterClass
+ @AfterAll
public static void ClusterShutdownAtEnd() throws Exception {
if (cluster != null) {
cluster.shutdown();
}
}
- @Before
+ @BeforeEach
public void setUp() throws Exception {
fsTarget = fHdfs;
fsTarget2 = fHdfs2;
@@ -102,7 +98,7 @@ private void setupMountPoints() {
targetTestRoot2.toUri());
}
- @After
+ @AfterEach
public void tearDown() throws Exception {
fsTarget.delete(fileSystemTestHelper.getTestRootPath(fsTarget), true);
fsTarget2.delete(fileSystemTestHelper.getTestRootPath(fsTarget2), true);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsAtHdfsRoot.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsAtHdfsRoot.java
index 886646518838b..7b95e9c265e42 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsAtHdfsRoot.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsAtHdfsRoot.java
@@ -30,9 +30,9 @@
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.junit.AfterClass;
-import org.junit.Before;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.BeforeEach;
/**
* Make sure that ViewFs works when the root of an FS is mounted to a ViewFs
@@ -49,7 +49,7 @@ protected FileContextTestHelper createFileContextHelper() {
return new FileContextTestHelper("/tmp/TestViewFsAtHdfsRoot");
}
- @BeforeClass
+ @BeforeAll
public static void clusterSetupAtBegining() throws IOException,
LoginException, URISyntaxException {
SupportsBlocks = true;
@@ -62,7 +62,7 @@ public static void clusterSetupAtBegining() throws IOException,
}
- @AfterClass
+ @AfterAll
public static void ClusterShutdownAtEnd() throws Exception {
if (cluster != null) {
cluster.shutdown();
@@ -70,7 +70,7 @@ public static void ClusterShutdownAtEnd() throws Exception {
}
@Override
- @Before
+ @BeforeEach
public void setUp() throws Exception {
// create the test root on local_fs
fcTarget = fc;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsDefaultValue.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsDefaultValue.java
index a49735c2e86cd..60ee8018215a9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsDefaultValue.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsDefaultValue.java
@@ -28,9 +28,7 @@
import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_KEY;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.*;
import java.io.IOException;
import java.net.URI;
@@ -49,9 +47,9 @@
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
/**
* Tests for viewfs implementation of default fs level values.
@@ -74,7 +72,7 @@ public class TestViewFsDefaultValue {
// Use NotInMountpoint path to trigger the exception
private static Path notInMountpointPath;
- @BeforeClass
+ @BeforeAll
public static void clusterSetupAtBegining() throws IOException,
LoginException, URISyntaxException {
@@ -218,7 +216,7 @@ public void testGetQuotaUsageWithQuotaDefined() throws IOException {
assertTrue(qu.getSpaceConsumed() > 0);
}
- @AfterClass
+ @AfterAll
public static void cleanup() throws IOException {
fHdfs.delete(new Path(testFileName), true);
fHdfs.delete(notInMountpointPath, true);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsFileStatusHdfs.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsFileStatusHdfs.java
index e3b4fe25fc332..e8476c1479644 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsFileStatusHdfs.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsFileStatusHdfs.java
@@ -23,8 +23,8 @@
* Since viewfs has overlayed ViewFsFileStatus, we ran into
* serialization problems. THis test is test the fix.
*/
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
import java.io.IOException;
import java.net.URI;
@@ -43,9 +43,9 @@
import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.security.UserGroupInformation;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
public class TestViewFsFileStatusHdfs {
@@ -59,7 +59,7 @@ public class TestViewFsFileStatusHdfs {
private static FileSystem fHdfs;
private static FileSystem vfs;
- @BeforeClass
+ @BeforeAll
public static void clusterSetupAtBegining() throws IOException,
LoginException, URISyntaxException {
cluster = new MiniDFSCluster.Builder(CONF).numDataNodes(2).build();
@@ -108,15 +108,15 @@ public void testGetFileChecksum() throws IOException, URISyntaxException {
// Get checksum of different file in HDFS
FileChecksum otherHdfsFileCheckSum = fHdfs.getFileChecksum(
new Path(someFile+"other"));
- // Checksums of the same file (got through HDFS and ViewFS should be same)
- assertEquals("HDFS and ViewFS checksums were not the same", viewFSCheckSum,
- hdfsCheckSum);
- // Checksum of different files should be different.
- assertFalse("Some other HDFS file which should not have had the same " +
- "checksum as viewFS did!", viewFSCheckSum.equals(otherHdfsFileCheckSum));
+ // Checksums of the same file (got through HDFS and ViewFS should be same)
+ assertEquals(viewFSCheckSum,
+ hdfsCheckSum, "HDFS and ViewFS checksums were not the same");
+ // Checksum of different files should be different.
+ assertFalse(viewFSCheckSum.equals(otherHdfsFileCheckSum),
+ "Some other HDFS file which should not have had the same " +
+ "checksum as viewFS did!");
}
- @AfterClass
+ @AfterAll
public static void cleanup() throws IOException {
fHdfs.delete(new Path(testfilename), true);
fHdfs.delete(new Path(someFile), true);
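
The checksum comparison above keeps the assertFalse(a.equals(b)) form with the message moved to the end. Jupiter's assertNotEquals expresses the same check and reports both values on failure; it is not used in this patch, but for reference (the checksum strings below are placeholders):

    import static org.junit.jupiter.api.Assertions.assertNotEquals;

    import org.junit.jupiter.api.Test;

    class NotEqualsExample {
      @Test
      void differentFilesHaveDifferentChecksums() {
        String viewFsChecksum = "abc";   // hypothetical checksum values
        String otherChecksum = "def";
        assertNotEquals(viewFsChecksum, otherChecksum,
            "another HDFS file should not share the viewFS checksum");
      }
    }
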
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsHdfs.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsHdfs.java
index 540883dd2e208..fdd30dcd23772 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsHdfs.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsHdfs.java
@@ -36,13 +36,13 @@
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.UserGroupInformation;
-import org.junit.AfterClass;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.fail;
public class TestViewFsHdfs extends ViewFsBaseTest {
@@ -56,7 +56,7 @@ protected FileContextTestHelper createFileContextHelper() {
}
- @BeforeClass
+ @BeforeAll
public static void clusterSetupAtBegining() throws IOException,
LoginException, URISyntaxException {
SupportsBlocks = true;
@@ -72,7 +72,7 @@ public static void clusterSetupAtBegining() throws IOException,
}
- @AfterClass
+ @AfterAll
public static void ClusterShutdownAtEnd() throws Exception {
if (cluster != null) {
cluster.shutdown();
@@ -80,7 +80,7 @@ public static void ClusterShutdownAtEnd() throws Exception {
}
@Override
- @Before
+ @BeforeEach
public void setUp() throws Exception {
// create the test root on local_fs
fcTarget = fc;
@@ -160,7 +160,7 @@ public Object run() throws IOException {
FileContext otherfs = map.get("user1");
otherfs.mkdir(user1Path, FileContext.DEFAULT_PERM, false);
String owner = otherfs.getFileStatus(user1Path).getOwner();
- assertEquals("The owner did not match ", owner, userUgi.getShortUserName());
+ assertEquals(owner, userUgi.getShortUserName(), "The owner did not match ");
otherfs.delete(user1Path, false);
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsLinkFallback.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsLinkFallback.java
index 09e02be640e5e..e2f8c8e86333c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsLinkFallback.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsLinkFallback.java
@@ -18,10 +18,7 @@
package org.apache.hadoop.fs.viewfs;
import static org.apache.hadoop.fs.CreateFlag.CREATE;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.*;
import java.io.FileNotFoundException;
import java.io.IOException;
@@ -46,11 +43,11 @@
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.MiniDFSNNTopology;
import org.apache.hadoop.test.LambdaTestUtils;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.BeforeClass;
import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.BeforeEach;
/**
* Test for viewfs with LinkFallback mount table entries.
@@ -62,7 +59,7 @@ public class TestViewFsLinkFallback {
private static URI viewFsDefaultClusterUri;
private Path targetTestRoot;
- @BeforeClass
+ @BeforeAll
public static void clusterSetupAtBeginning()
throws IOException, URISyntaxException {
int nameSpacesCount = 3;
@@ -88,14 +85,14 @@ public static void clusterSetupAtBeginning()
}
- @AfterClass
+ @AfterAll
public static void clusterShutdownAtEnd() throws Exception {
if (cluster != null) {
cluster.shutdown();
}
}
- @Before
+ @BeforeEach
public void setUp() throws Exception {
fsTarget = fsDefault;
initializeTargetTestRoot();
@@ -295,7 +292,7 @@ public void testMkdirShouldFailWhenFallbackFSNotAvailable()
// attempt to create in fallback.
vfs.mkdir(nextLevelToInternalDir, FsPermission.getDirDefault(),
false);
- Assert.fail("It should throw IOE when fallback fs not available.");
+ Assertions.fail("It should throw IOE when fallback fs not available.");
} catch (IOException e) {
cluster.restartNameNodes();
// should succeed when fallback fs is back to normal.
@@ -570,7 +567,7 @@ private void verifyRename(AbstractFileSystem fs, Path src, Path dst)
fs.rename(src, dst, Options.Rename.OVERWRITE);
LambdaTestUtils
.intercept(FileNotFoundException.class, () -> fs.getFileStatus(src));
- Assert.assertNotNull(fs.getFileStatus(dst));
+ Assertions.assertNotNull(fs.getFileStatus(dst));
}
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsWithAcls.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsWithAcls.java
index 1243add66a136..689f98f3df715 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsWithAcls.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsWithAcls.java
@@ -29,11 +29,8 @@
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.MiniDFSNNTopology;
import org.apache.hadoop.util.Lists;
-import org.junit.After;
-import org.junit.AfterClass;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.*;
+
import java.util.List;
import java.io.IOException;
@@ -44,8 +41,8 @@
import static org.apache.hadoop.fs.permission.FsAction.*;
import static org.apache.hadoop.fs.permission.FsAction.NONE;
import static org.apache.hadoop.hdfs.server.namenode.AclTestHelpers.aclEntry;
-import static org.junit.Assert.assertArrayEquals;
-import static org.junit.Assert.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertArrayEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
/**
* Verify ACL through ViewFs functionality.
@@ -61,7 +58,7 @@ public class TestViewFsWithAcls {
private FileContextTestHelper fileContextTestHelper =
new FileContextTestHelper("/tmp/TestViewFsWithAcls");
- @BeforeClass
+ @BeforeAll
public static void clusterSetupAtBeginning() throws IOException {
clusterConf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
cluster = new MiniDFSCluster.Builder(clusterConf)
@@ -74,14 +71,14 @@ public static void clusterSetupAtBeginning() throws IOException {
fc2 = FileContext.getFileContext(cluster.getURI(1), clusterConf);
}
- @AfterClass
+ @AfterAll
public static void ClusterShutdownAtEnd() throws Exception {
if (cluster != null) {
cluster.shutdown();
}
}
- @Before
+ @BeforeEach
public void setUp() throws Exception {
fcTarget = fc;
fcTarget2 = fc2;
@@ -105,7 +102,7 @@ private void setupMountPoints() {
ConfigUtil.addLink(fsViewConf, mountOnNn2.toString(), targetTestRoot2.toUri());
}
- @After
+ @AfterEach
public void tearDown() throws Exception {
fcTarget.delete(fileContextTestHelper.getTestRootPath(fcTarget), true);
fcTarget2.delete(fileContextTestHelper.getTestRootPath(fcTarget2), true);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsWithXAttrs.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsWithXAttrs.java
index 9a4223a86fe20..452be3f122d08 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsWithXAttrs.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsWithXAttrs.java
@@ -25,16 +25,12 @@
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.MiniDFSNNTopology;
-import org.junit.After;
-import org.junit.AfterClass;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.*;
import java.io.IOException;
-import static org.junit.Assert.assertArrayEquals;
-import static org.junit.Assert.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertArrayEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
/**
* Verify XAttrs through ViewFs functionality.
@@ -56,7 +52,7 @@ public class TestViewFsWithXAttrs {
protected static final String name2 = "user.a2";
protected static final byte[] value2 = {0x37, 0x38, 0x39};
- @BeforeClass
+ @BeforeAll
public static void clusterSetupAtBeginning() throws IOException {
cluster = new MiniDFSCluster.Builder(clusterConf)
.nnTopology(MiniDFSNNTopology.simpleFederatedTopology(2))
@@ -68,14 +64,14 @@ public static void clusterSetupAtBeginning() throws IOException {
fc2 = FileContext.getFileContext(cluster.getURI(1), clusterConf);
}
- @AfterClass
+ @AfterAll
public static void ClusterShutdownAtEnd() throws Exception {
if (cluster != null) {
cluster.shutdown();
}
}
- @Before
+ @BeforeEach
public void setUp() throws Exception {
fcTarget = fc;
fcTarget2 = fc2;
@@ -99,7 +95,7 @@ private void setupMountPoints() {
ConfigUtil.addLink(fsViewConf, mountOnNn2.toString(), targetTestRoot2.toUri());
}
- @After
+ @AfterEach
public void tearDown() throws Exception {
fcTarget.delete(fileContextTestHelper.getTestRootPath(fcTarget), true);
fcTarget2.delete(fileContextTestHelper.getTestRootPath(fcTarget2), true);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/AdminStatesBaseTest.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/AdminStatesBaseTest.java
index 10b18032e13e3..5ad280c8b5511 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/AdminStatesBaseTest.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/AdminStatesBaseTest.java
@@ -17,8 +17,8 @@
*/
package org.apache.hadoop.hdfs;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
import java.io.IOException;
import java.util.ArrayList;
@@ -29,6 +29,7 @@
import java.util.Random;
import org.apache.hadoop.util.Lists;
+import org.junit.jupiter.api.AfterEach;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.conf.Configuration;
@@ -46,8 +47,7 @@
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
import org.apache.hadoop.hdfs.util.HostsFileWriter;
-import org.junit.After;
-import org.junit.Before;
+import org.junit.jupiter.api.BeforeEach;
/**
* This class provide utilities for testing of the admin operations of nodes.
@@ -81,7 +81,7 @@ protected MiniDFSCluster getCluster() {
return cluster;
}
- @Before
+ @BeforeEach
public void setup() throws IOException {
// Set up the hosts/exclude files.
hostsFileWriter = new HostsFileWriter();
@@ -108,7 +108,7 @@ public void setup() throws IOException {
}
- @After
+ @AfterEach
public void teardown() throws IOException {
hostsFileWriter.cleanup();
shutdownCluster();
@@ -381,7 +381,7 @@ protected DFSClient getDfsClient(final int nnIndex) throws IOException {
protected static void validateCluster(DFSClient client, int numDNs)
throws IOException {
DatanodeInfo[] info = client.datanodeReport(DatanodeReportType.LIVE);
- assertEquals("Number of Datanodes ", numDNs, info.length);
+ assertEquals(numDNs, info.length, "Number of Datanodes ");
}
/** Start a MiniDFSCluster.
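
AdminStatesBaseTest is a shared base class, so its converted @BeforeEach/@AfterEach methods are inherited by the admin-state subclasses; Jupiter runs inherited lifecycle methods just as JUnit 4 did, as long as they are not private. A tiny sketch under that assumption (class names are illustrative):

    import static org.junit.jupiter.api.Assertions.assertNotNull;

    import org.junit.jupiter.api.AfterEach;
    import org.junit.jupiter.api.BeforeEach;
    import org.junit.jupiter.api.Test;

    abstract class BaseFixture {
      protected String hostsFile;

      @BeforeEach
      void setup() { hostsFile = "hosts"; }   // runs before each test in subclasses too

      @AfterEach
      void teardown() { hostsFile = null; }
    }

    class ConcreteAdminTest extends BaseFixture {
      @Test
      void baseSetupRan() {
        assertNotNull(hostsFile, "base class @BeforeEach should have run");
      }
    }
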
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/AppendTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/AppendTestUtil.java
index f7d90d2b19826..208714c887f53 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/AppendTestUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/AppendTestUtil.java
@@ -17,9 +17,7 @@
*/
package org.apache.hadoop.hdfs;
-import static org.junit.Assert.assertArrayEquals;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.*;
import java.io.IOException;
import java.io.OutputStream;
@@ -97,9 +95,9 @@ public static int[] randomFilePartition(int n, int parts) {
}
LOG.info("partition=" + Arrays.toString(p));
- assertTrue("i=0", p[0] > 0 && p[0] < n);
+ assertTrue(p[0] > 0 && p[0] < n, "i=0");
for(int i = 1; i < p.length; i++) {
- assertTrue("i=" + i, p[i] > p[i - 1] && p[i] < n);
+ assertTrue(p[i] > p[i - 1] && p[i] < n, "i=" + i);
}
return p;
}
@@ -217,8 +215,8 @@ public static void checkFullFile(FileSystem fs, Path name, int len,
boolean checkFileStatus) throws IOException {
if (checkFileStatus) {
final FileStatus status = fs.getFileStatus(name);
- assertEquals("len=" + len + " but status.getLen()=" + status.getLen(),
- len, status.getLen());
+ assertEquals(len, status.getLen(),
+ "len=" + len + " but status.getLen()=" + status.getLen());
}
FSDataInputStream stm = fs.open(name);
@@ -231,9 +229,9 @@ public static void checkFullFile(FileSystem fs, Path name, int len,
private static void checkData(final byte[] actual, int from,
final byte[] expected, String message) {
for (int idx = 0; idx < actual.length; idx++) {
- assertEquals(message+" byte "+(from+idx)+" differs. expected "+
- expected[from+idx]+" actual "+actual[idx],
- expected[from+idx], actual[idx]);
+ assertEquals(expected[from + idx], actual[idx],
+ message + " byte " + (from + idx) + " differs. expected " +
+ expected[from + idx] + " actual " + actual[idx]);
actual[idx] = 0;
}
}
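
AppendTestUtil builds its failure messages by string concatenation inside a loop, and that expression now sits in the trailing message position. Jupiter also accepts a Supplier<String> there, which would defer building the message until an assertion actually fails; this patch keeps plain strings, but the lazy variant would look roughly like this (signature simplified):

    import static org.junit.jupiter.api.Assertions.assertEquals;

    final class LazyMessageExample {
      private LazyMessageExample() {
      }

      static void checkData(byte[] actual, int from, byte[] expected, String message) {
        for (int idx = 0; idx < actual.length; idx++) {
          final int i = idx;  // effectively final copy for use in the lambda
          // The Supplier overload only builds the message if the check fails.
          assertEquals(expected[from + i], actual[i],
              () -> message + " byte " + (from + i) + " differs. expected "
                  + expected[from + i] + " actual " + actual[i]);
        }
      }
    }
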
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
index d813375e2748f..f0c44bcda18fb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
@@ -25,10 +25,7 @@
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.*;
import java.io.BufferedOutputStream;
import java.io.BufferedReader;
@@ -190,8 +187,8 @@
import org.apache.hadoop.util.Time;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.VersionInfo;
-import org.junit.Assert;
-import org.junit.Assume;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.Assumptions;
import org.apache.hadoop.util.ToolRunner;
import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting;
@@ -1672,15 +1669,15 @@ public static long roundUpToMultiple(long val, int factor) {
}
public static void checkComponentsEquals(byte[][] expected, byte[][] actual) {
- assertEquals("expected: " + DFSUtil.byteArray2PathString(expected)
- + ", actual: " + DFSUtil.byteArray2PathString(actual), expected.length,
- actual.length);
+ assertEquals(expected.length, actual.length,
+ "expected: " + DFSUtil.byteArray2PathString(expected)
+ + ", actual: " + DFSUtil.byteArray2PathString(actual));
int i = 0;
for (byte[] e : expected) {
byte[] actualComponent = actual[i++];
- assertTrue("expected: " + DFSUtil.bytes2String(e) + ", actual: "
- + DFSUtil.bytes2String(actualComponent),
- Arrays.equals(e, actualComponent));
+ assertTrue(Arrays.equals(e, actualComponent),
+ "expected: " + DFSUtil.bytes2String(e) + ", actual: "
+ + DFSUtil.bytes2String(actualComponent));
}
}
@@ -1699,7 +1696,7 @@ public ShortCircuitTestContext(String testName) {
this.sockDir = new TemporarySocketDirectory();
DomainSocket.disableBindPathValidation();
formerTcpReadsDisabled = DFSInputStream.tcpReadsDisabledForTesting;
- Assume.assumeTrue(DomainSocket.getLoadingFailureReason() == null);
+ Assumptions.assumeTrue(DomainSocket.getLoadingFailureReason() == null);
}
public Configuration newConfiguration() {
@@ -1737,7 +1734,7 @@ public static void verifyFilesEqual(FileSystem fs, Path p1, Path p2, int len)
try (FSDataInputStream in1 = fs.open(p1);
FSDataInputStream in2 = fs.open(p2)) {
for (int i = 0; i < len; i++) {
- assertEquals("Mismatch at byte " + i, in1.read(), in2.read());
+ assertEquals(in1.read(), in2.read(), "Mismatch at byte " + i);
}
}
}
@@ -1813,32 +1810,32 @@ public static void verifyClientStats(Configuration conf,
client.getReplicatedBlockStats();
ECBlockGroupStats ecBlockGroupStats = client.getECBlockGroupStats();
- assertEquals("Under replicated stats not matching!",
- aggregatedStats[ClientProtocol.GET_STATS_LOW_REDUNDANCY_IDX],
- aggregatedStats[ClientProtocol.GET_STATS_UNDER_REPLICATED_IDX]);
- assertEquals("Low redundancy stats not matching!",
- aggregatedStats[ClientProtocol.GET_STATS_LOW_REDUNDANCY_IDX],
- replicatedBlockStats.getLowRedundancyBlocks() +
- ecBlockGroupStats.getLowRedundancyBlockGroups());
- assertEquals("Corrupt blocks stats not matching!",
- aggregatedStats[ClientProtocol.GET_STATS_CORRUPT_BLOCKS_IDX],
- replicatedBlockStats.getCorruptBlocks() +
- ecBlockGroupStats.getCorruptBlockGroups());
- assertEquals("Missing blocks stats not matching!",
- aggregatedStats[ClientProtocol.GET_STATS_MISSING_BLOCKS_IDX],
- replicatedBlockStats.getMissingReplicaBlocks() +
- ecBlockGroupStats.getMissingBlockGroups());
- assertEquals("Missing blocks with replication factor one not matching!",
- aggregatedStats[ClientProtocol.GET_STATS_MISSING_REPL_ONE_BLOCKS_IDX],
- replicatedBlockStats.getMissingReplicationOneBlocks());
- assertEquals("Bytes in future blocks stats not matching!",
- aggregatedStats[ClientProtocol.GET_STATS_BYTES_IN_FUTURE_BLOCKS_IDX],
- replicatedBlockStats.getBytesInFutureBlocks() +
- ecBlockGroupStats.getBytesInFutureBlockGroups());
- assertEquals("Pending deletion blocks stats not matching!",
- aggregatedStats[ClientProtocol.GET_STATS_PENDING_DELETION_BLOCKS_IDX],
- replicatedBlockStats.getPendingDeletionBlocks() +
- ecBlockGroupStats.getPendingDeletionBlocks());
+ assertEquals(aggregatedStats[ClientProtocol.GET_STATS_LOW_REDUNDANCY_IDX],
+ aggregatedStats[ClientProtocol.GET_STATS_UNDER_REPLICATED_IDX],
+ "Under replicated stats not matching!");
+ assertEquals(aggregatedStats[ClientProtocol.GET_STATS_LOW_REDUNDANCY_IDX],
+ replicatedBlockStats.getLowRedundancyBlocks() +
+ ecBlockGroupStats.getLowRedundancyBlockGroups(),
+ "Low redundancy stats not matching!");
+ assertEquals(aggregatedStats[ClientProtocol.GET_STATS_CORRUPT_BLOCKS_IDX],
+ replicatedBlockStats.getCorruptBlocks() +
+ ecBlockGroupStats.getCorruptBlockGroups(),
+ "Corrupt blocks stats not matching!");
+ assertEquals(aggregatedStats[ClientProtocol.GET_STATS_MISSING_BLOCKS_IDX],
+ replicatedBlockStats.getMissingReplicaBlocks() +
+ ecBlockGroupStats.getMissingBlockGroups(),
+ "Missing blocks stats not matching!");
+ assertEquals(aggregatedStats[ClientProtocol.GET_STATS_MISSING_REPL_ONE_BLOCKS_IDX],
+ replicatedBlockStats.getMissingReplicationOneBlocks(),
+ "Missing blocks with replication factor one not matching!");
+ assertEquals(aggregatedStats[ClientProtocol.GET_STATS_BYTES_IN_FUTURE_BLOCKS_IDX],
+ replicatedBlockStats.getBytesInFutureBlocks() +
+ ecBlockGroupStats.getBytesInFutureBlockGroups(),
+ "Bytes in future blocks stats not matching!");
+ assertEquals(aggregatedStats[ClientProtocol.GET_STATS_PENDING_DELETION_BLOCKS_IDX],
+ replicatedBlockStats.getPendingDeletionBlocks() +
+ ecBlockGroupStats.getPendingDeletionBlocks(),
+ "Pending deletion blocks stats not matching!");
}
/**
@@ -1884,8 +1881,8 @@ public static DatanodeDescriptor getExpectedPrimaryNode(NameNode nn,
ExtendedBlock blk) {
BlockManager bm0 = nn.getNamesystem().getBlockManager();
BlockInfo storedBlock = bm0.getStoredBlock(blk.getLocalBlock());
- assertTrue("Block " + blk + " should be under construction, " +
- "got: " + storedBlock, !storedBlock.isComplete());
+ assertTrue(!storedBlock.isComplete(), "Block " + blk + " should be under construction, " +
+ "got: " + storedBlock);
// We expect that the replica with the most recent heart beat will be
// the one to be in charge of the synchronization / recovery protocol.
final DatanodeStorageInfo[] storages = storedBlock
@@ -1933,8 +1930,8 @@ public static void toolRun(Tool tool, String cmd, int retcode, String contain)
}
assertEquals(retcode, ret);
if (contain != null) {
- assertTrue("The real output is: " + output + ".\n It should contain: "
- + contain, output.contains(contain));
+ assertTrue(output.contains(contain), "The real output is: " + output + ".\n It should contain: "
+ + contain);
}
}
@@ -2338,23 +2335,23 @@ public static void verifyDelete(FsShell shell, FileSystem fs, Path path,
public static void verifyDelete(FsShell shell, FileSystem fs, Path path,
Path trashPath, boolean shouldExistInTrash) throws Exception {
- assertTrue(path + " file does not exist", fs.exists(path));
+ assertTrue(fs.exists(path), path + " file does not exist");
// Verify that trashPath has a path component named ".Trash"
Path checkTrash = trashPath;
while (!checkTrash.isRoot() && !checkTrash.getName().equals(".Trash")) {
checkTrash = checkTrash.getParent();
}
- assertEquals("No .Trash component found in trash path " + trashPath,
- ".Trash", checkTrash.getName());
+ assertEquals(".Trash", checkTrash.getName(),
+ "No .Trash component found in trash path " + trashPath);
String[] argv = new String[]{"-rm", "-r", path.toString()};
int res = ToolRunner.run(shell, argv);
- assertEquals("rm failed", 0, res);
+ assertEquals(0, res, "rm failed");
if (shouldExistInTrash) {
- assertTrue("File not in trash : " + trashPath, fs.exists(trashPath));
+ assertTrue(fs.exists(trashPath), "File not in trash : " + trashPath);
} else {
- assertFalse("File in trash : " + trashPath, fs.exists(trashPath));
+ assertFalse(fs.exists(trashPath), "File in trash : " + trashPath);
}
}
@@ -2563,7 +2560,7 @@ public static NameNodeConnector getNameNodeConnector(Configuration conf,
Path filePath, int namenodeCount, boolean createMoverPath)
throws IOException {
final Collection namenodes = DFSUtil.getInternalNsRpcUris(conf);
- Assert.assertEquals(namenodeCount, namenodes.size());
+ Assertions.assertEquals(namenodeCount, namenodes.size());
NameNodeConnector.checkOtherInstanceRunning(createMoverPath);
while (true) {
try {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/FileAppendTest4.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/FileAppendTest4.java
index a8f7378ca0bc0..ea733e425ac87 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/FileAppendTest4.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/FileAppendTest4.java
@@ -26,9 +26,9 @@
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
/** This is a comprehensive append test that tries
* all combinations of file length and number of appended bytes
@@ -59,7 +59,7 @@ private static void init(Configuration conf) {
conf.setInt(HdfsClientConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY, PACKET_SIZE);
}
- @BeforeClass
+ @BeforeAll
public static void startUp () throws IOException {
conf = new HdfsConfiguration();
init(conf);
@@ -67,7 +67,7 @@ public static void startUp () throws IOException {
fs = cluster.getFileSystem();
}
- @AfterClass
+ @AfterAll
public static void tearDown() {
if (cluster != null) {
cluster.shutdown();
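FileAppendTest4 shows the lifecycle-annotation side of the migration: @BeforeClass/@AfterClass become @BeforeAll/@AfterAll. In Jupiter these methods must still be static unless the class opts into @TestInstance(Lifecycle.PER_CLASS), which the existing static startUp/tearDown methods already satisfy. A rough sketch with an invented class name:

    import org.junit.jupiter.api.AfterAll;
    import org.junit.jupiter.api.BeforeAll;
    import org.junit.jupiter.api.Test;

    class LifecycleSketch {
      @BeforeAll
      static void startUp() {
        // build an expensive shared fixture once for the whole class
      }

      @AfterAll
      static void tearDown() {
        // release the shared fixture
      }

      @Test
      void usesSharedFixture() {
        // individual test body
      }
    }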
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/ParameterizedTestDFSStripedOutputStreamWithFailure.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/ParameterizedTestDFSStripedOutputStreamWithFailure.java
index 284fdb77acac3..b43c9fcd041a7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/ParameterizedTestDFSStripedOutputStreamWithFailure.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/ParameterizedTestDFSStripedOutputStreamWithFailure.java
@@ -27,7 +27,7 @@
import java.util.Collection;
import java.util.List;
-import static org.junit.Assume.assumeTrue;
+import static org.junit.jupiter.api.Assumptions.assumeTrue;
/**
* Test striped file write operation with data node failures with parameterized
@@ -62,9 +62,9 @@ public void runTestWithSingleFailure() {
}
final int i = base;
final Integer length = getLength(i);
- assumeTrue("Skip test " + i + " since length=null.", length != null);
- assumeTrue("Test " + i + ", length=" + length
- + ", is not chosen to run.", RANDOM.nextInt(16) != 0);
+ assumeTrue(length != null, "Skip test " + i + " since length=null.");
+ assumeTrue(RANDOM.nextInt(16) != 0, "Test " + i + ", length=" + length
+ + ", is not chosen to run.");
System.out.println("Run test " + i + ", length=" + length);
runTest(length);
}
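The assumption API follows the same message-last rule: org.junit.Assume.assumeTrue(message, condition) becomes org.junit.jupiter.api.Assumptions.assumeTrue(condition, message), and a false assumption still aborts (skips) the test rather than failing it. An illustrative sketch only; the class name is invented:

    import static org.junit.jupiter.api.Assumptions.assumeTrue;

    class AssumptionSketch {
      void maybeRun(Integer length) {
        // Condition first, reason last; a false condition marks the test
        // as aborted/skipped instead of failed.
        assumeTrue(length != null, "Skip: length is null");
      }
    }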
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/ReadStripedFileWithDecodingHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/ReadStripedFileWithDecodingHelper.java
index e1497445b93e8..ad7bbdc6ce5bd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/ReadStripedFileWithDecodingHelper.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/ReadStripedFileWithDecodingHelper.java
@@ -30,7 +30,7 @@
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.util.StripedBlockUtil;
import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.Assert;
+import org.junit.jupiter.api.Assertions;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.slf4j.event.Level;
@@ -200,11 +200,11 @@ public static void testReadWithBlockCorrupted(MiniDFSCluster cluster,
+ ", parityBlkDelNum = " + parityBlkDelNum
+ ", deleteBlockFile? " + deleteBlockFile);
int recoverBlkNum = dataBlkDelNum + parityBlkDelNum;
- Assert.assertTrue("dataBlkDelNum and parityBlkDelNum should be positive",
- dataBlkDelNum >= 0 && parityBlkDelNum >= 0);
- Assert.assertTrue("The sum of dataBlkDelNum and parityBlkDelNum " +
- "should be between 1 ~ " + NUM_PARITY_UNITS, recoverBlkNum <=
- NUM_PARITY_UNITS);
+ Assertions.assertTrue(dataBlkDelNum >= 0 && parityBlkDelNum >= 0,
+ "dataBlkDelNum and parityBlkDelNum should be positive");
+ Assertions.assertTrue(recoverBlkNum <= NUM_PARITY_UNITS,
+ "The sum of dataBlkDelNum and parityBlkDelNum " +
+ "should be between 1 ~ " + NUM_PARITY_UNITS);
// write a file with the length of writeLen
Path srcPath = new Path(src);
@@ -232,10 +232,10 @@ public static void corruptBlocks(MiniDFSCluster cluster,
int[] delDataBlkIndices = StripedFileTestUtil.randomArray(0, NUM_DATA_UNITS,
dataBlkDelNum);
- Assert.assertNotNull(delDataBlkIndices);
+ Assertions.assertNotNull(delDataBlkIndices);
int[] delParityBlkIndices = StripedFileTestUtil.randomArray(NUM_DATA_UNITS,
NUM_DATA_UNITS + NUM_PARITY_UNITS, parityBlkDelNum);
- Assert.assertNotNull(delParityBlkIndices);
+ Assertions.assertNotNull(delParityBlkIndices);
int[] delBlkIndices = new int[recoverBlkNum];
System.arraycopy(delDataBlkIndices, 0,
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/StripedFileTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/StripedFileTestUtil.java
index 6578ad0fbc8b6..c0150d4ae4d74 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/StripedFileTestUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/StripedFileTestUtil.java
@@ -37,7 +37,7 @@
import org.apache.hadoop.io.erasurecode.CodecUtil;
import org.apache.hadoop.io.erasurecode.ErasureCoderOptions;
import org.apache.hadoop.io.erasurecode.rawcoder.RawErasureEncoder;
-import org.junit.Assert;
+import org.junit.jupiter.api.Assertions;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -55,7 +55,7 @@
import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicInteger;
-import static org.junit.Assert.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
public class StripedFileTestUtil {
public static final Logger LOG =
@@ -77,7 +77,7 @@ static byte getByte(long pos) {
static void verifyLength(FileSystem fs, Path srcPath, int fileLength)
throws IOException {
FileStatus status = fs.getFileStatus(srcPath);
- assertEquals("File length should be the same", fileLength, status.getLen());
+ assertEquals(fileLength, status.getLen(), "File length should be the same");
}
static void verifyPread(DistributedFileSystem fs, Path srcPath,
@@ -109,9 +109,9 @@ static void verifyPread(FileSystem fs, Path srcPath, int fileLength,
offset += target;
}
for (int i = 0; i < fileLength - startOffset; i++) {
- assertEquals("Byte at " + (startOffset + i) + " is different, "
- + "the startOffset is " + startOffset, expected[startOffset + i],
- result[i]);
+ assertEquals(expected[startOffset + i], result[i],
+ "Byte at " + (startOffset + i) + " is different, "
+ + "the startOffset is " + startOffset);
}
}
}
@@ -127,8 +127,8 @@ static void verifyStatefulRead(FileSystem fs, Path srcPath, int fileLength,
System.arraycopy(buf, 0, result, readLen, ret);
readLen += ret;
}
- assertEquals("The length of file should be the same to write size", fileLength, readLen);
- Assert.assertArrayEquals(expected, result);
+ assertEquals(fileLength, readLen, "The length of file should be the same to write size");
+ Assertions.assertArrayEquals(expected, result);
}
}
@@ -144,8 +144,8 @@ static void verifyStatefulRead(FileSystem fs, Path srcPath, int fileLength,
result.put(buf);
buf.clear();
}
- assertEquals("The length of file should be the same to write size", fileLength, readLen);
- Assert.assertArrayEquals(expected, result.array());
+ assertEquals(fileLength, readLen, "The length of file should be the same to write size");
+ Assertions.assertArrayEquals(expected, result.array());
}
}
@@ -185,14 +185,14 @@ static void verifySeek(FileSystem fs, Path srcPath, int fileLength,
if (!(in.getWrappedStream() instanceof WebHdfsInputStream)) {
try {
in.seek(-1);
- Assert.fail("Should be failed if seek to negative offset");
+ Assertions.fail("Should be failed if seek to negative offset");
} catch (EOFException e) {
// expected
}
try {
in.seek(fileLength + 1);
- Assert.fail("Should be failed if seek after EOF");
+ Assertions.fail("Should be failed if seek after EOF");
} catch (EOFException e) {
// expected
}
@@ -206,8 +206,8 @@ static void assertSeekAndRead(FSDataInputStream fsdis, int pos,
byte[] buf = new byte[writeBytes - pos];
IOUtils.readFully(fsdis, buf, 0, buf.length);
for (int i = 0; i < buf.length; i++) {
- assertEquals("Byte at " + i + " should be the same",
- StripedFileTestUtil.getByte(pos + i), buf[i]);
+ assertEquals(StripedFileTestUtil.getByte(pos + i), buf[i],
+ "Byte at " + i + " should be the same");
}
}
@@ -225,7 +225,7 @@ static DatanodeInfo getDatanodes(StripedDataStreamer streamer) {
final DatanodeInfo[] datanodes = streamer.getNodes();
if (datanodes != null) {
assertEquals(1, datanodes.length);
- Assert.assertNotNull(datanodes[0]);
+ Assertions.assertNotNull(datanodes[0]);
return datanodes[0];
}
try {
@@ -377,13 +377,13 @@ static void checkData(DistributedFileSystem dfs, Path srcPath, int length,
final int parityBlkNum = ecPolicy.getNumParityUnits();
int index = 0;
for (LocatedBlock firstBlock : lbs.getLocatedBlocks()) {
- Assert.assertTrue(firstBlock instanceof LocatedStripedBlock);
+ Assertions.assertTrue(firstBlock instanceof LocatedStripedBlock);
final long gs = firstBlock.getBlock().getGenerationStamp();
final long oldGS = oldGSList != null ? oldGSList.get(index++) : -1L;
final String s = "gs=" + gs + ", oldGS=" + oldGS;
LOG.info(s);
- Assert.assertTrue(s, gs >= oldGS);
+ Assertions.assertTrue(gs >= oldGS, s);
LocatedBlock[] blocks = StripedBlockUtil.parseStripedBlockGroup(
(LocatedStripedBlock) firstBlock, cellSize,
@@ -456,7 +456,7 @@ static void checkData(DistributedFileSystem dfs, Path srcPath, int length,
for (int posInBlk = 0; posInBlk < actual.length; posInBlk++) {
final long posInFile = StripedBlockUtil.offsetInBlkToOffsetInBG(
cellSize, dataBlkNum, posInBlk, i) + groupPosInFile;
- Assert.assertTrue(posInFile < length);
+ Assertions.assertTrue(posInFile < length);
final byte expected = getByte(posInFile);
if (killed) {
@@ -466,7 +466,7 @@ static void checkData(DistributedFileSystem dfs, Path srcPath, int length,
String s = "expected=" + expected + " but actual=" + actual[posInBlk]
+ ", posInFile=" + posInFile + ", posInBlk=" + posInBlk
+ ". group=" + group + ", i=" + i;
- Assert.fail(s);
+ Assertions.fail(s);
}
}
}
@@ -507,12 +507,12 @@ static void verifyParityBlocks(Configuration conf, final long size,
try {
encoder.encode(dataBytes, expectedParityBytes);
} catch (IOException e) {
- Assert.fail("Unexpected IOException: " + e.getMessage());
+ Assertions.fail("Unexpected IOException: " + e.getMessage());
}
for (int i = 0; i < parityBytes.length; i++) {
if (checkSet.contains(i + dataBytes.length)){
- Assert.assertArrayEquals("i=" + i, expectedParityBytes[i],
- parityBytes[i]);
+ Assertions.assertArrayEquals(expectedParityBytes[i],
+ parityBytes[i], "i=" + i);
}
}
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAbandonBlock.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAbandonBlock.java
index e7d8b38aed9ce..9632672532776 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAbandonBlock.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAbandonBlock.java
@@ -17,7 +17,7 @@
*/
package org.apache.hadoop.hdfs;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.fail;
import java.io.IOException;
@@ -30,10 +30,10 @@
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
/**
* Test abandoning blocks, which clients do on pipeline creation failure.
@@ -48,14 +48,14 @@ public class TestAbandonBlock {
private MiniDFSCluster cluster;
private DistributedFileSystem fs;
- @Before
+ @BeforeEach
public void setUp() throws Exception {
cluster = new MiniDFSCluster.Builder(CONF).numDataNodes(2).build();
fs = cluster.getFileSystem();
cluster.waitActive();
}
- @After
+ @AfterEach
public void tearDown() throws Exception {
if (fs != null) {
fs.close();
@@ -100,8 +100,8 @@ public void testAbandonBlock() throws IOException {
cluster.restartNameNode();
blocks = dfsclient.getNamenode().getBlockLocations(src, 0,
Integer.MAX_VALUE);
- Assert.assertEquals("Blocks " + b + " has not been abandoned.",
- orginalNumBlocks, blocks.locatedBlockCount() + 1);
+ Assertions.assertEquals(orginalNumBlocks, blocks.locatedBlockCount() + 1,
+ "Blocks " + b + " has not been abandoned.");
}
@Test
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAclsEndToEnd.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAclsEndToEnd.java
index 105836e1b4461..6c5b2c0867b35 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAclsEndToEnd.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAclsEndToEnd.java
@@ -43,12 +43,11 @@
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authorize.ProxyUsers;
-import org.junit.Assert;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import static org.junit.jupiter.api.Assertions.*;
+
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
/**
* This class tests the ACLs system through the full code path. It overlaps
@@ -89,7 +88,7 @@ public class TestAclsEndToEnd {
private MiniDFSCluster cluster;
private DistributedFileSystem fs;
- @BeforeClass
+ @BeforeAll
public static void captureUser() throws IOException {
realUgi = UserGroupInformation.getCurrentUser();
realUser = System.getProperty("user.name");
@@ -174,7 +173,7 @@ private void setup(Configuration conf, boolean resetKms, boolean resetDfs)
kmsDir = new File(fsHelper.getTestRootDir()).getAbsoluteFile();
- Assert.assertTrue(kmsDir.mkdirs());
+ Assertions.assertTrue(kmsDir.mkdirs());
}
writeConf(kmsDir, conf);
@@ -411,66 +410,66 @@ private void doFullAclTest(final Configuration conf,
try {
setup(conf);
- // Create a test key
- assertTrue("Exception during creation of key " + KEY1 + " by "
- + keyadminUgi.getUserName(), createKey(keyadminUgi, KEY1, conf));
+ // Create a test key
+ assertTrue(createKey(keyadminUgi, KEY1, conf), "Exception during creation of key " + KEY1 + " by "
+ + keyadminUgi.getUserName());
- // Fail to create a test key
- assertFalse("Allowed creation of key " + KEY2 + " by "
- + hdfsUgi.getUserName(), createKey(hdfsUgi, KEY2, conf));
- assertFalse("Allowed creation of key " + KEY2 + " by "
- + userUgi.getUserName(), createKey(userUgi, KEY2, conf));
+ // Fail to create a test key
+ assertFalse(createKey(hdfsUgi, KEY2, conf), "Allowed creation of key " + KEY2 + " by "
+ + hdfsUgi.getUserName());
+ assertFalse(createKey(userUgi, KEY2, conf), "Allowed creation of key " + KEY2 + " by "
+ + userUgi.getUserName());
// Create a directory and chown it to the normal user.
fs.mkdirs(ZONE1);
fs.setOwner(ZONE1, userUgi.getUserName(),
userUgi.getPrimaryGroupName());
- // Create an EZ
- assertTrue("Exception during creation of EZ " + ZONE1 + " by "
- + hdfsUgi.getUserName() + " using key " + KEY1,
- createEncryptionZone(hdfsUgi, KEY1, ZONE1));
-
- // Fail to create an EZ
- assertFalse("Allowed creation of EZ " + ZONE2 + " by "
- + keyadminUgi.getUserName() + " using key " + KEY1,
- createEncryptionZone(keyadminUgi, KEY1, ZONE2));
- assertFalse("Allowed creation of EZ " + ZONE2 + " by "
- + userUgi.getUserName() + " using key " + KEY1,
- createEncryptionZone(userUgi, KEY1, ZONE2));
-
- // Create a file in the zone
- assertTrue("Exception during creation of file " + FILE1 + " by "
- + userUgi.getUserName(), createFile(userUgi, FILE1, TEXT));
-
- // Fail to create a file in the zone
- assertFalse("Allowed creation of file " + FILE1A + " by "
- + hdfsUgi.getUserName(), createFile(hdfsUgi, FILE1A, TEXT));
- assertFalse("Allowed creation of file " + FILE1A + " by "
- + keyadminUgi.getUserName(), createFile(keyadminUgi, FILE1A, TEXT));
-
- // Read a file in the zone
- assertTrue("Exception while reading file " + FILE1 + " by "
- + userUgi.getUserName(), compareFile(userUgi, FILE1, TEXT));
-
- // Fail to read a file in the zone
- assertFalse("Allowed reading of file " + FILE1 + " by "
- + hdfsUgi.getUserName(), compareFile(hdfsUgi, FILE1, TEXT));
- assertFalse("Allowed reading of file " + FILE1 + " by "
- + keyadminUgi.getUserName(), compareFile(keyadminUgi, FILE1, TEXT));
+ // Create an EZ
+ assertTrue(
+ createEncryptionZone(hdfsUgi, KEY1, ZONE1), "Exception during creation of EZ " + ZONE1 + " by "
+ + hdfsUgi.getUserName() + " using key " + KEY1);
+
+ // Fail to create an EZ
+ assertFalse(
+ createEncryptionZone(keyadminUgi, KEY1, ZONE2), "Allowed creation of EZ " + ZONE2 + " by "
+ + keyadminUgi.getUserName() + " using key " + KEY1);
+ assertFalse(
+ createEncryptionZone(userUgi, KEY1, ZONE2), "Allowed creation of EZ " + ZONE2 + " by "
+ + userUgi.getUserName() + " using key " + KEY1);
+
+ // Create a file in the zone
+ assertTrue(createFile(userUgi, FILE1, TEXT), "Exception during creation of file " + FILE1 + " by "
+ + userUgi.getUserName());
+
+ // Fail to create a file in the zone
+ assertFalse(createFile(hdfsUgi, FILE1A, TEXT), "Allowed creation of file " + FILE1A + " by "
+ + hdfsUgi.getUserName());
+ assertFalse(createFile(keyadminUgi, FILE1A, TEXT), "Allowed creation of file " + FILE1A + " by "
+ + keyadminUgi.getUserName());
+
+ // Read a file in the zone
+ assertTrue(compareFile(userUgi, FILE1, TEXT), "Exception while reading file " + FILE1 + " by "
+ + userUgi.getUserName());
+
+ // Fail to read a file in the zone
+ assertFalse(compareFile(hdfsUgi, FILE1, TEXT), "Allowed reading of file " + FILE1 + " by "
+ + hdfsUgi.getUserName());
+ assertFalse(compareFile(keyadminUgi, FILE1, TEXT), "Allowed reading of file " + FILE1 + " by "
+ + keyadminUgi.getUserName());
// Remove the zone
fs.delete(ZONE1, true);
- // Fail to remove the key
- assertFalse("Allowed deletion of file " + FILE1 + " by "
- + hdfsUgi.getUserName(), deleteKey(hdfsUgi, KEY1));
- assertFalse("Allowed deletion of file " + FILE1 + " by "
- + userUgi.getUserName(), deleteKey(userUgi, KEY1));
+ // Fail to remove the key
+ assertFalse(deleteKey(hdfsUgi, KEY1), "Allowed deletion of file " + FILE1 + " by "
+ + hdfsUgi.getUserName());
+ assertFalse(deleteKey(userUgi, KEY1), "Allowed deletion of file " + FILE1 + " by "
+ + userUgi.getUserName());
- // Remove
- assertTrue("Exception during deletion of file " + FILE1 + " by "
- + keyadminUgi.getUserName(), deleteKey(keyadminUgi, KEY1));
+ // Remove
+ assertTrue(deleteKey(keyadminUgi, KEY1), "Exception during deletion of file " + FILE1 + " by "
+ + keyadminUgi.getUserName());
} finally {
fs.delete(ZONE1, true);
fs.delete(ZONE2, true);
@@ -495,8 +494,8 @@ public void testCreateKey() throws Exception {
try {
setup(conf);
- assertTrue("Exception during key creation with correct config"
- + " using whitelist key ACLs", createKey(realUgi, KEY1, conf));
+ assertTrue(createKey(realUgi, KEY1, conf), "Exception during key creation with correct config"
+ + " using whitelist key ACLs");
} finally {
teardown();
}
@@ -512,8 +511,8 @@ public void testCreateKey() throws Exception {
try {
setup(conf);
- assertTrue("Exception during key creation with correct config"
- + " using default key ACLs", createKey(realUgi, KEY2, conf));
+ assertTrue(createKey(realUgi, KEY2, conf), "Exception during key creation with correct config"
+ + " using default key ACLs");
} finally {
teardown();
}
@@ -531,8 +530,8 @@ public void testCreateKey() throws Exception {
try {
setup(conf);
- assertFalse("Allowed key creation with blacklist for CREATE",
- createKey(realUgi, KEY3, conf));
+ assertFalse(
+ createKey(realUgi, KEY3, conf), "Allowed key creation with blacklist for CREATE");
} finally {
teardown();
}
@@ -547,8 +546,8 @@ public void testCreateKey() throws Exception {
try {
setup(conf);
- assertFalse("Allowed key creation without CREATE KMS ACL",
- createKey(realUgi, KEY3, conf));
+ assertFalse(
+ createKey(realUgi, KEY3, conf), "Allowed key creation without CREATE KMS ACL");
} finally {
teardown();
}
@@ -562,8 +561,8 @@ public void testCreateKey() throws Exception {
try {
setup(conf);
- assertFalse("Allowed key creation without MANAGMENT key ACL",
- createKey(realUgi, KEY3, conf));
+ assertFalse(
+ createKey(realUgi, KEY3, conf), "Allowed key creation without MANAGMENT key ACL");
} finally {
teardown();
}
@@ -581,8 +580,8 @@ public void testCreateKey() throws Exception {
try {
setup(conf);
- assertFalse("Allowed key creation when default key ACL should have been"
- + " overridden by key ACL", createKey(realUgi, KEY3, conf));
+ assertFalse(createKey(realUgi, KEY3, conf), "Allowed key creation when default key ACL should have been"
+ + " overridden by key ACL");
} finally {
teardown();
}
@@ -596,8 +595,8 @@ public void testCreateKey() throws Exception {
try {
setup(conf);
- assertTrue("Exception during key creation with default KMS ACLs",
- createKey(realUgi, KEY3, conf));
+ assertTrue(
+ createKey(realUgi, KEY3, conf), "Exception during key creation with default KMS ACLs");
} finally {
teardown();
}
@@ -620,8 +619,8 @@ public void testCreateEncryptionZone() throws Exception {
try {
setup(conf);
- assertTrue("Exception during key creation",
- createKey(realUgi, KEY1, conf));
+ assertTrue(
+ createKey(realUgi, KEY1, conf), "Exception during key creation");
} finally {
teardown();
}
@@ -647,8 +646,8 @@ public void testCreateEncryptionZone() throws Exception {
fs.mkdirs(ZONE1);
- assertTrue("Exception during zone creation with correct config using"
- + " whitelist key ACLs", createEncryptionZone(realUgi, KEY1, ZONE1));
+ assertTrue(createEncryptionZone(realUgi, KEY1, ZONE1), "Exception during zone creation with correct config using"
+ + " whitelist key ACLs");
} finally {
fs.delete(ZONE1, true);
teardown();
@@ -671,8 +670,8 @@ public void testCreateEncryptionZone() throws Exception {
fs.mkdirs(ZONE2);
- assertTrue("Exception during zone creation with correct config using"
- + " default key ACLs", createEncryptionZone(realUgi, KEY1, ZONE2));
+ assertTrue(createEncryptionZone(realUgi, KEY1, ZONE2), "Exception during zone creation with correct config using"
+ + " default key ACLs");
} finally {
fs.delete(ZONE2, true);
teardown();
@@ -697,9 +696,9 @@ public void testCreateEncryptionZone() throws Exception {
fs.mkdirs(ZONE3);
- assertFalse("Allowed creation of zone when default key ACLs should have"
- + " been overridden by key ACL",
- createEncryptionZone(realUgi, KEY1, ZONE3));
+ assertFalse(
+ createEncryptionZone(realUgi, KEY1, ZONE3), "Allowed creation of zone when default key ACLs should have"
+ + " been overridden by key ACL");
} finally {
fs.delete(ZONE3, true);
teardown();
@@ -724,8 +723,8 @@ public void testCreateEncryptionZone() throws Exception {
fs.mkdirs(ZONE3);
- assertFalse("Allowed zone creation of zone with blacklisted GET_METADATA",
- createEncryptionZone(realUgi, KEY1, ZONE3));
+ assertFalse(
+ createEncryptionZone(realUgi, KEY1, ZONE3), "Allowed zone creation of zone with blacklisted GET_METADATA");
} finally {
fs.delete(ZONE3, true);
teardown();
@@ -750,8 +749,8 @@ public void testCreateEncryptionZone() throws Exception {
fs.mkdirs(ZONE3);
- assertFalse("Allowed zone creation of zone with blacklisted GENERATE_EEK",
- createEncryptionZone(realUgi, KEY1, ZONE3));
+ assertFalse(
+ createEncryptionZone(realUgi, KEY1, ZONE3), "Allowed zone creation of zone with blacklisted GENERATE_EEK");
} finally {
fs.delete(ZONE3, true);
teardown();
@@ -771,8 +770,8 @@ public void testCreateEncryptionZone() throws Exception {
fs.mkdirs(ZONE3);
- assertTrue("Exception during zone creation with default KMS ACLs",
- createEncryptionZone(realUgi, KEY1, ZONE3));
+ assertTrue(
+ createEncryptionZone(realUgi, KEY1, ZONE3), "Exception during zone creation with default KMS ACLs");
} finally {
fs.delete(ZONE3, true);
teardown();
@@ -794,8 +793,8 @@ public void testCreateEncryptionZone() throws Exception {
fs.mkdirs(ZONE4);
- assertFalse("Allowed zone creation without GET_METADATA KMS ACL",
- createEncryptionZone(realUgi, KEY1, ZONE4));
+ assertFalse(
+ createEncryptionZone(realUgi, KEY1, ZONE4), "Allowed zone creation without GET_METADATA KMS ACL");
} finally {
fs.delete(ZONE4, true);
teardown();
@@ -817,8 +816,8 @@ public void testCreateEncryptionZone() throws Exception {
fs.mkdirs(ZONE4);
- assertFalse("Allowed zone creation without GENERATE_EEK KMS ACL",
- createEncryptionZone(realUgi, KEY1, ZONE4));
+ assertFalse(
+ createEncryptionZone(realUgi, KEY1, ZONE4), "Allowed zone creation without GENERATE_EEK KMS ACL");
} finally {
fs.delete(ZONE4, true);
teardown();
@@ -839,8 +838,8 @@ public void testCreateEncryptionZone() throws Exception {
fs.mkdirs(ZONE4);
- assertFalse("Allowed zone creation without READ ACL",
- createEncryptionZone(realUgi, KEY1, ZONE4));
+ assertFalse(
+ createEncryptionZone(realUgi, KEY1, ZONE4), "Allowed zone creation without READ ACL");
} finally {
fs.delete(ZONE4, true);
teardown();
@@ -861,8 +860,8 @@ public void testCreateEncryptionZone() throws Exception {
fs.mkdirs(ZONE4);
- assertFalse("Allowed zone creation without GENERATE_EEK ACL",
- createEncryptionZone(realUgi, KEY1, ZONE4));
+ assertFalse(
+ createEncryptionZone(realUgi, KEY1, ZONE4), "Allowed zone creation without GENERATE_EEK ACL");
} finally {
fs.delete(ZONE4, true);
teardown();
@@ -896,20 +895,20 @@ public void testCreateFileInEncryptionZone() throws Exception {
try {
setup(conf);
- assertTrue("Exception during key creation",
- createKey(realUgi, KEY1, conf));
+ assertTrue(
+ createKey(realUgi, KEY1, conf), "Exception during key creation");
fs.mkdirs(ZONE1);
- assertTrue("Exception during zone creation",
- createEncryptionZone(realUgi, KEY1, ZONE1));
+ assertTrue(
+ createEncryptionZone(realUgi, KEY1, ZONE1), "Exception during zone creation");
fs.mkdirs(ZONE2);
- assertTrue("Exception during zone creation",
- createEncryptionZone(realUgi, KEY1, ZONE2));
+ assertTrue(
+ createEncryptionZone(realUgi, KEY1, ZONE2), "Exception during zone creation");
fs.mkdirs(ZONE3);
- assertTrue("Exception during zone creation",
- createEncryptionZone(realUgi, KEY1, ZONE3));
+ assertTrue(
+ createEncryptionZone(realUgi, KEY1, ZONE3), "Exception during zone creation");
fs.mkdirs(ZONE4);
- assertTrue("Exception during zone creation",
- createEncryptionZone(realUgi, KEY1, ZONE4));
+ assertTrue(
+ createEncryptionZone(realUgi, KEY1, ZONE4), "Exception during zone creation");
} catch (Throwable ex) {
fs.delete(ZONE1, true);
fs.delete(ZONE2, true);
@@ -941,8 +940,8 @@ public void testCreateFileInEncryptionZone() throws Exception {
try {
setup(conf, false, false);
- assertTrue("Exception during file creation with correct config"
- + " using whitelist ACL", createFile(realUgi, FILE1, TEXT));
+ assertTrue(createFile(realUgi, FILE1, TEXT), "Exception during file creation with correct config"
+ + " using whitelist ACL");
} finally {
fs.delete(ZONE1, true);
teardown();
@@ -963,8 +962,8 @@ public void testCreateFileInEncryptionZone() throws Exception {
try {
setup(conf, false, false);
- assertTrue("Exception during file creation with correct config"
- + " using whitelist ACL", createFile(realUgi, FILE2, TEXT));
+ assertTrue(createFile(realUgi, FILE2, TEXT), "Exception during file creation with correct config"
+ + " using whitelist ACL");
} finally {
fs.delete(ZONE2, true);
teardown();
@@ -987,8 +986,8 @@ public void testCreateFileInEncryptionZone() throws Exception {
try {
setup(conf, false, false);
- assertFalse("Allowed file creation when default key ACLs should have been"
- + " overridden by key ACL", createFile(realUgi, FILE3, TEXT));
+ assertFalse(createFile(realUgi, FILE3, TEXT), "Allowed file creation when default key ACLs should have been"
+ + " overridden by key ACL");
} catch (Exception ex) {
fs.delete(ZONE3, true);
@@ -1014,8 +1013,8 @@ public void testCreateFileInEncryptionZone() throws Exception {
try {
setup(conf, false, false);
- assertFalse("Allowed file creation with blacklist for GENERATE_EEK",
- createFile(realUgi, FILE3, TEXT));
+ assertFalse(
+ createFile(realUgi, FILE3, TEXT), "Allowed file creation with blacklist for GENERATE_EEK");
} catch (Exception ex) {
fs.delete(ZONE3, true);
@@ -1041,8 +1040,8 @@ public void testCreateFileInEncryptionZone() throws Exception {
try {
setup(conf, false, false);
- assertFalse("Allowed file creation with blacklist for DECRYPT_EEK",
- createFile(realUgi, FILE3, TEXT));
+ assertFalse(
+ createFile(realUgi, FILE3, TEXT), "Allowed file creation with blacklist for DECRYPT_EEK");
} catch (Exception ex) {
fs.delete(ZONE3, true);
@@ -1062,8 +1061,8 @@ public void testCreateFileInEncryptionZone() throws Exception {
try {
setup(conf, false, false);
- assertTrue("Exception during file creation with default KMS ACLs",
- createFile(realUgi, FILE3, TEXT));
+ assertTrue(
+ createFile(realUgi, FILE3, TEXT), "Exception during file creation with default KMS ACLs");
} catch (Exception ex) {
fs.delete(ZONE3, true);
@@ -1086,8 +1085,8 @@ public void testCreateFileInEncryptionZone() throws Exception {
try {
setup(conf, false, false);
- assertFalse("Allowed file creation without GENERATE_EEK KMS ACL",
- createFile(realUgi, FILE4, TEXT));
+ assertFalse(
+ createFile(realUgi, FILE4, TEXT), "Allowed file creation without GENERATE_EEK KMS ACL");
} catch (Exception ex) {
fs.delete(ZONE3, true);
@@ -1110,8 +1109,8 @@ public void testCreateFileInEncryptionZone() throws Exception {
try {
setup(conf, false, false);
- assertFalse("Allowed file creation without DECRYPT_EEK KMS ACL",
- createFile(realUgi, FILE3, TEXT));
+ assertFalse(
+ createFile(realUgi, FILE3, TEXT), "Allowed file creation without DECRYPT_EEK KMS ACL");
} catch (Exception ex) {
fs.delete(ZONE3, true);
@@ -1133,8 +1132,8 @@ public void testCreateFileInEncryptionZone() throws Exception {
try {
setup(conf, false, false);
- assertFalse("Allowed file creation without GENERATE_EEK key ACL",
- createFile(realUgi, FILE3, TEXT));
+ assertFalse(
+ createFile(realUgi, FILE3, TEXT), "Allowed file creation without GENERATE_EEK key ACL");
} catch (Exception ex) {
fs.delete(ZONE3, true);
@@ -1156,8 +1155,8 @@ public void testCreateFileInEncryptionZone() throws Exception {
try {
setup(conf, false, false);
- assertFalse("Allowed file creation without DECRYPT_EEK key ACL",
- createFile(realUgi, FILE3, TEXT));
+ assertFalse(
+ createFile(realUgi, FILE3, TEXT), "Allowed file creation without DECRYPT_EEK key ACL");
} catch (Exception ex) {
fs.delete(ZONE3, true);
@@ -1198,13 +1197,13 @@ public void testReadFileInEncryptionZone() throws Exception {
try {
setup(conf);
- assertTrue("Exception during key creation",
- createKey(realUgi, KEY1, conf));
+ assertTrue(
+ createKey(realUgi, KEY1, conf), "Exception during key creation");
fs.mkdirs(ZONE1);
- assertTrue("Exception during zone creation",
- createEncryptionZone(realUgi, KEY1, ZONE1));
- assertTrue("Exception during file creation",
- createFile(realUgi, FILE1, TEXT));
+ assertTrue(
+ createEncryptionZone(realUgi, KEY1, ZONE1), "Exception during zone creation");
+ assertTrue(
+ createFile(realUgi, FILE1, TEXT), "Exception during file creation");
} catch (Throwable ex) {
fs.delete(ZONE1, true);
@@ -1229,8 +1228,8 @@ public void testReadFileInEncryptionZone() throws Exception {
try {
setup(conf, false, false);
- assertTrue("Exception while reading file with correct config with"
- + " whitelist ACLs", compareFile(realUgi, FILE1, TEXT));
+ assertTrue(compareFile(realUgi, FILE1, TEXT), "Exception while reading file with correct config with"
+ + " whitelist ACLs");
} catch (Throwable ex) {
fs.delete(ZONE1, true);
@@ -1250,8 +1249,8 @@ public void testReadFileInEncryptionZone() throws Exception {
try {
setup(conf, false, false);
- assertTrue("Exception while reading file with correct config"
- + " with default ACLs", compareFile(realUgi, FILE1, TEXT));
+ assertTrue(compareFile(realUgi, FILE1, TEXT), "Exception while reading file with correct config"
+ + " with default ACLs");
} catch (Throwable ex) {
fs.delete(ZONE1, true);
@@ -1273,8 +1272,8 @@ public void testReadFileInEncryptionZone() throws Exception {
try {
setup(conf, false, false);
- assertFalse("Allowed file read when default key ACLs should have been"
- + " overridden by key ACL", compareFile(realUgi, FILE1, TEXT));
+ assertFalse(compareFile(realUgi, FILE1, TEXT), "Allowed file read when default key ACLs should have been"
+ + " overridden by key ACL");
} catch (Throwable ex) {
fs.delete(ZONE1, true);
@@ -1296,8 +1295,8 @@ public void testReadFileInEncryptionZone() throws Exception {
try {
setup(conf, false, false);
- assertFalse("Allowed file read with blacklist for DECRYPT_EEK",
- compareFile(realUgi, FILE1, TEXT));
+ assertFalse(
+ compareFile(realUgi, FILE1, TEXT), "Allowed file read with blacklist for DECRYPT_EEK");
} catch (Throwable ex) {
fs.delete(ZONE1, true);
@@ -1315,8 +1314,8 @@ public void testReadFileInEncryptionZone() throws Exception {
try {
setup(conf, false, false);
- assertTrue("Exception while reading file with default KMS ACLs",
- compareFile(realUgi, FILE1, TEXT));
+ assertTrue(
+ compareFile(realUgi, FILE1, TEXT), "Exception while reading file with default KMS ACLs");
} catch (Throwable ex) {
fs.delete(ZONE1, true);
@@ -1335,8 +1334,8 @@ public void testReadFileInEncryptionZone() throws Exception {
try {
setup(conf, false, false);
- assertFalse("Allowed file read without DECRYPT_EEK KMS ACL",
- compareFile(realUgi, FILE1, TEXT));
+ assertFalse(
+ compareFile(realUgi, FILE1, TEXT), "Allowed file read without DECRYPT_EEK KMS ACL");
} catch (Throwable ex) {
fs.delete(ZONE1, true);
@@ -1351,8 +1350,8 @@ public void testReadFileInEncryptionZone() throws Exception {
try {
setup(conf, false, false);
- assertFalse("Allowed file read without DECRYPT_EEK key ACL",
- compareFile(realUgi, FILE1, TEXT));
+ assertFalse(
+ compareFile(realUgi, FILE1, TEXT), "Allowed file read without DECRYPT_EEK key ACL");
} catch (Throwable ex) {
fs.delete(ZONE1, true);
@@ -1379,12 +1378,12 @@ public void testDeleteKey() throws Exception {
try {
setup(conf);
- assertTrue("Exception during key creation",
- createKey(realUgi, KEY1, conf));
- assertTrue("Exception during key creation",
- createKey(realUgi, KEY2, conf));
- assertTrue("Exception during key creation",
- createKey(realUgi, KEY3, conf));
+ assertTrue(
+ createKey(realUgi, KEY1, conf), "Exception during key creation");
+ assertTrue(
+ createKey(realUgi, KEY2, conf), "Exception during key creation");
+ assertTrue(
+ createKey(realUgi, KEY3, conf), "Exception during key creation");
} finally {
teardown();
}
@@ -1405,8 +1404,8 @@ public void testDeleteKey() throws Exception {
try {
setup(conf, false);
- assertTrue("Exception during key deletion with correct config"
- + " using whitelist key ACLs", deleteKey(realUgi, KEY1));
+ assertTrue(deleteKey(realUgi, KEY1), "Exception during key deletion with correct config"
+ + " using whitelist key ACLs");
} finally {
teardown();
}
@@ -1422,8 +1421,8 @@ public void testDeleteKey() throws Exception {
try {
setup(conf, false);
- assertTrue("Exception during key deletion with correct config"
- + " using default key ACLs", deleteKey(realUgi, KEY2));
+ assertTrue(deleteKey(realUgi, KEY2), "Exception during key deletion with correct config"
+ + " using default key ACLs");
} finally {
teardown();
}
@@ -1441,8 +1440,8 @@ public void testDeleteKey() throws Exception {
try {
setup(conf, false);
- assertFalse("Allowed key deletion with blacklist for DELETE",
- deleteKey(realUgi, KEY3));
+ assertFalse(
+ deleteKey(realUgi, KEY3), "Allowed key deletion with blacklist for DELETE");
} finally {
teardown();
}
@@ -1457,8 +1456,8 @@ public void testDeleteKey() throws Exception {
try {
setup(conf, false);
- assertFalse("Allowed key deletion without DELETE KMS ACL",
- deleteKey(realUgi, KEY3));
+ assertFalse(
+ deleteKey(realUgi, KEY3), "Allowed key deletion without DELETE KMS ACL");
} finally {
teardown();
}
@@ -1473,8 +1472,8 @@ public void testDeleteKey() throws Exception {
try {
setup(conf, false);
- assertFalse("Allowed key deletion without MANAGMENT key ACL",
- deleteKey(realUgi, KEY3));
+ assertFalse(
+ deleteKey(realUgi, KEY3), "Allowed key deletion without MANAGMENT key ACL");
} finally {
teardown();
}
@@ -1492,8 +1491,8 @@ public void testDeleteKey() throws Exception {
try {
setup(conf, false);
- assertFalse("Allowed key deletion when default key ACL should have been"
- + " overridden by key ACL", deleteKey(realUgi, KEY3));
+ assertFalse(deleteKey(realUgi, KEY3), "Allowed key deletion when default key ACL should have been"
+ + " overridden by key ACL");
} finally {
teardown();
}
@@ -1507,8 +1506,8 @@ public void testDeleteKey() throws Exception {
try {
setup(conf, false);
- assertTrue("Exception during key deletion with default KMS ACLs",
- deleteKey(realUgi, KEY3));
+ assertTrue(
+ deleteKey(realUgi, KEY3), "Exception during key deletion with default KMS ACLs");
} finally {
teardown();
}
@@ -1596,8 +1595,8 @@ public void execute() throws IOException {
FSDataInputStream din = cluster.getFileSystem().open(file);
BufferedReader in = new BufferedReader(new InputStreamReader(din));
- assertEquals("The text read does not match the text written",
- text, in.readLine());
+ assertEquals(
+ text, in.readLine(), "The text read does not match the text written");
}
});
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAppendDifferentChecksum.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAppendDifferentChecksum.java
index eedbdb988ce9d..5894a38eb1283 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAppendDifferentChecksum.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAppendDifferentChecksum.java
@@ -26,10 +26,10 @@
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.util.Time;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Ignore;
import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Disabled;
/**
* Test cases for trying to append to a file with a different
@@ -44,7 +44,7 @@ public class TestAppendDifferentChecksum {
private static FileSystem fs;
- @BeforeClass
+ @BeforeAll
public static void setupCluster() throws IOException {
Configuration conf = new HdfsConfiguration();
conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 4096);
@@ -55,7 +55,7 @@ public static void setupCluster() throws IOException {
fs = cluster.getFileSystem();
}
- @AfterClass
+ @AfterAll
public static void teardown() throws IOException {
if (cluster != null) {
cluster.shutdown();
@@ -68,7 +68,7 @@ public static void teardown() throws IOException {
* difficulties in doing so.
*/
@Test
- @Ignore("this is not implemented! See HDFS-2130")
+ @Disabled("this is not implemented! See HDFS-2130")
public void testSwitchChunkSize() throws IOException {
FileSystem fsWithSmallChunk = createFsWithChecksum("CRC32", 512);
FileSystem fsWithBigChunk = createFsWithChecksum("CRC32", 1024);
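TestAppendDifferentChecksum also swaps @Ignore for Jupiter's @Disabled; the reason string is preserved and is reported by the engine when the test is skipped. A small hypothetical example, not taken from this patch:

    import org.junit.jupiter.api.Disabled;
    import org.junit.jupiter.api.Test;

    class DisabledSketch {
      @Test
      @Disabled("not implemented yet; tracked in a follow-up issue")
      void pendingFeature() {
        // never executed while the @Disabled annotation is present
      }
    }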
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAppendSnapshotTruncate.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAppendSnapshotTruncate.java
index b4e9550e11828..c68f05a2b6a08 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAppendSnapshotTruncate.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAppendSnapshotTruncate.java
@@ -45,11 +45,10 @@
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.namenode.TestFileTruncate;
import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.BeforeClass;
import org.junit.Test;
-
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeAll;
import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
import org.slf4j.event.Level;
@@ -78,7 +77,7 @@ public class TestAppendSnapshotTruncate {
static MiniDFSCluster cluster;
static DistributedFileSystem dfs;
- @BeforeClass
+ @BeforeAll
public static void startUp() throws IOException {
conf = new HdfsConfiguration();
conf.setLong(DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_KEY, BLOCK_SIZE);
@@ -95,7 +94,7 @@ public static void startUp() throws IOException {
dfs = cluster.getFileSystem();
}
- @AfterClass
+ @AfterAll
public static void tearDown() throws IOException {
if(dfs != null) {
dfs.close();
@@ -167,7 +166,7 @@ String createSnapshot(String snapshot) throws IOException {
{
//copy all local files to a sub dir to simulate snapshot.
final File subDir = new File(localDir, snapshot);
- Assert.assertFalse(subDir.exists());
+ Assertions.assertFalse(subDir.exists());
subDir.mkdir();
for(File f : localDir.listFiles(FILE_ONLY)) {
@@ -185,12 +184,12 @@ String checkSnapshot(String snapshot) throws IOException {
.append(snapshot);
final File subDir = new File(localDir, snapshot);
- Assert.assertTrue(subDir.exists());
+ Assertions.assertTrue(subDir.exists());
final File[] localFiles = subDir.listFiles(FILE_ONLY);
final Path p = snapshotPaths.get(snapshot);
final FileStatus[] statuses = dfs.listStatus(p);
- Assert.assertEquals(localFiles.length, statuses.length);
+ Assertions.assertEquals(localFiles.length, statuses.length);
b.append(p).append(" vs ").append(subDir).append(", ")
.append(statuses.length).append(" entries");
@@ -374,8 +373,8 @@ int checkLength() throws IOException {
static int checkLength(Path file, File localFile) throws IOException {
final long length = dfs.getFileStatus(file).getLen();
- Assert.assertEquals(localFile.length(), length);
- Assert.assertTrue(length <= Integer.MAX_VALUE);
+ Assertions.assertEquals(localFile.length(), length);
+ Assertions.assertTrue(length <= Integer.MAX_VALUE);
return (int)length;
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestApplyingStoragePolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestApplyingStoragePolicy.java
index 200fab6f8aa4a..b77cc59f42ba6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestApplyingStoragePolicy.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestApplyingStoragePolicy.java
@@ -17,8 +17,8 @@
*/
package org.apache.hadoop.hdfs;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
import java.io.FileNotFoundException;
import java.io.IOException;
@@ -27,9 +27,9 @@
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
public class TestApplyingStoragePolicy {
private static final short REPL = 1;
@@ -39,7 +39,7 @@ public class TestApplyingStoragePolicy {
private static MiniDFSCluster cluster;
private static DistributedFileSystem fs;
- @Before
+ @BeforeEach
public void clusterSetUp() throws IOException {
conf = new HdfsConfiguration();
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(REPL).build();
@@ -47,7 +47,7 @@ public void clusterSetUp() throws IOException {
fs = cluster.getFileSystem();
}
- @After
+ @AfterEach
public void clusterShutdown() throws IOException{
if(fs != null) {
fs.close();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBalancerBandwidth.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBalancerBandwidth.java
index 3191fbdf8fe1f..5652662008dbf 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBalancerBandwidth.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBalancerBandwidth.java
@@ -17,8 +17,8 @@
*/
package org.apache.hadoop.hdfs;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
import java.io.ByteArrayOutputStream;
import java.io.PrintStream;
@@ -34,7 +34,7 @@
import org.apache.hadoop.hdfs.tools.DFSAdmin;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.util.ToolRunner;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
/**
* This test ensures that the balancer bandwidth is dynamically adjusted
@@ -131,11 +131,11 @@ private void runGetBalancerBandwidthCmd(DFSAdmin admin, String[] args,
try {
System.setOut(outStream);
int exitCode = admin.run(args);
- assertEquals("DFSAdmin should return 0", 0, exitCode);
+ assertEquals(0, exitCode, "DFSAdmin should return 0");
String bandwidthOutMsg = "Balancer bandwidth is " + expectedBandwidth
+ " bytes per second.";
String strOut = new String(outContent.toByteArray(), UTF8);
- assertTrue("Wrong balancer bandwidth!", strOut.contains(bandwidthOutMsg));
+ assertTrue(strOut.contains(bandwidthOutMsg), "Wrong balancer bandwidth!");
} finally {
System.setOut(initialStdOut);
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBatchedListDirectories.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBatchedListDirectories.java
index 11bfa2fe27d54..d057631293219 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBatchedListDirectories.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBatchedListDirectories.java
@@ -33,10 +33,10 @@
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.Lists;
import org.hamcrest.core.StringContains;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
import org.junit.Rule;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
import org.junit.rules.ExpectedException;
import java.io.FileNotFoundException;
@@ -46,9 +46,7 @@
import java.util.List;
import java.util.Map;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.*;
/**
* Tests for the batched listing API.
@@ -85,15 +83,15 @@ private static Path getFileName(int i, int j, int k) {
private static void assertSubDirEquals(int i, int j, Path p) {
assertTrue(p.toString().startsWith("hdfs://"));
Path expected = getSubDirName(i, j);
- assertEquals("Unexpected subdir name",
- expected.toString(), p.toUri().getPath());
+ assertEquals(expected.toString(), p.toUri().getPath(),
+ "Unexpected subdir name");
}
private static void assertFileEquals(int i, int j, int k, Path p) {
assertTrue(p.toString().startsWith("hdfs://"));
Path expected = getFileName(i, j, k);
- assertEquals("Unexpected file name",
- expected.toString(), p.toUri().getPath());
+ assertEquals(expected.toString(), p.toUri().getPath(),
+ "Unexpected file name");
}
private static void loadData() throws Exception {
@@ -119,7 +117,7 @@ private static void loadData() throws Exception {
dfs.setPermission(INACCESSIBLE_DIR_PATH, new FsPermission(0000));
}
- @BeforeClass
+ @BeforeAll
public static void beforeClass() throws Exception {
conf = new HdfsConfiguration();
conf.setInt(DFSConfigKeys.DFS_LIST_LIMIT, 7);
@@ -132,7 +130,7 @@ public static void beforeClass() throws Exception {
loadData();
}
- @AfterClass
+ @AfterAll
public static void afterClass() {
if (cluster != null) {
cluster.shutdown();
@@ -233,8 +231,8 @@ public void listDirRelative() throws Exception {
dfs.setWorkingDirectory(new Path("/dir0"));
List paths = Lists.newArrayList(new Path("."));
List statuses = getStatuses(paths);
- assertEquals("Wrong number of items",
- SECOND_LEVEL_DIRS, statuses.size());
+ assertEquals(SECOND_LEVEL_DIRS, statuses.size(),
+ "Wrong number of items");
for (int i = 0; i < SECOND_LEVEL_DIRS; i++) {
FileStatus stat = statuses.get(i);
assertSubDirEquals(0, i, stat.getPath());
@@ -246,8 +244,8 @@ public void listFilesRelative() throws Exception {
dfs.setWorkingDirectory(new Path("/dir0"));
List paths = Lists.newArrayList(new Path("subdir0"));
List statuses = getStatuses(paths);
- assertEquals("Wrong number of items",
- FILES_PER_DIR, statuses.size());
+ assertEquals(FILES_PER_DIR, statuses.size(),
+ "Wrong number of items");
for (int i = 0; i < FILES_PER_DIR; i++) {
FileStatus stat = statuses.get(i);
assertFileEquals(0, 0, i, stat.getPath());
@@ -256,9 +254,9 @@ public void listFilesRelative() throws Exception {
@Test
public void testDFSHasCapability() throws Throwable {
- assertTrue("FS does not declare PathCapability support",
- dfs.hasPathCapability(new Path("/"),
- CommonPathCapabilities.FS_EXPERIMENTAL_BATCH_LISTING));
+ assertTrue(dfs.hasPathCapability(new Path("/"),
+ CommonPathCapabilities.FS_EXPERIMENTAL_BATCH_LISTING),
+ "FS does not declare PathCapability support");
}
private void listFilesInternal(int numFiles) throws Exception {
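Note that TestBatchedListDirectories keeps its org.junit.Rule and org.junit.rules.ExpectedException imports, which have no direct counterpart in JUnit 5; if those checks were migrated as well, the usual Jupiter replacement is Assertions.assertThrows. A hypothetical, self-contained sketch (the class name and path below are made up, not taken from the patch):

    import static org.junit.jupiter.api.Assertions.assertThrows;
    import static org.junit.jupiter.api.Assertions.assertTrue;

    import java.io.FileNotFoundException;
    import org.junit.jupiter.api.Test;

    class ExpectedExceptionSketch {
      @Test
      void missingPathIsRejected() {
        // JUnit 4 rule style:
        //   thrown.expect(FileNotFoundException.class);
        //   thrown.expectMessage("/does/not/exist");
        // JUnit 5 style: assert on the thrown exception directly.
        FileNotFoundException e = assertThrows(FileNotFoundException.class,
            () -> { throw new FileNotFoundException("/does/not/exist"); }); // stand-in for the HDFS call
        assertTrue(e.getMessage().contains("/does/not/exist"));
      }
    }

Returning the exception from assertThrows also lets the message check live in the test body instead of being declared up front on the rule.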
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockMissingException.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockMissingException.java
index c679f6c576582..0a7b3b9740f45 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockMissingException.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockMissingException.java
@@ -17,7 +17,7 @@
*/
package org.apache.hadoop.hdfs;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertTrue;
import java.io.IOException;
@@ -31,7 +31,7 @@
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
public class TestBlockMissingException {
final static Logger LOG =
@@ -116,6 +116,6 @@ private void validateFile(FileSystem fileSys, Path name)
gotException = true;
}
stm.close();
- assertTrue("Expected BlockMissingException ", gotException);
+ assertTrue(gotException, "Expected BlockMissingException ");
}
}
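The core mechanical change running through this patch is the position of the failure message: org.junit.Assert takes it as the first argument, while org.junit.jupiter.api.Assertions takes it as the last. A minimal illustrative sketch, not part of the patch:

    import static org.junit.jupiter.api.Assertions.assertEquals;
    import static org.junit.jupiter.api.Assertions.assertTrue;

    import org.junit.jupiter.api.Test;

    class MessageOrderSketch {
      @Test
      void messageMovesToTheLastArgument() {
        boolean gotException = true;   // stand-in for the state a real test would build up
        // JUnit 4: assertTrue("Expected BlockMissingException ", gotException);
        assertTrue(gotException, "Expected BlockMissingException ");
        // JUnit 4: assertEquals("Wrong number of items", 3, 1 + 2);
        assertEquals(3, 1 + 2, "Wrong number of items");
      }
    }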
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java
index 54c3eda4b86b9..8290f57bb6069 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java
@@ -18,6 +18,7 @@
package org.apache.hadoop.hdfs;
import static org.apache.hadoop.hdfs.protocol.HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED;
+import static org.junit.jupiter.api.Assertions.fail;
import java.io.File;
import java.io.FileNotFoundException;
@@ -47,9 +48,8 @@
import org.apache.hadoop.test.PathUtils;
import org.apache.hadoop.util.Lists;
import org.apache.hadoop.util.Sets;
-import org.junit.Assert;
-import static org.junit.Assert.fail;
import org.junit.Test;
+import org.junit.jupiter.api.Assertions;
/** Test {@link BlockStoragePolicy} */
public class TestBlockStoragePolicy {
@@ -160,10 +160,10 @@ public void testDefaultPolicies() {
final BlockStoragePolicy policy = POLICY_SUITE.getPolicy(i);
if (policy != null) {
final String s = policy.toString();
- Assert.assertEquals(expectedPolicyStrings.get(i), s);
+ Assertions.assertEquals(expectedPolicyStrings.get(i), s);
}
}
- Assert.assertEquals(POLICY_SUITE.getPolicy(HOT), POLICY_SUITE.getDefaultPolicy());
+ Assertions.assertEquals(POLICY_SUITE.getPolicy(HOT), POLICY_SUITE.getDefaultPolicy());
// check Cold policy
final BlockStoragePolicy cold = POLICY_SUITE.getPolicy(COLD);
@@ -260,11 +260,11 @@ static List asList(int nDisk, int nArchive) {
static void assertStorageType(List computed, short replication,
StorageType... answers) {
- Assert.assertEquals(replication, computed.size());
+ Assertions.assertEquals(replication, computed.size());
final StorageType last = answers[answers.length - 1];
for(int i = 0; i < computed.size(); i++) {
final StorageType expected = i < answers.length? answers[i]: last;
- Assert.assertEquals(expected, computed.get(i));
+ Assertions.assertEquals(expected, computed.get(i));
}
}
@@ -272,27 +272,27 @@ static void assertCreationFallback(BlockStoragePolicy policy,
StorageType noneExpected, StorageType archiveExpected,
StorageType diskExpected, StorageType ssdExpected,
StorageType disk_archiveExpected, StorageType nvdimmExpected) {
- Assert.assertEquals(noneExpected, policy.getCreationFallback(none));
- Assert.assertEquals(archiveExpected, policy.getCreationFallback(archive));
- Assert.assertEquals(diskExpected, policy.getCreationFallback(disk));
- Assert.assertEquals(ssdExpected, policy.getCreationFallback(ssd));
- Assert.assertEquals(nvdimmExpected, policy.getCreationFallback(nvdimm));
- Assert.assertEquals(disk_archiveExpected,
+ Assertions.assertEquals(noneExpected, policy.getCreationFallback(none));
+ Assertions.assertEquals(archiveExpected, policy.getCreationFallback(archive));
+ Assertions.assertEquals(diskExpected, policy.getCreationFallback(disk));
+ Assertions.assertEquals(ssdExpected, policy.getCreationFallback(ssd));
+ Assertions.assertEquals(nvdimmExpected, policy.getCreationFallback(nvdimm));
+ Assertions.assertEquals(disk_archiveExpected,
policy.getCreationFallback(disk_archive));
- Assert.assertEquals(null, policy.getCreationFallback(all));
+ Assertions.assertEquals(null, policy.getCreationFallback(all));
}
static void assertReplicationFallback(BlockStoragePolicy policy,
StorageType noneExpected, StorageType archiveExpected,
StorageType diskExpected, StorageType ssdExpected,
StorageType nvdimmExpected) {
- Assert.assertEquals(noneExpected, policy.getReplicationFallback(none));
- Assert
+ Assertions.assertEquals(noneExpected, policy.getReplicationFallback(none));
+ Assertions
.assertEquals(archiveExpected, policy.getReplicationFallback(archive));
- Assert.assertEquals(diskExpected, policy.getReplicationFallback(disk));
- Assert.assertEquals(ssdExpected, policy.getReplicationFallback(ssd));
- Assert.assertEquals(nvdimmExpected, policy.getReplicationFallback(nvdimm));
- Assert.assertEquals(null, policy.getReplicationFallback(all));
+ Assertions.assertEquals(diskExpected, policy.getReplicationFallback(disk));
+ Assertions.assertEquals(ssdExpected, policy.getReplicationFallback(ssd));
+ Assertions.assertEquals(nvdimmExpected, policy.getReplicationFallback(nvdimm));
+ Assertions.assertEquals(null, policy.getReplicationFallback(all));
}
private static interface CheckChooseStorageTypes {
@@ -879,7 +879,7 @@ static void assertStorageTypes(List computed, StorageType... expect
static void assertStorageTypes(StorageType[] computed, StorageType... expected) {
Arrays.sort(expected);
Arrays.sort(computed);
- Assert.assertArrayEquals(expected, computed);
+ Assertions.assertArrayEquals(expected, computed);
}
@Test
@@ -924,9 +924,9 @@ static void checkChooseExcess(BlockStoragePolicy p, short replication,
}
private void checkDirectoryListing(HdfsFileStatus[] stats, byte... policies) {
- Assert.assertEquals(stats.length, policies.length);
+ Assertions.assertEquals(stats.length, policies.length);
for (int i = 0; i < stats.length; i++) {
- Assert.assertEquals(stats[i].getStoragePolicy(), policies[i]);
+ Assertions.assertEquals(stats[i].getStoragePolicy(), policies[i]);
}
}
@@ -949,7 +949,7 @@ public void testSetStoragePolicy() throws Exception {
final String invalidPolicyName = "INVALID-POLICY";
try {
fs.setStoragePolicy(fooFile, invalidPolicyName);
- Assert.fail("Should throw a HadoopIllegalArgumentException");
+ Assertions.fail("Should throw a HadoopIllegalArgumentException");
} catch (RemoteException e) {
GenericTestUtils.assertExceptionContains(invalidPolicyName, e);
}
@@ -967,14 +967,14 @@ public void testSetStoragePolicy() throws Exception {
final Path invalidPath = new Path("/invalidPath");
try {
fs.setStoragePolicy(invalidPath, HdfsConstants.WARM_STORAGE_POLICY_NAME);
- Assert.fail("Should throw a FileNotFoundException");
+ Assertions.fail("Should throw a FileNotFoundException");
} catch (FileNotFoundException e) {
GenericTestUtils.assertExceptionContains(invalidPath.toString(), e);
}
try {
fs.getStoragePolicy(invalidPath);
- Assert.fail("Should throw a FileNotFoundException");
+ Assertions.fail("Should throw a FileNotFoundException");
} catch (FileNotFoundException e) {
GenericTestUtils.assertExceptionContains(invalidPath.toString(), e);
}
@@ -982,15 +982,15 @@ public void testSetStoragePolicy() throws Exception {
fs.setStoragePolicy(fooFile, HdfsConstants.COLD_STORAGE_POLICY_NAME);
fs.setStoragePolicy(barDir, HdfsConstants.WARM_STORAGE_POLICY_NAME);
fs.setStoragePolicy(barFile2, HdfsConstants.HOT_STORAGE_POLICY_NAME);
- Assert.assertEquals("File storage policy should be COLD",
- HdfsConstants.COLD_STORAGE_POLICY_NAME,
- fs.getStoragePolicy(fooFile).getName());
- Assert.assertEquals("File storage policy should be WARM",
- HdfsConstants.WARM_STORAGE_POLICY_NAME,
- fs.getStoragePolicy(barDir).getName());
- Assert.assertEquals("File storage policy should be HOT",
- HdfsConstants.HOT_STORAGE_POLICY_NAME,
- fs.getStoragePolicy(barFile2).getName());
+ Assertions.assertEquals(
+ HdfsConstants.COLD_STORAGE_POLICY_NAME,
+ fs.getStoragePolicy(fooFile).getName(), "File storage policy should be COLD");
+ Assertions.assertEquals(
+ HdfsConstants.WARM_STORAGE_POLICY_NAME,
+ fs.getStoragePolicy(barDir).getName(), "File storage policy should be WARM");
+ Assertions.assertEquals(
+ HdfsConstants.HOT_STORAGE_POLICY_NAME,
+ fs.getStoragePolicy(barFile2).getName(), "File storage policy should be HOT");
dirList = fs.getClient().listPaths(dir.toString(),
HdfsFileStatus.EMPTY_NAME).getPartialListing();
@@ -1040,8 +1040,8 @@ public void testGetStoragePolicy() throws Exception {
HdfsConstants.COLD_STORAGE_POLICY_NAME);
String policyName = client.getStoragePolicy("/testGetStoragePolicy/foo")
.getName();
- Assert.assertEquals("File storage policy should be COLD",
- HdfsConstants.COLD_STORAGE_POLICY_NAME, policyName);
+ Assertions.assertEquals(
+ HdfsConstants.COLD_STORAGE_POLICY_NAME, policyName, "File storage policy should be COLD");
} finally {
cluster.shutdown();
}
@@ -1140,14 +1140,14 @@ private void checkLocatedBlocks(HdfsLocatedFileStatus status, int blockNum,
List typeList = Lists.newArrayList();
Collections.addAll(typeList, types);
LocatedBlocks lbs = status.getLocatedBlocks();
- Assert.assertEquals(blockNum, lbs.getLocatedBlocks().size());
+ Assertions.assertEquals(blockNum, lbs.getLocatedBlocks().size());
for (LocatedBlock lb : lbs.getLocatedBlocks()) {
- Assert.assertEquals(replicaNum, lb.getStorageTypes().length);
+ Assertions.assertEquals(replicaNum, lb.getStorageTypes().length);
for (StorageType type : lb.getStorageTypes()) {
- Assert.assertTrue(typeList.remove(type));
+ Assertions.assertTrue(typeList.remove(type));
}
}
- Assert.assertTrue(typeList.isEmpty());
+ Assertions.assertTrue(typeList.isEmpty());
}
private void testChangeFileRep(String policyName, byte policyId,
@@ -1285,12 +1285,12 @@ public void testChooseTargetWithTopology() throws Exception {
dataNodes[0], Collections.emptyList(), false,
new HashSet(), 0, policy1, null);
System.out.println(Arrays.asList(targets));
- Assert.assertEquals(3, targets.length);
+ Assertions.assertEquals(3, targets.length);
targets = replicator.chooseTarget("/foo", 3,
dataNodes[0], Collections.emptyList(), false,
new HashSet(), 0, policy2, null);
System.out.println(Arrays.asList(targets));
- Assert.assertEquals(3, targets.length);
+ Assertions.assertEquals(3, targets.length);
}
@Test
@@ -1332,9 +1332,9 @@ public void testChooseSsdOverDisk() throws Exception {
dataNodes[0], Collections.emptyList(), false,
new HashSet(), 0, policy, null);
System.out.println(policy.getName() + ": " + Arrays.asList(targets));
- Assert.assertEquals(2, targets.length);
- Assert.assertEquals(StorageType.SSD, targets[0].getStorageType());
- Assert.assertEquals(StorageType.DISK, targets[1].getStorageType());
+ Assertions.assertEquals(2, targets.length);
+ Assertions.assertEquals(StorageType.SSD, targets[0].getStorageType());
+ Assertions.assertEquals(StorageType.DISK, targets[1].getStorageType());
}
@Test
@@ -1360,17 +1360,17 @@ public void testGetFileStoragePolicyAfterRestartNN() throws Exception {
// 4. Set Dir policy
fs.setStoragePolicy(dir, "HOT");
HdfsFileStatus status = fs.getClient().getFileInfo(file);
- // 5. get file policy, it should be parent policy.
- Assert
- .assertTrue("File storage policy should be HOT",
- status.getStoragePolicy() == HOT);
+ // 5. get file policy, it should be parent policy.
+ Assertions
+ .assertTrue(
+ status.getStoragePolicy() == HOT, "File storage policy should be HOT");
// 6. restart NameNode for reloading edits logs.
cluster.restartNameNode(true);
// 7. get file policy, it should be parent policy.
status = fs.getClient().getFileInfo(file);
- Assert
- .assertTrue("File storage policy should be HOT",
- status.getStoragePolicy() == HOT);
+ Assertions
+ .assertTrue(
+ status.getStoragePolicy() == HOT, "File storage policy should be HOT");
} finally {
cluster.shutdown();
@@ -1408,8 +1408,8 @@ public void testGetAllStoragePoliciesFromFs() throws IOException {
}
// Ensure that we got the same set of policies in both cases.
- Assert.assertTrue(Sets.difference(policyNamesSet1, policyNamesSet2).isEmpty());
- Assert.assertTrue(Sets.difference(policyNamesSet2, policyNamesSet1).isEmpty());
+ Assertions.assertTrue(Sets.difference(policyNamesSet1, policyNamesSet2).isEmpty());
+ Assertions.assertTrue(Sets.difference(policyNamesSet2, policyNamesSet1).isEmpty());
} finally {
cluster.shutdown();
}
@@ -1428,21 +1428,21 @@ public void testStorageType() {
{
final Iterator i = map.keySet().iterator();
- Assert.assertEquals(StorageType.RAM_DISK, i.next());
- Assert.assertEquals(StorageType.SSD, i.next());
- Assert.assertEquals(StorageType.DISK, i.next());
- Assert.assertEquals(StorageType.ARCHIVE, i.next());
- Assert.assertEquals(StorageType.NVDIMM, i.next());
+ Assertions.assertEquals(StorageType.RAM_DISK, i.next());
+ Assertions.assertEquals(StorageType.SSD, i.next());
+ Assertions.assertEquals(StorageType.DISK, i.next());
+ Assertions.assertEquals(StorageType.ARCHIVE, i.next());
+ Assertions.assertEquals(StorageType.NVDIMM, i.next());
}
{
final Iterator> i
= map.entrySet().iterator();
- Assert.assertEquals(StorageType.RAM_DISK, i.next().getKey());
- Assert.assertEquals(StorageType.SSD, i.next().getKey());
- Assert.assertEquals(StorageType.DISK, i.next().getKey());
- Assert.assertEquals(StorageType.ARCHIVE, i.next().getKey());
- Assert.assertEquals(StorageType.NVDIMM, i.next().getKey());
+ Assertions.assertEquals(StorageType.RAM_DISK, i.next().getKey());
+ Assertions.assertEquals(StorageType.SSD, i.next().getKey());
+ Assertions.assertEquals(StorageType.DISK, i.next().getKey());
+ Assertions.assertEquals(StorageType.ARCHIVE, i.next().getKey());
+ Assertions.assertEquals(StorageType.NVDIMM, i.next().getKey());
}
}
@@ -1600,7 +1600,7 @@ private void testStorageIDCheckAccessResult(String[] requested,
public void testCreateDefaultPoliciesFromConf() {
BlockStoragePolicySuite suite =
BlockStoragePolicySuite.createDefaultSuite();
- Assert.assertEquals(HdfsConstants.StoragePolicy.HOT.value(),
+ Assertions.assertEquals(HdfsConstants.StoragePolicy.HOT.value(),
suite.getDefaultPolicy().getId());
Configuration newConf = new Configuration();
@@ -1608,7 +1608,7 @@ public void testCreateDefaultPoliciesFromConf() {
HdfsConstants.StoragePolicy.ONE_SSD);
BlockStoragePolicySuite suiteConf =
BlockStoragePolicySuite.createDefaultSuite(newConf);
- Assert.assertEquals(HdfsConstants.StoragePolicy.ONE_SSD.value(),
+ Assertions.assertEquals(HdfsConstants.StoragePolicy.ONE_SSD.value(),
suiteConf.getDefaultPolicy().getId());
}
@@ -1627,7 +1627,7 @@ public void testCreateFileWithConfiguredDefaultPolicies()
DFSTestUtil.createFile(newfs, fooFile, 0, REPLICATION, 0L);
String policy = newfs.getStoragePolicy(fooFile).getName();
- Assert.assertEquals(HdfsConstants.StoragePolicy.WARM.name(), policy);
+ Assertions.assertEquals(HdfsConstants.StoragePolicy.WARM.name(), policy);
} finally {
cluster.shutdown();
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockTokenWrappingQOP.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockTokenWrappingQOP.java
index c224c4916b57d..1b5431dc3fb7f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockTokenWrappingQOP.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockTokenWrappingQOP.java
@@ -36,14 +36,15 @@
import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.SaslDataTransferTestCase;
import org.apache.hadoop.io.EnumSetWritable;
import org.apache.hadoop.security.TestPermission;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
-import static org.junit.Assert.*;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
/**
@@ -77,7 +78,7 @@ public TestBlockTokenWrappingQOP(String configKey, String qopValue) {
this.qopValue = qopValue;
}
- @Before
+ @BeforeEach
public void setup() throws Exception {
conf = createSecureConfig(this.configKey);
conf.set(DFS_NAMENODE_RPC_ADDRESS_AUXILIARY_KEY, "12000");
@@ -109,7 +110,7 @@ public void setup() throws Exception {
dfs = (DistributedFileSystem) FileSystem.get(uriAuxiliary, conf);
}
- @After
+ @AfterEach
public void tearDown() throws Exception {
if (cluster != null) {
cluster.shutdown();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlocksScheduledCounter.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlocksScheduledCounter.java
index 95d6825d29740..574474d91990f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlocksScheduledCounter.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlocksScheduledCounter.java
@@ -17,7 +17,7 @@
*/
package org.apache.hadoop.hdfs;
-import static org.junit.Assert.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
import java.io.IOException;
import java.util.ArrayList;
@@ -35,8 +35,8 @@
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
-import org.junit.After;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Test;
/**
* This class tests DatanodeDescriptor.getBlocksScheduled() at the
@@ -47,7 +47,7 @@ public class TestBlocksScheduledCounter {
MiniDFSCluster cluster = null;
FileSystem fs = null;
- @After
+ @AfterEach
public void tearDown() throws IOException {
if (fs != null) {
fs.close();
@@ -104,8 +104,8 @@ public void testScheduledBlocksCounterShouldDecrementOnAbandonBlock()
ArrayList dnList = new ArrayList();
datanodeManager.fetchDatanodes(dnList, dnList, false);
for (DatanodeDescriptor descriptor : dnList) {
- assertEquals("Blocks scheduled should be 0 for " + descriptor.getName(),
- 0, descriptor.getBlocksScheduled());
+ assertEquals(0, descriptor.getBlocksScheduled(),
+ "Blocks scheduled should be 0 for " + descriptor.getName());
}
cluster.getDataNodes().get(0).shutdown();
@@ -120,21 +120,21 @@ public void testScheduledBlocksCounterShouldDecrementOnAbandonBlock()
DatanodeDescriptor abandonedDn = datanodeManager.getDatanode(cluster
.getDataNodes().get(0).getDatanodeId());
- assertEquals("for the abandoned dn scheduled counts should be 0", 0,
- abandonedDn.getBlocksScheduled());
+ assertEquals(0, abandonedDn.getBlocksScheduled(),
+ "for the abandoned dn scheduled counts should be 0");
for (DatanodeDescriptor descriptor : dnList) {
if (descriptor.equals(abandonedDn)) {
continue;
}
- assertEquals("Blocks scheduled should be 1 for " + descriptor.getName(),
- 1, descriptor.getBlocksScheduled());
+ assertEquals(1, descriptor.getBlocksScheduled(),
+ "Blocks scheduled should be 1 for " + descriptor.getName());
}
// close the file and the counter should go to zero.
out.close();
for (DatanodeDescriptor descriptor : dnList) {
- assertEquals("Blocks scheduled should be 0 for " + descriptor.getName(),
- 0, descriptor.getBlocksScheduled());
+ assertEquals(0, descriptor.getBlocksScheduled(),
+ "Blocks scheduled should be 0 for " + descriptor.getName());
}
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestByteBufferPread.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestByteBufferPread.java
index 1c7f1500f3689..6b51a52a91165 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestByteBufferPread.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestByteBufferPread.java
@@ -28,14 +28,11 @@
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
-import static org.junit.Assert.assertArrayEquals;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.*;
/**
* This class tests the DFS positional read functionality on a single node
@@ -55,7 +52,7 @@ public class TestByteBufferPread {
private static final int BLOCK_SIZE = 4096;
private static final int FILE_SIZE = 12 * BLOCK_SIZE;
- @BeforeClass
+ @BeforeAll
public static void setup() throws IOException {
// Setup the cluster with a small block size so we can create small files
// that span multiple blocks
@@ -278,7 +275,7 @@ private void testPreadFullyWithByteBuffer(ByteBuffer buffer)
}
}
- @AfterClass
+ @AfterAll
public static void shutdown() throws IOException {
try {
fs.delete(testFile, false);
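The lifecycle annotations used above map one-to-one between the frameworks (@BeforeClass to @BeforeAll, @AfterClass to @AfterAll, @Before to @BeforeEach, @After to @AfterEach), and the class-level methods must, by default, remain static in JUnit 5 just as they were in JUnit 4. A minimal illustrative sketch, not taken from the patch:

    import org.junit.jupiter.api.AfterAll;
    import org.junit.jupiter.api.AfterEach;
    import org.junit.jupiter.api.BeforeAll;
    import org.junit.jupiter.api.BeforeEach;
    import org.junit.jupiter.api.Test;

    class LifecycleSketch {
      @BeforeAll                    // was @BeforeClass
      static void startCluster() { /* e.g. bring up a shared MiniDFSCluster once */ }

      @BeforeEach                   // was @Before
      void resetState() { }

      @Test
      void somethingWorks() { }

      @AfterEach                    // was @After
      void cleanUp() { }

      @AfterAll                     // was @AfterClass
      static void stopCluster() { /* shut the shared cluster down */ }
    }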
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientProtocolForPipelineRecovery.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientProtocolForPipelineRecovery.java
index 3b766f930a335..5db76b91653bc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientProtocolForPipelineRecovery.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientProtocolForPipelineRecovery.java
@@ -17,9 +17,8 @@
*/
package org.apache.hadoop.hdfs;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assertions.fail;
import java.io.IOException;
import java.util.ArrayList;
@@ -31,6 +30,8 @@
import java.util.function.Supplier;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
@@ -54,8 +55,8 @@
import org.apache.hadoop.hdfs.tools.DFSAdmin;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.Assert;
import org.junit.Test;
+import org.junit.jupiter.api.Assertions;
import org.mockito.Mockito;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -84,9 +85,9 @@ public class TestClientProtocolForPipelineRecovery {
// test getNewStampAndToken on a finalized block
try {
namenode.updateBlockForPipeline(firstBlock, "");
- Assert.fail("Can not get a new GS from a finalized block");
+ Assertions.fail("Can not get a new GS from a finalized block");
} catch (IOException e) {
- Assert.assertTrue(e.getMessage().contains(
+ Assertions.assertTrue(e.getMessage().contains(
"not " + BlockUCState.UNDER_CONSTRUCTION));
}
@@ -96,9 +97,9 @@ public class TestClientProtocolForPipelineRecovery {
ExtendedBlock newBlock = new ExtendedBlock(firstBlock.getBlockPoolId(),
newBlockId, 0, firstBlock.getGenerationStamp());
namenode.updateBlockForPipeline(newBlock, "");
- Assert.fail("Cannot get a new GS from a non-existent block");
+ Assertions.fail("Cannot get a new GS from a non-existent block");
} catch (IOException e) {
- Assert.assertTrue(e.getMessage().contains("does not exist"));
+ Assertions.assertTrue(e.getMessage().contains("does not exist"));
}
@@ -122,17 +123,17 @@ public class TestClientProtocolForPipelineRecovery {
DFSClient dfs = ((DistributedFileSystem)fileSys).dfs;
try {
namenode.updateBlockForPipeline(firstBlock, "test" + dfs.clientName);
- Assert.fail("Cannot get a new GS for a non lease holder");
+ Assertions.fail("Cannot get a new GS for a non lease holder");
} catch (LeaseExpiredException e) {
- Assert.assertTrue(e.getMessage().startsWith("Lease mismatch"));
+ Assertions.assertTrue(e.getMessage().startsWith("Lease mismatch"));
}
// test null lease holder
try {
namenode.updateBlockForPipeline(firstBlock, null);
- Assert.fail("Cannot get a new GS for a null lease holder");
+ Assertions.fail("Cannot get a new GS for a null lease holder");
} catch (LeaseExpiredException e) {
- Assert.assertTrue(e.getMessage().startsWith("Lease mismatch"));
+ Assertions.assertTrue(e.getMessage().startsWith("Lease mismatch"));
}
// test getNewStampAndToken on a rbw block
@@ -177,7 +178,7 @@ public void testPipelineRecoveryForLastBlock() throws IOException {
// Test will fail with BlockMissingException if NN does not update the
// replica state based on the latest report.
} catch (org.apache.hadoop.hdfs.BlockMissingException bme) {
- Assert.fail("Block is missing because the file was closed with"
+ Assertions.fail("Block is missing because the file was closed with"
+ " corrupt replicas.");
}
} finally {
@@ -239,7 +240,7 @@ public boolean dropHeartbeatPacket() {
contains = true;
}
}
- Assert.assertTrue(contains);
+ Assertions.assertTrue(contains);
} finally {
DataNodeFaultInjector.set(oldDnInjector);
if (cluster != null) {
@@ -322,7 +323,7 @@ public void testPipelineRecoveryOnOOB() throws Exception {
final String dnAddr = dn.getDatanodeId().getIpcAddr(false);
// issue shutdown to the datanode.
final String[] args1 = {"-shutdownDatanode", dnAddr, "upgrade" };
- Assert.assertEquals(0, dfsadmin.run(args1));
+ Assertions.assertEquals(0, dfsadmin.run(args1));
// Wait long enough to receive an OOB ack before closing the file.
GenericTestUtils.waitForThreadTermination(
"Async datanode shutdown thread", 100, 10000);
@@ -358,23 +359,23 @@ public void testEvictWriter() throws Exception {
// get nodes in the pipeline
DFSOutputStream dfsOut = (DFSOutputStream)out.getWrappedStream();
DatanodeInfo[] nodes = dfsOut.getPipeline();
- Assert.assertEquals(2, nodes.length);
+ Assertions.assertEquals(2, nodes.length);
String dnAddr = nodes[1].getIpcAddr(false);
// evict the writer from the second datanode and wait until
// the pipeline is rebuilt.
DFSAdmin dfsadmin = new DFSAdmin(conf);
final String[] args1 = {"-evictWriters", dnAddr };
- Assert.assertEquals(0, dfsadmin.run(args1));
+ Assertions.assertEquals(0, dfsadmin.run(args1));
out.write(0x31);
out.hflush();
// get the new pipline and check the node is not in there.
nodes = dfsOut.getPipeline();
try {
- Assert.assertTrue(nodes.length > 0 );
+ Assertions.assertTrue(nodes.length > 0 );
for (int i = 0; i < nodes.length; i++) {
- Assert.assertFalse(dnAddr.equals(nodes[i].getIpcAddr(false)));
+ Assertions.assertFalse(dnAddr.equals(nodes[i].getIpcAddr(false)));
}
} finally {
out.close();
@@ -410,7 +411,7 @@ public void testPipelineRecoveryOnRestartFailure() throws Exception {
final String dnAddr1 = dn.getDatanodeId().getIpcAddr(false);
// issue shutdown to the datanode.
final String[] args1 = {"-shutdownDatanode", dnAddr1, "upgrade" };
- Assert.assertEquals(0, dfsadmin.run(args1));
+ Assertions.assertEquals(0, dfsadmin.run(args1));
GenericTestUtils.waitForThreadTermination(
"Async datanode shutdown thread", 100, 10000);
// This should succeed without restarting the node. The restart will
@@ -427,7 +428,7 @@ public void testPipelineRecoveryOnRestartFailure() throws Exception {
final String dnAddr2 = dn.getDatanodeId().getIpcAddr(false);
// issue shutdown to the datanode.
final String[] args2 = {"-shutdownDatanode", dnAddr2, "upgrade" };
- Assert.assertEquals(0, dfsadmin.run(args2));
+ Assertions.assertEquals(0, dfsadmin.run(args2));
GenericTestUtils.waitForThreadTermination(
"Async datanode shutdown thread", 100, 10000);
try {
@@ -480,8 +481,8 @@ public Boolean get() {
return out.getBlock().getGenerationStamp() > oldGs;
}
}, 100, 10000);
- Assert.assertEquals("The pipeline recovery count shouldn't increase",
- 0, out.getStreamer().getPipelineRecoveryCount());
+ Assertions.assertEquals(0, out.getStreamer().getPipelineRecoveryCount(),
+ "The pipeline recovery count shouldn't increase");
out.write(1);
out.close();
// Ensure that subsequent closes are idempotent and do not throw errors
@@ -539,7 +540,7 @@ public void run() {
Thread.sleep(1000);
DatanodeInfo[] pipeline = out.getPipeline();
for (DatanodeInfo node : pipeline) {
- assertFalse("Write should be going on", failed.get());
+ assertFalse(failed.get(), "Write should be going on");
ArrayList dataNodes = cluster.getDataNodes();
int indexToShutdown = 0;
for (int i = 0; i < dataNodes.size(); i++) {
@@ -564,15 +565,15 @@ public Boolean get() {
return out.getBlock().getGenerationStamp() > oldGs;
}
}, 100, 10000);
- Assert.assertEquals("The pipeline recovery count shouldn't increase", 0,
- out.getStreamer().getPipelineRecoveryCount());
+ Assertions.assertEquals(0, out.getStreamer().getPipelineRecoveryCount(),
+ "The pipeline recovery count shouldn't increase");
}
- assertFalse("Write should be going on", failed.get());
+ assertFalse(failed.get(), "Write should be going on");
running.set(false);
t.join();
out.write("testagain".getBytes());
- assertTrue("There should be atleast 2 nodes in pipeline still", out
- .getPipeline().length >= 2);
+ assertTrue(out.getPipeline().length >= 2,
+ "There should be at least 2 nodes in pipeline still");
out.close();
} finally {
DFSClientFaultInjector.set(old);
@@ -723,7 +724,7 @@ public void failPipeline(ReplicaInPipeline replicaInfo,
o.hflush();
}
- assertTrue("Expected a failure in the pipeline", failed.get());
+ assertTrue(failed.get(), "Expected a failure in the pipeline");
DatanodeInfo[] newNodes = dfsO.getStreamer().getNodes();
o.close();
// Trigger block report to NN
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientReportBadBlock.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientReportBadBlock.java
index 2f5aa96757da9..723d3b2603b46 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientReportBadBlock.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientReportBadBlock.java
@@ -41,10 +41,10 @@
import org.apache.hadoop.hdfs.tools.DFSck;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.util.ToolRunner;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
/**
* Class is used to test client reporting corrupted block replica to name node.
@@ -67,7 +67,7 @@ public class TestClientReportBadBlock {
Random rand = new Random();
- @Before
+ @BeforeEach
public void startUpCluster() throws IOException {
// disable block scanner
conf.setInt(DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_KEY, -1);
@@ -80,7 +80,7 @@ public void startUpCluster() throws IOException {
buffersize = conf.getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096);
}
- @After
+ @AfterEach
public void shutDownCluster() throws IOException {
if (dfs != null) {
dfs.close();
@@ -211,7 +211,7 @@ private void createAFileWithCorruptedBlockReplicas(Path filePath, short repl,
// Locate the file blocks by asking name node
final LocatedBlocks locatedblocks = dfs.dfs.getNamenode()
.getBlockLocations(filePath.toString(), 0L, BLOCK_SIZE);
- Assert.assertEquals(repl, locatedblocks.get(0).getLocations().length);
+ Assertions.assertEquals(repl, locatedblocks.get(0).getLocations().length);
// The file only has one block
LocatedBlock lblock = locatedblocks.get(0);
DatanodeInfo[] datanodeinfos = lblock.getLocations();
@@ -236,7 +236,7 @@ private void verifyFirstBlockCorrupted(Path filePath, boolean isCorrupted)
final LocatedBlocks locatedBlocks = dfs.dfs.getNamenode()
.getBlockLocations(filePath.toUri().getPath(), 0, Long.MAX_VALUE);
final LocatedBlock firstLocatedBlock = locatedBlocks.get(0);
- Assert.assertEquals(isCorrupted, firstLocatedBlock.isCorrupt());
+ Assertions.assertEquals(isCorrupted, firstLocatedBlock.isCorrupt());
}
/**
@@ -250,7 +250,7 @@ private void verifyCorruptedBlockCount(Path filePath, int expectedReplicas)
filePath.toUri().getPath(), 0, Long.MAX_VALUE);
// we expect only the first block of the file is used for this test
LocatedBlock firstLocatedBlock = lBlocks.get(0);
- Assert.assertEquals(expectedReplicas,
+ Assertions.assertEquals(expectedReplicas,
firstLocatedBlock.getLocations().length);
}
@@ -300,23 +300,23 @@ private static void verifyFsckHealth(String expected) throws Exception {
// Make sure filesystem is in healthy state
String outStr = runFsck(conf, 0, true, "/");
LOG.info(outStr);
- Assert.assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
+ Assertions.assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
if (!expected.equals("")) {
- Assert.assertTrue(outStr.contains(expected));
+ Assertions.assertTrue(outStr.contains(expected));
}
}
private static void verifyFsckBlockCorrupted() throws Exception {
String outStr = runFsck(conf, 1, true, "/");
LOG.info(outStr);
- Assert.assertTrue(outStr.contains(NamenodeFsck.CORRUPT_STATUS));
+ Assertions.assertTrue(outStr.contains(NamenodeFsck.CORRUPT_STATUS));
}
private static void testFsckListCorruptFilesBlocks(Path filePath, int errorCode) throws Exception{
String outStr = runFsck(conf, errorCode, true, filePath.toString(), "-list-corruptfileblocks");
LOG.info("fsck -list-corruptfileblocks out: " + outStr);
if (errorCode != 0) {
- Assert.assertTrue(outStr.contains("CORRUPT blocks"));
+ Assertions.assertTrue(outStr.contains("CORRUPT blocks"));
}
}
@@ -326,7 +326,7 @@ static String runFsck(Configuration conf, int expectedErrCode,
PrintStream out = new PrintStream(bStream, true);
int errCode = ToolRunner.run(new DFSck(conf, out), path);
if (checkErrorCode)
- Assert.assertEquals(expectedErrCode, errCode);
+ Assertions.assertEquals(expectedErrCode, errCode);
return bStream.toString();
}
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClose.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClose.java
index 94f3612bd76d0..db35a6b4a53a2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClose.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClose.java
@@ -17,7 +17,7 @@
*/
package org.apache.hadoop.hdfs;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.fail;
import java.io.IOException;
import java.io.OutputStream;
@@ -26,7 +26,7 @@
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
public class TestClose {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestConnCache.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestConnCache.java
index 85a4d19539ad2..a27109487325b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestConnCache.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestConnCache.java
@@ -17,7 +17,7 @@
*/
package org.apache.hadoop.hdfs;
-import static org.junit.Assert.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
import java.io.IOException;
import java.net.InetSocketAddress;
@@ -27,8 +27,8 @@
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
import org.apache.hadoop.hdfs.client.impl.BlockReaderTestUtil;
-import org.junit.Assert;
-import org.junit.Test;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.Test;
/**
* This class tests the client connection caching in a single node
@@ -52,7 +52,7 @@ private void pread(DFSInputStream in,
int length,
byte[] authenticData)
throws IOException {
- Assert.assertTrue("Test buffer too small", buffer.length >= offset + length);
+ Assertions.assertTrue(buffer.length >= offset + length, "Test buffer too small");
if (pos >= 0)
in.seek(pos);
@@ -62,7 +62,7 @@ private void pread(DFSInputStream in,
while (length > 0) {
int cnt = in.read(buffer, offset, length);
- Assert.assertTrue("Error in read", cnt > 0);
+ Assertions.assertTrue(cnt > 0, "Error in read");
offset += cnt;
length -= cnt;
}
@@ -71,9 +71,9 @@ private void pread(DFSInputStream in,
for (int i = 0; i < length; ++i) {
byte actual = buffer[i];
byte expect = authenticData[(int)pos + i];
- assertEquals("Read data mismatch at file offset " + (pos + i) +
- ". Expects " + expect + "; got " + actual,
- actual, expect);
+ assertEquals(
+ actual, expect, "Read data mismatch at file offset " + (pos + i) +
+ ". Expects " + expect + "; got " + actual);
}
}
@@ -116,7 +116,7 @@ public void testReadFromOneDN() throws Exception {
in.close();
client.close();
- Assert.assertEquals(1,
+ Assertions.assertEquals(1,
ClientContext.getFromConf(configuration).getPeerCache().size());
}
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestCrcCorruption.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestCrcCorruption.java
index 917f0dbe09395..2ba7ad651d4eb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestCrcCorruption.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestCrcCorruption.java
@@ -18,9 +18,7 @@
package org.apache.hadoop.hdfs;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.*;
import java.io.IOException;
import java.util.List;
@@ -36,8 +34,8 @@
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.ReplicaInfo;
import org.apache.hadoop.io.IOUtils;
-import org.junit.Before;
import org.junit.Test;
+import org.junit.jupiter.api.BeforeEach;
import org.mockito.Mockito;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -75,7 +73,7 @@ public class TestCrcCorruption {
private DFSClientFaultInjector faultInjector;
- @Before
+ @BeforeEach
public void setUp() throws IOException {
faultInjector = Mockito.mock(DFSClientFaultInjector.class);
DFSClientFaultInjector.set(faultInjector);
@@ -174,7 +172,7 @@ private void thistest(Configuration conf, DFSTestUtil util) throws Exception {
final String bpid = cluster.getNamesystem().getBlockPoolId();
List replicas =
dn.getFSDataset().getFinalizedBlocks(bpid);
- assertTrue("Replicas do not exist", !replicas.isEmpty());
+ assertTrue(!replicas.isEmpty(), "Replicas do not exist");
for (int idx = 0; idx < replicas.size(); idx++) {
ReplicaInfo replica = replicas.get(idx);
@@ -192,12 +190,12 @@ private void thistest(Configuration conf, DFSTestUtil util) throws Exception {
}
}
- //
- // Only one replica is possibly corrupted. The other replica should still
- // be good. Verify.
- //
- assertTrue("Corrupted replicas not handled properly.",
- util.checkFiles(fs, "/srcdat"));
+ //
+ // Only one replica is possibly corrupted. The other replica should still
+ // be good. Verify.
+ //
+ assertTrue(
+ util.checkFiles(fs, "/srcdat"), "Corrupted replicas not handled properly.");
LOG.info("All File still have a valid replica");
//
@@ -287,7 +285,7 @@ private void doTestEntirelyCorruptFile(int numDataNodes) throws Exception {
ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, file);
int blockFilesCorrupted = cluster.corruptBlockOnDataNodes(block);
- assertEquals("All replicas not corrupted", replFactor, blockFilesCorrupted);
+ assertEquals(replFactor, blockFilesCorrupted, "All replicas not corrupted");
try {
IOUtils.copyBytes(fs.open(file), new IOUtils.NullOutputStream(), conf,
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSAddressConfig.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSAddressConfig.java
index c61c0b1a85e8f..54920f247beab 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSAddressConfig.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSAddressConfig.java
@@ -27,8 +27,8 @@
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_ADDRESS_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_KEY;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertTrue;
import java.io.IOException;
import java.util.ArrayList;
@@ -37,7 +37,7 @@
import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
public class TestDFSAddressConfig {
@@ -67,7 +67,7 @@ public void testDFSAddressConfig() throws IOException {
*------------------------------------------------------------------------*/
for (int i = 0; i < dns.size(); i++) {
DataNodeProperties dnp = cluster.stopDataNode(i);
- assertNotNull("Should have been able to stop simulated datanode", dnp);
+ assertNotNull(dnp, "Should have been able to stop simulated datanode");
}
conf.unset(DFS_DATANODE_ADDRESS_KEY);
@@ -92,7 +92,7 @@ public void testDFSAddressConfig() throws IOException {
*------------------------------------------------------------------------*/
for (int i = 0; i < dns.size(); i++) {
DataNodeProperties dnp = cluster.stopDataNode(i);
- assertNotNull("Should have been able to stop simulated datanode", dnp);
+ assertNotNull(dnp, "Should have been able to stop simulated datanode");
}
conf.set(DFS_DATANODE_ADDRESS_KEY, "0.0.0.0:0");
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientExcludedNodes.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientExcludedNodes.java
index 59cc154071668..49d4a91ff460f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientExcludedNodes.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientExcludedNodes.java
@@ -17,7 +17,7 @@
*/
package org.apache.hadoop.hdfs;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.fail;
import java.io.IOException;
import java.io.OutputStream;
@@ -29,10 +29,10 @@
import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
import org.apache.hadoop.util.ThreadUtil;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeEach;
/**
@@ -44,13 +44,13 @@ public class TestDFSClientExcludedNodes {
private MiniDFSCluster cluster;
private Configuration conf;
- @Before
+ @BeforeEach
public void setUp() {
cluster = null;
conf = new HdfsConfiguration();
}
- @After
+ @AfterEach
public void tearDown() {
if (cluster != null) {
cluster.shutdown();
@@ -122,8 +122,8 @@ public void testExcludedNodesForgiveness() throws IOException {
// Bring back the older DNs, since they are gonna be forgiven only
// afterwards of this previous block write.
- Assert.assertEquals(true, cluster.restartDataNode(one, true));
- Assert.assertEquals(true, cluster.restartDataNode(two, true));
+ Assertions.assertEquals(true, cluster.restartDataNode(one, true));
+ Assertions.assertEquals(true, cluster.restartDataNode(two, true));
cluster.waitActive();
// Sleep for 5s, to let the excluded nodes be expired
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientFailover.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientFailover.java
index f65bc3a92d18b..746714af9c7a2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientFailover.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientFailover.java
@@ -17,10 +17,7 @@
*/
package org.apache.hadoop.hdfs;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.*;
import java.io.IOException;
import java.lang.reflect.Field;
@@ -34,6 +31,7 @@
import javax.net.SocketFactory;
+import org.junit.jupiter.api.Assumptions;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.conf.Configuration;
@@ -55,10 +53,9 @@
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.util.StringUtils;
-import org.junit.After;
-import org.junit.Assume;
-import org.junit.Before;
import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
import org.mockito.ArgumentMatcher;
import org.mockito.Mockito;
@@ -74,7 +71,7 @@ public class TestDFSClientFailover {
private final Configuration conf = new Configuration();
private MiniDFSCluster cluster;
- @Before
+ @BeforeEach
public void setUpCluster() throws IOException {
cluster = new MiniDFSCluster.Builder(conf)
.nnTopology(MiniDFSNNTopology.simpleHATopology())
@@ -83,7 +80,7 @@ public void setUpCluster() throws IOException {
cluster.waitActive();
}
- @After
+ @AfterEach
public void tearDownCluster() throws IOException {
if (cluster != null) {
cluster.shutdown();
@@ -91,7 +88,7 @@ public void tearDownCluster() throws IOException {
}
}
- @After
+ @AfterEach
public void clearConfig() {
SecurityUtil.setTokenServiceUseIp(true);
}
@@ -217,9 +214,9 @@ public void testFailureWithMisconfiguredHaNNs() throws Exception {
fail("Successfully got proxy provider for misconfigured FS");
} catch (IOException ioe) {
LOG.info("got expected exception", ioe);
- assertTrue("expected exception did not contain helpful message",
- StringUtils.stringifyException(ioe).contains(
- "Could not find any configured addresses for URI " + uri));
+ assertTrue(StringUtils.stringifyException(ioe).contains(
+ "Could not find any configured addresses for URI " + uri),
+ "expected exception did not contain helpful message");
}
}
@@ -233,7 +230,7 @@ private NameService spyOnNameService() {
try {
Field f = InetAddress.class.getDeclaredField("nameServices");
f.setAccessible(true);
- Assume.assumeNotNull(f);
+ Assumptions.assumeTrue(f != null);
@SuppressWarnings("unchecked")
List nsList = (List) f.get(null);
@@ -248,7 +245,7 @@ private NameService spyOnNameService() {
LOG.info("Unable to spy on DNS. Skipping test.", t);
// In case the JDK we're testing on doesn't work like Sun's, just
// skip the test.
- Assume.assumeNoException(t);
+ Assumptions.assumeTrue(false, "Unable to spy on DNS: " + t);
throw new RuntimeException(t);
}
}
@@ -377,9 +374,9 @@ public void testWrappedFailoverProxyProvider() throws Exception {
// not to use IP address for token service
SecurityUtil.setTokenServiceUseIp(false);
- // Logical URI should be used.
- assertTrue("Legacy proxy providers should use logical URI.",
- HAUtil.useLogicalUri(config, p.toUri()));
+ // Logical URI should be used.
+ assertTrue(
+ HAUtil.useLogicalUri(config, p.toUri()), "Legacy proxy providers should use logical URI.");
}
/**
@@ -394,8 +391,8 @@ public void testIPFailoverProxyProviderLogicalUri() throws Exception {
nnUri.getHost(),
IPFailoverProxyProvider.class.getName());
- assertFalse("IPFailoverProxyProvider should not use logical URI.",
- HAUtil.useLogicalUri(config, nnUri));
+ assertFalse(
+ HAUtil.useLogicalUri(config, nnUri), "IPFailoverProxyProvider should not use logical URI.");
}
}
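TestDFSClientFailover.spyOnNameService relies on assumptions to skip the test when DNS cannot be spied on; in JUnit 5 the counterpart of org.junit.Assume lives in org.junit.jupiter.api.Assumptions, and a failed assumption marks the test as aborted/skipped rather than failed. A small illustrative sketch, not part of the patch:

    import static org.junit.jupiter.api.Assumptions.assumeTrue;

    import org.junit.jupiter.api.Test;

    class AssumptionSketch {
      @Test
      void skipsWhenPreconditionIsMissing() {
        Object nameServiceField = null;   // stand-in for the reflective lookup
        // JUnit 4: Assume.assumeNotNull(nameServiceField);
        assumeTrue(nameServiceField != null, "Unable to spy on DNS. Skipping test.");
        // Not reached when the assumption fails; the test is reported as skipped.
      }
    }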
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
index 970003b0e58cc..229cf2f7df36e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
@@ -18,10 +18,7 @@
package org.apache.hadoop.hdfs;
import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.*;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.ArgumentMatchers.anyBoolean;
import static org.mockito.ArgumentMatchers.anyLong;
@@ -90,9 +87,9 @@
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.Time;
-import org.junit.Assert;
-import org.junit.Before;
import org.junit.Test;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeEach;
import org.mockito.Mockito;
import org.mockito.internal.stubbing.answers.ThrowsException;
import org.mockito.invocation.InvocationOnMock;
@@ -160,7 +157,7 @@ private static void writeData(OutputStream out, int len) throws IOException {
}
}
- @Before
+ @BeforeEach
public void setupConf(){
conf = new HdfsConfiguration();
}
@@ -285,8 +282,8 @@ public Object answer(InvocationOnMock invocation)
try {
os.close();
} catch (Exception e) {
- assertTrue("Retries are not being stopped correctly: " + e.getMessage(),
- e.getMessage().equals(exceptionMsg));
+ assertTrue(e.getMessage().equals(exceptionMsg),
+ "Retries are not being stopped correctly: " + e.getMessage());
}
}
@@ -632,7 +629,7 @@ public void testDFSClientRetriesOnBusyBlocks() throws IOException {
timestamp = Time.now();
pass = busyTest(xcievers, threads, fileLen, timeWin, retries);
timestamp2 = Time.now();
- assertTrue("Something wrong! Test 2 got Exception with maxmum retries!", pass);
+ assertTrue(pass, "Something wrong! Test 2 got Exception with maxmum retries!");
LOG.info("Test 2 succeeded! Time spent: " + (timestamp2-timestamp)/1000.0 + " sec.");
//
@@ -657,7 +654,7 @@ public void testDFSClientRetriesOnBusyBlocks() throws IOException {
timestamp = Time.now();
pass = busyTest(xcievers, threads, fileLen, timeWin, retries);
timestamp2 = Time.now();
- assertTrue("Something wrong! Test 4 got Exception with maxmum retries!", pass);
+ assertTrue(pass, "Something wrong! Test 4 got Exception with maxmum retries!");
LOG.info("Test 4 succeeded! Time spent: " + (timestamp2-timestamp)/1000.0 + " sec.");
}
@@ -692,10 +689,10 @@ private boolean busyTest(int xcievers, int threads, int fileLen, int timeWin, in
bufferSize,
replicationFactor,
blockSize);
-
- // verify that file exists in FS namespace
- assertTrue(file1 + " should be a file",
- fs.getFileStatus(file1).isFile());
+
+ // verify that file exists in FS namespace
+ assertTrue(
+ fs.getFileStatus(file1).isFile(), file1 + " should be a file");
System.out.println("Path : \"" + file1 + "\"");
LOG.info("Path : \"" + file1 + "\"");
@@ -706,10 +703,10 @@ private boolean busyTest(int xcievers, int threads, int fileLen, int timeWin, in
// verify that file size has changed to the full size
long len = fs.getFileStatus(file1).getLen();
-
- assertTrue(file1 + " should be of size " + fileLen +
- " but found to be of size " + len,
- len == fileLen);
+
+ assertTrue(
+ len == fileLen, file1 + " should be of size " + fileLen +
+ " but found to be of size " + len);
// read back and check data integrigy
byte[] read_buf = new byte[fileLen];
@@ -809,11 +806,11 @@ public void run() {
in.close();
fs.close();
- assertTrue("hashed keys are not the same size",
- hash_sha.length == expected_sha.length);
+ assertTrue(
+ hash_sha.length == expected_sha.length, "hashed keys are not the same size");
- assertTrue("hashed keys are not equal",
- Arrays.equals(hash_sha, expected_sha));
+ assertTrue(
+ Arrays.equals(hash_sha, expected_sha), "hashed keys are not equal");
counter.inc(); // count this thread as successful
@@ -928,8 +925,8 @@ public void testRetryOnChecksumFailure() throws Exception {
ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, path);
int blockFilesCorrupted = cluster.corruptBlockOnDataNodes(block);
- assertEquals("All replicas not corrupted", REPL_FACTOR,
- blockFilesCorrupted);
+ assertEquals(REPL_FACTOR,
+ blockFilesCorrupted, "All replicas not corrupted");
InetSocketAddress nnAddr =
new InetSocketAddress("localhost", cluster.getNameNodePort());
@@ -1107,13 +1104,13 @@ public void run() {
final FSDataInputStream in = fs.open(file4);
int count = 0;
for(int r; (r = in.read()) != -1; count++) {
- Assert.assertEquals(String.format("count=%d", count),
- bytes[count % bytes.length], (byte)r);
+ Assertions.assertEquals(
+ bytes[count % bytes.length], (byte) r, String.format("count=%d", count));
}
if (!isWebHDFS) {
- Assert.assertEquals(5 * bytes.length, count);
+ Assertions.assertEquals(5 * bytes.length, count);
} else {
- Assert.assertEquals(2 * bytes.length, count);
+ Assertions.assertEquals(2 * bytes.length, count);
}
in.close();
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientSocketSize.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientSocketSize.java
index 1e6f03a1d0151..e5b7ac69882ea 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientSocketSize.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientSocketSize.java
@@ -20,9 +20,7 @@
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder;
import org.apache.hadoop.test.GenericTestUtils;
-
-import org.junit.Test;
-
+import org.junit.jupiter.api.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.slf4j.event.Level;
@@ -31,7 +29,7 @@
import java.net.Socket;
import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_SOCKET_SEND_BUFFER_SIZE_KEY;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertTrue;
public class TestDFSClientSocketSize {
private static final Logger LOG = LoggerFactory.getLogger(
@@ -49,8 +47,8 @@ public void testDefaultSendBufferSize() throws IOException {
final int sendBufferSize = getSendBufferSize(new Configuration());
LOG.info("If not specified, the auto tuned send buffer size is: {}",
sendBufferSize);
- assertTrue("Send buffer size should be non-negative value which is " +
- "determined by system (kernel).", sendBufferSize > 0);
+ assertTrue(sendBufferSize > 0, "Send buffer size should be non-negative value which is " +
+ "determined by system (kernel).");
}
/**
@@ -69,8 +67,8 @@ public void testSpecifiedSendBufferSize() throws IOException {
LOG.info("Large buf size is {}, small is {}",
sendBufferSize1, sendBufferSize2);
- assertTrue("Larger specified send buffer should have effect",
- sendBufferSize1 > sendBufferSize2);
+ assertTrue(
+ sendBufferSize1 > sendBufferSize2, "Larger specified send buffer should have effect");
}
/**
@@ -83,8 +81,8 @@ public void testAutoTuningSendBufferSize() throws IOException {
conf.setInt(DFS_CLIENT_SOCKET_SEND_BUFFER_SIZE_KEY, 0);
final int sendBufferSize = getSendBufferSize(conf);
LOG.info("The auto tuned send buffer size is: {}", sendBufferSize);
- assertTrue("Send buffer size should be non-negative value which is " +
- "determined by system (kernel).", sendBufferSize > 0);
+ assertTrue(sendBufferSize > 0, "Send buffer size should be non-negative value which is " +
+ "determined by system (kernel).");
}
private int getSendBufferSize(Configuration conf) throws IOException {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSFinalize.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSFinalize.java
index 01210d2dab786..b15faab9794bd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSFinalize.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSFinalize.java
@@ -18,8 +18,8 @@
package org.apache.hadoop.hdfs;
import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType.DATA_NODE;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
import java.io.File;
import java.util.Collections;
@@ -33,8 +33,8 @@
import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
import org.apache.hadoop.hdfs.server.datanode.BlockPoolSliceStorage;
import org.apache.hadoop.hdfs.server.datanode.DataStorage;
-import org.junit.After;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Test;
/**
* This test ensures the appropriate response from the system when
@@ -184,7 +184,7 @@ public void testFinalize() throws Exception {
} // end numDir loop
}
- @After
+ @AfterEach
public void tearDown() throws Exception {
LOG.info("Shutting down MiniDFSCluster");
if (cluster != null) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSInotifyEventInputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSInotifyEventInputStream.java
index 05d3c63e52e26..d84048a8ed8dd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSInotifyEventInputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSInotifyEventInputStream.java
@@ -35,8 +35,8 @@
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes;
import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
import org.apache.hadoop.util.ExitUtil;
-import org.junit.Assert;
import org.junit.Test;
+import org.junit.jupiter.api.Assertions;
import java.io.IOException;
import java.io.OutputStream;
@@ -60,8 +60,8 @@ public static EventBatch waitForNextEvents(DFSInotifyEventInputStream eis)
}
private static long checkTxid(EventBatch batch, long prevTxid){
- Assert.assertTrue("Previous txid " + prevTxid + " was not less than " +
- "new txid " + batch.getTxid(), prevTxid < batch.getTxid());
+    Assertions.assertTrue(prevTxid < batch.getTxid(),
+        "Previous txid " + prevTxid + " was not less than new txid " + batch.getTxid());
return batch.getTxid();
}
@@ -73,7 +73,7 @@ private static long checkTxid(EventBatch batch, long prevTxid){
*/
@Test
public void testOpcodeCount() {
- Assert.assertEquals(54, FSEditLogOpCodes.values().length);
+ Assertions.assertEquals(54, FSEditLogOpCodes.values().length);
}
@@ -146,287 +146,287 @@ public void testBasic() throws IOException, URISyntaxException,
// RenameOp
batch = waitForNextEvents(eis);
- Assert.assertEquals(1, batch.getEvents().length);
+ Assertions.assertEquals(1, batch.getEvents().length);
long txid = batch.getTxid();
- Assert.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.RENAME);
+ Assertions.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.RENAME);
Event.RenameEvent re = (Event.RenameEvent) batch.getEvents()[0];
- Assert.assertEquals("/file4", re.getDstPath());
- Assert.assertEquals("/file", re.getSrcPath());
- Assert.assertTrue(re.getTimestamp() > 0);
+ Assertions.assertEquals("/file4", re.getDstPath());
+ Assertions.assertEquals("/file", re.getSrcPath());
+ Assertions.assertTrue(re.getTimestamp() > 0);
LOG.info(re.toString());
- Assert.assertTrue(re.toString().startsWith("RenameEvent [srcPath="));
+ Assertions.assertTrue(re.toString().startsWith("RenameEvent [srcPath="));
long eventsBehind = eis.getTxidsBehindEstimate();
// RenameOldOp
batch = waitForNextEvents(eis);
- Assert.assertEquals(1, batch.getEvents().length);
+ Assertions.assertEquals(1, batch.getEvents().length);
txid = checkTxid(batch, txid);
- Assert.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.RENAME);
+ Assertions.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.RENAME);
Event.RenameEvent re2 = (Event.RenameEvent) batch.getEvents()[0];
- Assert.assertTrue(re2.getDstPath().equals("/file2"));
- Assert.assertTrue(re2.getSrcPath().equals("/file4"));
- Assert.assertTrue(re2.getTimestamp() > 0);
+ Assertions.assertTrue(re2.getDstPath().equals("/file2"));
+ Assertions.assertTrue(re2.getSrcPath().equals("/file4"));
+ Assertions.assertTrue(re2.getTimestamp() > 0);
LOG.info(re2.toString());
// AddOp with overwrite
batch = waitForNextEvents(eis);
- Assert.assertEquals(1, batch.getEvents().length);
+ Assertions.assertEquals(1, batch.getEvents().length);
txid = checkTxid(batch, txid);
- Assert.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.CREATE);
+ Assertions.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.CREATE);
Event.CreateEvent ce = (Event.CreateEvent) batch.getEvents()[0];
- Assert.assertTrue(ce.getiNodeType() == Event.CreateEvent.INodeType.FILE);
- Assert.assertTrue(ce.getPath().equals("/file2"));
- Assert.assertTrue(ce.getCtime() > 0);
- Assert.assertTrue(ce.getReplication() > 0);
- Assert.assertTrue(ce.getSymlinkTarget() == null);
- Assert.assertTrue(ce.getOverwrite());
- Assert.assertEquals(BLOCK_SIZE, ce.getDefaultBlockSize());
- Assert.assertTrue(ce.isErasureCoded().isPresent());
- Assert.assertFalse(ce.isErasureCoded().get());
+ Assertions.assertTrue(ce.getiNodeType() == Event.CreateEvent.INodeType.FILE);
+ Assertions.assertTrue(ce.getPath().equals("/file2"));
+ Assertions.assertTrue(ce.getCtime() > 0);
+ Assertions.assertTrue(ce.getReplication() > 0);
+ Assertions.assertTrue(ce.getSymlinkTarget() == null);
+ Assertions.assertTrue(ce.getOverwrite());
+ Assertions.assertEquals(BLOCK_SIZE, ce.getDefaultBlockSize());
+ Assertions.assertTrue(ce.isErasureCoded().isPresent());
+ Assertions.assertFalse(ce.isErasureCoded().get());
LOG.info(ce.toString());
- Assert.assertTrue(ce.toString().startsWith("CreateEvent [INodeType="));
+ Assertions.assertTrue(ce.toString().startsWith("CreateEvent [INodeType="));
// CloseOp
batch = waitForNextEvents(eis);
- Assert.assertEquals(1, batch.getEvents().length);
+ Assertions.assertEquals(1, batch.getEvents().length);
txid = checkTxid(batch, txid);
- Assert.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.CLOSE);
+ Assertions.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.CLOSE);
Event.CloseEvent ce2 = (Event.CloseEvent) batch.getEvents()[0];
- Assert.assertTrue(ce2.getPath().equals("/file2"));
- Assert.assertTrue(ce2.getFileSize() > 0);
- Assert.assertTrue(ce2.getTimestamp() > 0);
+ Assertions.assertTrue(ce2.getPath().equals("/file2"));
+ Assertions.assertTrue(ce2.getFileSize() > 0);
+ Assertions.assertTrue(ce2.getTimestamp() > 0);
LOG.info(ce2.toString());
- Assert.assertTrue(ce2.toString().startsWith("CloseEvent [path="));
+ Assertions.assertTrue(ce2.toString().startsWith("CloseEvent [path="));
// AppendOp
batch = waitForNextEvents(eis);
- Assert.assertEquals(1, batch.getEvents().length);
+ Assertions.assertEquals(1, batch.getEvents().length);
txid = checkTxid(batch, txid);
- Assert.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.APPEND);
+ Assertions.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.APPEND);
Event.AppendEvent append2 = (Event.AppendEvent)batch.getEvents()[0];
- Assert.assertEquals("/file2", append2.getPath());
- Assert.assertFalse(append2.toNewBlock());
+ Assertions.assertEquals("/file2", append2.getPath());
+ Assertions.assertFalse(append2.toNewBlock());
LOG.info(append2.toString());
- Assert.assertTrue(append2.toString().startsWith("AppendEvent [path="));
+ Assertions.assertTrue(append2.toString().startsWith("AppendEvent [path="));
// CloseOp
batch = waitForNextEvents(eis);
- Assert.assertEquals(1, batch.getEvents().length);
+ Assertions.assertEquals(1, batch.getEvents().length);
txid = checkTxid(batch, txid);
- Assert.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.CLOSE);
- Assert.assertTrue(((Event.CloseEvent) batch.getEvents()[0]).getPath().equals("/file2"));
+ Assertions.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.CLOSE);
+ Assertions.assertTrue(((Event.CloseEvent) batch.getEvents()[0]).getPath().equals("/file2"));
// TimesOp
batch = waitForNextEvents(eis);
- Assert.assertEquals(1, batch.getEvents().length);
+ Assertions.assertEquals(1, batch.getEvents().length);
txid = checkTxid(batch, txid);
- Assert.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.METADATA);
+ Assertions.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.METADATA);
Event.MetadataUpdateEvent mue = (Event.MetadataUpdateEvent) batch.getEvents()[0];
- Assert.assertTrue(mue.getPath().equals("/file2"));
- Assert.assertTrue(mue.getMetadataType() ==
+ Assertions.assertTrue(mue.getPath().equals("/file2"));
+ Assertions.assertTrue(mue.getMetadataType() ==
Event.MetadataUpdateEvent.MetadataType.TIMES);
LOG.info(mue.toString());
- Assert.assertTrue(mue.toString().startsWith("MetadataUpdateEvent [path="));
+ Assertions.assertTrue(mue.toString().startsWith("MetadataUpdateEvent [path="));
// SetReplicationOp
batch = waitForNextEvents(eis);
- Assert.assertEquals(1, batch.getEvents().length);
+ Assertions.assertEquals(1, batch.getEvents().length);
txid = checkTxid(batch, txid);
- Assert.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.METADATA);
+ Assertions.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.METADATA);
Event.MetadataUpdateEvent mue2 = (Event.MetadataUpdateEvent) batch.getEvents()[0];
- Assert.assertTrue(mue2.getPath().equals("/file2"));
- Assert.assertTrue(mue2.getMetadataType() ==
+ Assertions.assertTrue(mue2.getPath().equals("/file2"));
+ Assertions.assertTrue(mue2.getMetadataType() ==
Event.MetadataUpdateEvent.MetadataType.REPLICATION);
- Assert.assertTrue(mue2.getReplication() == 1);
+ Assertions.assertTrue(mue2.getReplication() == 1);
LOG.info(mue2.toString());
// ConcatDeleteOp
batch = waitForNextEvents(eis);
- Assert.assertEquals(3, batch.getEvents().length);
+ Assertions.assertEquals(3, batch.getEvents().length);
txid = checkTxid(batch, txid);
- Assert.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.APPEND);
- Assert.assertTrue(((Event.AppendEvent) batch.getEvents()[0]).getPath().equals("/file2"));
- Assert.assertTrue(batch.getEvents()[1].getEventType() == Event.EventType.UNLINK);
+ Assertions.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.APPEND);
+ Assertions.assertTrue(((Event.AppendEvent) batch.getEvents()[0]).getPath().equals("/file2"));
+ Assertions.assertTrue(batch.getEvents()[1].getEventType() == Event.EventType.UNLINK);
Event.UnlinkEvent ue2 = (Event.UnlinkEvent) batch.getEvents()[1];
- Assert.assertTrue(ue2.getPath().equals("/file3"));
- Assert.assertTrue(ue2.getTimestamp() > 0);
+ Assertions.assertTrue(ue2.getPath().equals("/file3"));
+ Assertions.assertTrue(ue2.getTimestamp() > 0);
LOG.info(ue2.toString());
- Assert.assertTrue(ue2.toString().startsWith("UnlinkEvent [path="));
- Assert.assertTrue(batch.getEvents()[2].getEventType() == Event.EventType.CLOSE);
+ Assertions.assertTrue(ue2.toString().startsWith("UnlinkEvent [path="));
+ Assertions.assertTrue(batch.getEvents()[2].getEventType() == Event.EventType.CLOSE);
Event.CloseEvent ce3 = (Event.CloseEvent) batch.getEvents()[2];
- Assert.assertTrue(ce3.getPath().equals("/file2"));
- Assert.assertTrue(ce3.getTimestamp() > 0);
+ Assertions.assertTrue(ce3.getPath().equals("/file2"));
+ Assertions.assertTrue(ce3.getTimestamp() > 0);
// DeleteOp
batch = waitForNextEvents(eis);
- Assert.assertEquals(1, batch.getEvents().length);
+ Assertions.assertEquals(1, batch.getEvents().length);
txid = checkTxid(batch, txid);
- Assert.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.UNLINK);
+ Assertions.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.UNLINK);
Event.UnlinkEvent ue = (Event.UnlinkEvent) batch.getEvents()[0];
- Assert.assertTrue(ue.getPath().equals("/file2"));
- Assert.assertTrue(ue.getTimestamp() > 0);
+ Assertions.assertTrue(ue.getPath().equals("/file2"));
+ Assertions.assertTrue(ue.getTimestamp() > 0);
LOG.info(ue.toString());
// MkdirOp
batch = waitForNextEvents(eis);
- Assert.assertEquals(1, batch.getEvents().length);
+ Assertions.assertEquals(1, batch.getEvents().length);
txid = checkTxid(batch, txid);
- Assert.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.CREATE);
+ Assertions.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.CREATE);
Event.CreateEvent ce4 = (Event.CreateEvent) batch.getEvents()[0];
- Assert.assertTrue(ce4.getiNodeType() ==
+ Assertions.assertTrue(ce4.getiNodeType() ==
Event.CreateEvent.INodeType.DIRECTORY);
- Assert.assertTrue(ce4.getPath().equals("/dir"));
- Assert.assertTrue(ce4.getCtime() > 0);
- Assert.assertTrue(ce4.getReplication() == 0);
- Assert.assertTrue(ce4.getSymlinkTarget() == null);
+ Assertions.assertTrue(ce4.getPath().equals("/dir"));
+ Assertions.assertTrue(ce4.getCtime() > 0);
+ Assertions.assertTrue(ce4.getReplication() == 0);
+ Assertions.assertTrue(ce4.getSymlinkTarget() == null);
LOG.info(ce4.toString());
// SetPermissionsOp
batch = waitForNextEvents(eis);
- Assert.assertEquals(1, batch.getEvents().length);
+ Assertions.assertEquals(1, batch.getEvents().length);
txid = checkTxid(batch, txid);
- Assert.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.METADATA);
+ Assertions.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.METADATA);
Event.MetadataUpdateEvent mue3 = (Event.MetadataUpdateEvent) batch.getEvents()[0];
- Assert.assertTrue(mue3.getPath().equals("/dir"));
- Assert.assertTrue(mue3.getMetadataType() ==
+ Assertions.assertTrue(mue3.getPath().equals("/dir"));
+ Assertions.assertTrue(mue3.getMetadataType() ==
Event.MetadataUpdateEvent.MetadataType.PERMS);
- Assert.assertTrue(mue3.getPerms().toString().contains("rw-rw-rw-"));
+ Assertions.assertTrue(mue3.getPerms().toString().contains("rw-rw-rw-"));
LOG.info(mue3.toString());
// SetOwnerOp
batch = waitForNextEvents(eis);
- Assert.assertEquals(1, batch.getEvents().length);
+ Assertions.assertEquals(1, batch.getEvents().length);
txid = checkTxid(batch, txid);
- Assert.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.METADATA);
+ Assertions.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.METADATA);
Event.MetadataUpdateEvent mue4 = (Event.MetadataUpdateEvent) batch.getEvents()[0];
- Assert.assertTrue(mue4.getPath().equals("/dir"));
- Assert.assertTrue(mue4.getMetadataType() ==
+ Assertions.assertTrue(mue4.getPath().equals("/dir"));
+ Assertions.assertTrue(mue4.getMetadataType() ==
Event.MetadataUpdateEvent.MetadataType.OWNER);
- Assert.assertTrue(mue4.getOwnerName().equals("username"));
- Assert.assertTrue(mue4.getGroupName().equals("groupname"));
+ Assertions.assertTrue(mue4.getOwnerName().equals("username"));
+ Assertions.assertTrue(mue4.getGroupName().equals("groupname"));
LOG.info(mue4.toString());
// SymlinkOp
batch = waitForNextEvents(eis);
- Assert.assertEquals(1, batch.getEvents().length);
+ Assertions.assertEquals(1, batch.getEvents().length);
txid = checkTxid(batch, txid);
- Assert.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.CREATE);
+ Assertions.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.CREATE);
Event.CreateEvent ce5 = (Event.CreateEvent) batch.getEvents()[0];
- Assert.assertTrue(ce5.getiNodeType() ==
+ Assertions.assertTrue(ce5.getiNodeType() ==
Event.CreateEvent.INodeType.SYMLINK);
- Assert.assertTrue(ce5.getPath().equals("/dir2"));
- Assert.assertTrue(ce5.getCtime() > 0);
- Assert.assertTrue(ce5.getReplication() == 0);
- Assert.assertTrue(ce5.getSymlinkTarget().equals("/dir"));
+ Assertions.assertTrue(ce5.getPath().equals("/dir2"));
+ Assertions.assertTrue(ce5.getCtime() > 0);
+ Assertions.assertTrue(ce5.getReplication() == 0);
+ Assertions.assertTrue(ce5.getSymlinkTarget().equals("/dir"));
LOG.info(ce5.toString());
// SetXAttrOp
batch = waitForNextEvents(eis);
- Assert.assertEquals(1, batch.getEvents().length);
+ Assertions.assertEquals(1, batch.getEvents().length);
txid = checkTxid(batch, txid);
- Assert.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.METADATA);
+ Assertions.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.METADATA);
Event.MetadataUpdateEvent mue5 = (Event.MetadataUpdateEvent) batch.getEvents()[0];
- Assert.assertTrue(mue5.getPath().equals("/file5"));
- Assert.assertTrue(mue5.getMetadataType() ==
+ Assertions.assertTrue(mue5.getPath().equals("/file5"));
+ Assertions.assertTrue(mue5.getMetadataType() ==
Event.MetadataUpdateEvent.MetadataType.XATTRS);
- Assert.assertTrue(mue5.getxAttrs().size() == 1);
- Assert.assertTrue(mue5.getxAttrs().get(0).getName().contains("field"));
- Assert.assertTrue(!mue5.isxAttrsRemoved());
+ Assertions.assertTrue(mue5.getxAttrs().size() == 1);
+ Assertions.assertTrue(mue5.getxAttrs().get(0).getName().contains("field"));
+ Assertions.assertTrue(!mue5.isxAttrsRemoved());
LOG.info(mue5.toString());
// RemoveXAttrOp
batch = waitForNextEvents(eis);
- Assert.assertEquals(1, batch.getEvents().length);
+ Assertions.assertEquals(1, batch.getEvents().length);
txid = checkTxid(batch, txid);
- Assert.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.METADATA);
+ Assertions.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.METADATA);
Event.MetadataUpdateEvent mue6 = (Event.MetadataUpdateEvent) batch.getEvents()[0];
- Assert.assertTrue(mue6.getPath().equals("/file5"));
- Assert.assertTrue(mue6.getMetadataType() ==
+ Assertions.assertTrue(mue6.getPath().equals("/file5"));
+ Assertions.assertTrue(mue6.getMetadataType() ==
Event.MetadataUpdateEvent.MetadataType.XATTRS);
- Assert.assertTrue(mue6.getxAttrs().size() == 1);
- Assert.assertTrue(mue6.getxAttrs().get(0).getName().contains("field"));
- Assert.assertTrue(mue6.isxAttrsRemoved());
+ Assertions.assertTrue(mue6.getxAttrs().size() == 1);
+ Assertions.assertTrue(mue6.getxAttrs().get(0).getName().contains("field"));
+ Assertions.assertTrue(mue6.isxAttrsRemoved());
LOG.info(mue6.toString());
// SetAclOp (1)
batch = waitForNextEvents(eis);
- Assert.assertEquals(1, batch.getEvents().length);
+ Assertions.assertEquals(1, batch.getEvents().length);
txid = checkTxid(batch, txid);
- Assert.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.METADATA);
+ Assertions.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.METADATA);
Event.MetadataUpdateEvent mue7 = (Event.MetadataUpdateEvent) batch.getEvents()[0];
- Assert.assertTrue(mue7.getPath().equals("/file5"));
- Assert.assertTrue(mue7.getMetadataType() ==
+ Assertions.assertTrue(mue7.getPath().equals("/file5"));
+ Assertions.assertTrue(mue7.getMetadataType() ==
Event.MetadataUpdateEvent.MetadataType.ACLS);
- Assert.assertTrue(mue7.getAcls().contains(
+ Assertions.assertTrue(mue7.getAcls().contains(
AclEntry.parseAclEntry("user::rwx", true)));
LOG.info(mue7.toString());
// SetAclOp (2)
batch = waitForNextEvents(eis);
- Assert.assertEquals(1, batch.getEvents().length);
+ Assertions.assertEquals(1, batch.getEvents().length);
txid = checkTxid(batch, txid);
- Assert.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.METADATA);
+ Assertions.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.METADATA);
Event.MetadataUpdateEvent mue8 = (Event.MetadataUpdateEvent) batch.getEvents()[0];
- Assert.assertTrue(mue8.getPath().equals("/file5"));
- Assert.assertTrue(mue8.getMetadataType() ==
+ Assertions.assertTrue(mue8.getPath().equals("/file5"));
+ Assertions.assertTrue(mue8.getMetadataType() ==
Event.MetadataUpdateEvent.MetadataType.ACLS);
- Assert.assertTrue(mue8.getAcls() == null);
+ Assertions.assertTrue(mue8.getAcls() == null);
LOG.info(mue8.toString());
// RenameOp (2)
batch = waitForNextEvents(eis);
- Assert.assertEquals(1, batch.getEvents().length);
+ Assertions.assertEquals(1, batch.getEvents().length);
txid = checkTxid(batch, txid);
- Assert.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.RENAME);
+ Assertions.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.RENAME);
Event.RenameEvent re3 = (Event.RenameEvent) batch.getEvents()[0];
- Assert.assertTrue(re3.getDstPath().equals("/dir/file5"));
- Assert.assertTrue(re3.getSrcPath().equals("/file5"));
- Assert.assertTrue(re3.getTimestamp() > 0);
+ Assertions.assertTrue(re3.getDstPath().equals("/dir/file5"));
+ Assertions.assertTrue(re3.getSrcPath().equals("/file5"));
+ Assertions.assertTrue(re3.getTimestamp() > 0);
LOG.info(re3.toString());
// TruncateOp
batch = waitForNextEvents(eis);
- Assert.assertEquals(1, batch.getEvents().length);
+ Assertions.assertEquals(1, batch.getEvents().length);
txid = checkTxid(batch, txid);
- Assert
+ Assertions
.assertTrue(batch.getEvents()[0].getEventType() ==
Event.EventType.TRUNCATE);
Event.TruncateEvent et = ((Event.TruncateEvent) batch.getEvents()[0]);
- Assert.assertTrue(et.getPath().equals("/truncate_file"));
- Assert.assertTrue(et.getFileSize() == BLOCK_SIZE);
- Assert.assertTrue(et.getTimestamp() > 0);
+ Assertions.assertTrue(et.getPath().equals("/truncate_file"));
+ Assertions.assertTrue(et.getFileSize() == BLOCK_SIZE);
+ Assertions.assertTrue(et.getTimestamp() > 0);
LOG.info(et.toString());
- Assert.assertTrue(et.toString().startsWith("TruncateEvent [path="));
+ Assertions.assertTrue(et.toString().startsWith("TruncateEvent [path="));
// CreateEvent without overwrite
batch = waitForNextEvents(eis);
- Assert.assertEquals(1, batch.getEvents().length);
+ Assertions.assertEquals(1, batch.getEvents().length);
txid = checkTxid(batch, txid);
- Assert.assertTrue(batch.getEvents()[0].getEventType()
+ Assertions.assertTrue(batch.getEvents()[0].getEventType()
== Event.EventType.CREATE);
ce = (Event.CreateEvent) batch.getEvents()[0];
- Assert.assertTrue(ce.getiNodeType() == Event.CreateEvent.INodeType.FILE);
- Assert.assertTrue(ce.getPath().equals("/file_ec_test1"));
- Assert.assertTrue(ce.getCtime() > 0);
- Assert.assertTrue(ce.getReplication() > 0);
- Assert.assertTrue(ce.getSymlinkTarget() == null);
- Assert.assertFalse(ce.getOverwrite());
- Assert.assertEquals(BLOCK_SIZE, ce.getDefaultBlockSize());
- Assert.assertTrue(ce.isErasureCoded().isPresent());
- Assert.assertFalse(ce.isErasureCoded().get());
+ Assertions.assertTrue(ce.getiNodeType() == Event.CreateEvent.INodeType.FILE);
+ Assertions.assertTrue(ce.getPath().equals("/file_ec_test1"));
+ Assertions.assertTrue(ce.getCtime() > 0);
+ Assertions.assertTrue(ce.getReplication() > 0);
+ Assertions.assertTrue(ce.getSymlinkTarget() == null);
+ Assertions.assertFalse(ce.getOverwrite());
+ Assertions.assertEquals(BLOCK_SIZE, ce.getDefaultBlockSize());
+ Assertions.assertTrue(ce.isErasureCoded().isPresent());
+ Assertions.assertFalse(ce.isErasureCoded().get());
LOG.info(ce.toString());
- Assert.assertTrue(ce.toString().startsWith("CreateEvent [INodeType="));
+ Assertions.assertTrue(ce.toString().startsWith("CreateEvent [INodeType="));
// Returns null when there are no further events
- Assert.assertTrue(eis.poll() == null);
+ Assertions.assertTrue(eis.poll() == null);
// make sure the estimate hasn't changed since the above assertion
// tells us that we are fully caught up to the current namesystem state
// and we should not have been behind at all when eventsBehind was set
// either, since there were few enough events that they should have all
// been read to the client during the first poll() call
- Assert.assertTrue(eis.getTxidsBehindEstimate() == eventsBehind);
+ Assertions.assertTrue(eis.getTxidsBehindEstimate() == eventsBehind);
} finally {
cluster.shutdown();
@@ -470,41 +470,41 @@ public void testErasureCodedFiles() throws Exception {
EventBatch batch = null;
batch = waitForNextEvents(eis);
- Assert.assertEquals(1, batch.getEvents().length);
+ Assertions.assertEquals(1, batch.getEvents().length);
long txid = batch.getTxid();
long eventsBehind = eis.getTxidsBehindEstimate();
- Assert.assertTrue(batch.getEvents()[0].getEventType()
+ Assertions.assertTrue(batch.getEvents()[0].getEventType()
== Event.EventType.CREATE);
Event.CreateEvent ce = (Event.CreateEvent) batch.getEvents()[0];
- Assert.assertTrue(ce.getiNodeType() == Event.CreateEvent.INodeType.FILE);
- Assert.assertTrue(ce.getPath().equals("/ecdir/file_ec_test2"));
- Assert.assertTrue(ce.getCtime() > 0);
- Assert.assertEquals(1, ce.getReplication());
- Assert.assertTrue(ce.getSymlinkTarget() == null);
- Assert.assertTrue(ce.getOverwrite());
- Assert.assertEquals(ecPolicy.getCellSize(), ce.getDefaultBlockSize());
- Assert.assertTrue(ce.isErasureCoded().isPresent());
- Assert.assertTrue(ce.isErasureCoded().get());
+ Assertions.assertTrue(ce.getiNodeType() == Event.CreateEvent.INodeType.FILE);
+ Assertions.assertTrue(ce.getPath().equals("/ecdir/file_ec_test2"));
+ Assertions.assertTrue(ce.getCtime() > 0);
+ Assertions.assertEquals(1, ce.getReplication());
+ Assertions.assertTrue(ce.getSymlinkTarget() == null);
+ Assertions.assertTrue(ce.getOverwrite());
+ Assertions.assertEquals(ecPolicy.getCellSize(), ce.getDefaultBlockSize());
+ Assertions.assertTrue(ce.isErasureCoded().isPresent());
+ Assertions.assertTrue(ce.isErasureCoded().get());
LOG.info(ce.toString());
- Assert.assertTrue(ce.toString().startsWith("CreateEvent [INodeType="));
+ Assertions.assertTrue(ce.toString().startsWith("CreateEvent [INodeType="));
batch = waitForNextEvents(eis);
- Assert.assertEquals(1, batch.getEvents().length);
+ Assertions.assertEquals(1, batch.getEvents().length);
txid = checkTxid(batch, txid);
- Assert.assertTrue(batch.getEvents()[0].getEventType()
+ Assertions.assertTrue(batch.getEvents()[0].getEventType()
== Event.EventType.CLOSE);
- Assert.assertTrue(((Event.CloseEvent) batch.getEvents()[0]).getPath()
+ Assertions.assertTrue(((Event.CloseEvent) batch.getEvents()[0]).getPath()
.equals("/ecdir/file_ec_test2"));
// Returns null when there are no further events
- Assert.assertTrue(eis.poll() == null);
+ Assertions.assertTrue(eis.poll() == null);
// make sure the estimate hasn't changed since the above assertion
// tells us that we are fully caught up to the current namesystem state
// and we should not have been behind at all when eventsBehind was set
// either, since there were few enough events that they should have all
// been read to the client during the first poll() call
- Assert.assertTrue(eis.getTxidsBehindEstimate() == eventsBehind);
+ Assertions.assertTrue(eis.getTxidsBehindEstimate() == eventsBehind);
} finally {
cluster.shutdown();
}
@@ -532,12 +532,12 @@ public void testNNFailover() throws IOException, URISyntaxException,
// active
for (int i = 0; i < 10; i++) {
batch = waitForNextEvents(eis);
- Assert.assertEquals(1, batch.getEvents().length);
- Assert.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.CREATE);
- Assert.assertTrue(((Event.CreateEvent) batch.getEvents()[0]).getPath().equals("/dir" +
+ Assertions.assertEquals(1, batch.getEvents().length);
+ Assertions.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.CREATE);
+ Assertions.assertTrue(((Event.CreateEvent) batch.getEvents()[0]).getPath().equals("/dir" +
i));
}
- Assert.assertTrue(eis.poll() == null);
+ Assertions.assertTrue(eis.poll() == null);
} finally {
cluster.shutdown();
}
@@ -571,12 +571,12 @@ public void testTwoActiveNNs() throws IOException, MissingEventsException {
EventBatch batch = null;
for (int i = 0; i < 10; i++) {
batch = waitForNextEvents(eis);
- Assert.assertEquals(1, batch.getEvents().length);
- Assert.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.CREATE);
- Assert.assertTrue(((Event.CreateEvent) batch.getEvents()[0]).getPath().equals("/dir" +
+ Assertions.assertEquals(1, batch.getEvents().length);
+ Assertions.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.CREATE);
+ Assertions.assertTrue(((Event.CreateEvent) batch.getEvents()[0]).getPath().equals("/dir" +
i));
}
- Assert.assertTrue(eis.poll() == null);
+ Assertions.assertTrue(eis.poll() == null);
} finally {
try {
cluster.shutdown();
@@ -615,10 +615,10 @@ public void run() {
// a very generous wait period -- the edit will definitely have been
// processed by the time this is up
EventBatch batch = eis.poll(5, TimeUnit.SECONDS);
- Assert.assertNotNull(batch);
- Assert.assertEquals(1, batch.getEvents().length);
- Assert.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.CREATE);
- Assert.assertEquals("/dir", ((Event.CreateEvent) batch.getEvents()[0]).getPath());
+ Assertions.assertNotNull(batch);
+ Assertions.assertEquals(1, batch.getEvents().length);
+ Assertions.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.CREATE);
+ Assertions.assertEquals("/dir", ((Event.CreateEvent) batch.getEvents()[0]).getPath());
} finally {
cluster.shutdown();
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSInotifyEventInputStreamKerberized.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSInotifyEventInputStreamKerberized.java
index c5537b5edc93f..90f31ac8864ed 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSInotifyEventInputStreamKerberized.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSInotifyEventInputStreamKerberized.java
@@ -33,10 +33,10 @@
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.ssl.KeyStoreTestUtil;
-import org.junit.After;
-import org.junit.Before;
import org.junit.Rule;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
import org.junit.rules.Timeout;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -64,10 +64,7 @@
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.*;
/**
* Class for Kerberized test cases for {@link DFSInotifyEventInputStream}.
@@ -131,7 +128,7 @@ public Void run() throws Exception {
while ((batch = eis.poll()) != null) {
LOG.info("txid: " + batch.getTxid());
}
- assertNull("poll should not return anything", eis.poll());
+ assertNull(eis.poll(), "poll should not return anything");
Thread.sleep(6000);
LOG.info("Slept 6 seconds to make sure the TGT has expired.");
@@ -143,16 +140,16 @@ public Void run() throws Exception {
// verify we can poll after a tgt expiration interval
batch = eis.poll();
- assertNotNull("poll should return something", batch);
+ assertNotNull(batch, "poll should return something");
assertEquals(1, batch.getEvents().length);
- assertNull("poll should not return anything", eis.poll());
+ assertNull(eis.poll(), "poll should not return anything");
return null;
}
}
});
}
- @Before
+ @BeforeEach
public void initKerberizedCluster() throws Exception {
baseDir = new File(System.getProperty("test.build.dir", "target/test-dir"),
TestDFSInotifyEventInputStreamKerberized.class.getSimpleName());
@@ -169,8 +166,8 @@ public void initKerberizedCluster() throws Exception {
SecurityUtil.setAuthenticationMethod(
UserGroupInformation.AuthenticationMethod.KERBEROS, baseConf);
UserGroupInformation.setConfiguration(baseConf);
- assertTrue("Expected configuration to enable security",
- UserGroupInformation.isSecurityEnabled());
+ assertTrue(
+ UserGroupInformation.isSecurityEnabled(), "Expected configuration to enable security");
final String userName = "hdfs";
nnKeytabFile = new File(baseDir, userName + ".keytab");
@@ -218,7 +215,7 @@ public void initKerberizedCluster() throws Exception {
KeyStoreTestUtil.getServerSSLConfigFileName());
}
- @After
+ @AfterEach
public void shutdownCluster() throws Exception {
if (cluster != null) {
cluster.shutdown();
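
Note that this class still imports org.junit.Rule and org.junit.rules.Timeout while its setup, teardown and test annotations move to the Jupiter API; the Jupiter engine does not evaluate JUnit 4 rules, so a rule-based timeout only takes effect if the test keeps running on the vintage engine. If a per-test limit is still wanted under Jupiter, @Timeout is the usual replacement. A minimal sketch, assuming a placeholder 180-second limit and a trivial body:

    import java.util.concurrent.TimeUnit;

    import org.junit.jupiter.api.Test;
    import org.junit.jupiter.api.Timeout;

    class TimeoutSketch {

      // Jupiter-native stand-in for a JUnit 4 "@Rule public Timeout timeout = ..." field.
      @Test
      @Timeout(value = 180, unit = TimeUnit.SECONDS)
      void finishesWithinTheLimit() throws InterruptedException {
        Thread.sleep(10L); // stands in for work that must complete within the limit
      }
    }
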
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSInputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSInputStream.java
index 2f9e0d319cdba..fa263b8d74fa6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSInputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSInputStream.java
@@ -18,11 +18,7 @@
package org.apache.hadoop.hdfs;
import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_READ_USE_CACHE_PRIORITY;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
-import static org.hamcrest.CoreMatchers.equalTo;
+import static org.junit.jupiter.api.Assertions.*;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
@@ -47,8 +43,8 @@
import org.apache.hadoop.hdfs.client.impl.DfsClientConf;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.Retry;
-import org.junit.Assume;
import org.junit.Test;
+import org.junit.jupiter.api.Assumptions;
public class TestDFSInputStream {
private void testSkipInner(MiniDFSCluster cluster) throws IOException {
@@ -108,7 +104,7 @@ public void testSkipWithRemoteBlockReader2() throws IOException {
@Test(timeout=60000)
public void testSkipWithLocalBlockReader() throws IOException {
- Assume.assumeThat(DomainSocket.getLoadingFailureReason(), equalTo(null));
+    Assumptions.assumeTrue(DomainSocket.getLoadingFailureReason() == null);
TemporarySocketDirectory sockDir = new TemporarySocketDirectory();
DomainSocket.disableBindPathValidation();
Configuration conf = new Configuration();
@@ -218,10 +214,10 @@ public void testNullCheckSumWhenDNRestarted()
final List live = new ArrayList();
cluster.getNameNode().getNamesystem().getBlockManager()
.getDatanodeManager().fetchDatanodes(live, null, false);
- assertTrue("DN start should be success and live dn should be 2",
- live.size() == 2);
- assertTrue("File size should be " + chunkSize,
- fs.getFileStatus(file).getLen() == chunkSize);
+ assertTrue(
+ live.size() == 2, "DN start should be success and live dn should be 2");
+ assertTrue(
+ fs.getFileStatus(file).getLen() == chunkSize, "File size should be " + chunkSize);
} finally {
cluster.shutdown();
}
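
The testSkipWithLocalBlockReader hunk above preserves the original skip-if-unavailable semantics: JUnit 4's Assume aborts a test whose precondition does not hold, and the Jupiter counterpart is org.junit.jupiter.api.Assumptions; replacing the assumption with an assertion would instead fail the test on hosts where domain sockets cannot be loaded. A minimal sketch of the mapping, using an illustrative system-property probe rather than the real DomainSocket check:

    import static org.junit.jupiter.api.Assumptions.assumeTrue;

    import org.junit.jupiter.api.Test;

    class AssumptionSketch {

      @Test
      void runsOnlyWhenPreconditionHolds() {
        // Stand-in for an environment probe such as DomainSocket.getLoadingFailureReason().
        String failureReason = System.getProperty("sketch.failure.reason");
        // JUnit 4: Assume.assumeThat(failureReason, equalTo(null));
        // JUnit 5: an unmet assumption aborts (skips) the test instead of failing it.
        assumeTrue(failureReason == null, "precondition not met: " + failureReason);
        // The remainder of the test body runs only when the assumption held.
      }
    }
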
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSInputStreamBlockLocations.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSInputStreamBlockLocations.java
index 9fed914c8d6fc..acbbc171be9c0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSInputStreamBlockLocations.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSInputStreamBlockLocations.java
@@ -19,11 +19,7 @@
package org.apache.hadoop.hdfs;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SIZE_KEY;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.*;
import java.io.IOException;
import java.net.InetSocketAddress;
@@ -41,10 +37,10 @@
import org.apache.hadoop.hdfs.server.datanode.InternalDataNodeTestUtils;
import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
import org.apache.hadoop.util.Time;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
@@ -84,7 +80,7 @@ public TestDFSInputStreamBlockLocations(Boolean enableExpiration) {
enableBlkExpiration = enableExpiration;
}
- @Before
+ @BeforeEach
public void setup() throws IOException {
conf = new HdfsConfiguration();
conf.setBoolean(
@@ -119,7 +115,7 @@ public void setup() throws IOException {
fs = dfsCluster.getFileSystem();
}
- @After
+ @AfterEach
public void teardown() throws IOException {
if (dfsClient != null) {
dfsClient.close();
@@ -172,21 +168,21 @@ public void testRead() throws Exception {
DatanodeInfo[] firstBlkDNInfos = firstLocatedBlk.getLocations();
while (fin.getPos() < firstBlockMark) {
bytesRead = fin.read(readBuffer);
- Assert.assertTrue("Unexpected number of read bytes",
- chunkReadSize >= bytesRead);
+ Assertions.assertTrue(
+ chunkReadSize >= bytesRead, "Unexpected number of read bytes");
if (currDNInfo == null) {
currDNInfo = fin.getCurrentDatanode();
- assertNotNull("current FIS datanode is null", currDNInfo);
+ assertNotNull(currDNInfo, "current FIS datanode is null");
continue;
}
prevDNInfo = currDNInfo;
currDNInfo = fin.getCurrentDatanode();
- assertEquals("the DFSInput stream does not read from same node",
- prevDNInfo, currDNInfo);
+ assertEquals(
+ prevDNInfo, currDNInfo, "the DFSInput stream does not read from same node");
}
- assertEquals("InputStream exceeds expected position",
- firstBlockMark, fin.getPos());
+ assertEquals(
+ firstBlockMark, fin.getPos(), "InputStream exceeds expected position");
// get the second block locations
LocatedBlock secondLocatedBlk =
fin.locatedBlocks.getLocatedBlocks().get(1);
@@ -216,23 +212,23 @@ public void testRead() throws Exception {
}
while (fin.getPos() < secondBlockMark) {
bytesRead = fin.read(readBuffer);
- assertTrue("dead node used to read at position: " + fin.getPos(),
- fin.deadNodesContain(deadNodeInfo));
- Assert.assertTrue("Unexpected number of read bytes",
- chunkReadSize >= bytesRead);
+        assertTrue(fin.deadNodesContain(deadNodeInfo),
+            "dead node used to read at position: " + fin.getPos());
+ Assertions.assertTrue(
+ chunkReadSize >= bytesRead, "Unexpected number of read bytes");
prevDNInfo = currDNInfo;
currDNInfo = fin.getCurrentDatanode();
assertNotEquals(deadNodeInfo, currDNInfo);
if (firstIteration) {
- // currDNInfo has to be different unless first block locs is different
- assertFalse("FSInputStream should pick a different DN",
- firstBlkDNInfos[0].equals(deadNodeInfo)
- && prevDNInfo.equals(currDNInfo));
+          // currDNInfo has to be different unless first block locs is different
+          assertFalse(firstBlkDNInfos[0].equals(deadNodeInfo)
+                  && prevDNInfo.equals(currDNInfo),
+              "FSInputStream should pick a different DN");
firstIteration = false;
}
}
- assertEquals("InputStream exceeds expected position",
- secondBlockMark, fin.getPos());
+ assertEquals(
+ secondBlockMark, fin.getPos(), "InputStream exceeds expected position");
// restart the dead node with the same port
assertTrue(dfsCluster.restartDataNode(stoppedDNProps, true));
dfsCluster.waitActive();
@@ -244,13 +240,13 @@ public void testRead() throws Exception {
while (fin.getPos() < thirdBlockMark) {
bytesRead = fin.read(readBuffer);
if (this.enableBlkExpiration) {
- assertEquals("node is removed from deadNodes after 1st iteration",
- firstIteration, fin.deadNodesContain(deadNodeInfo));
+          assertEquals(firstIteration, fin.deadNodesContain(deadNodeInfo),
+              "node is removed from deadNodes after 1st iteration");
} else {
assertTrue(fin.deadNodesContain(deadNodeInfo));
}
- Assert.assertTrue("Unexpected number of read bytes",
- chunkReadSize >= bytesRead);
+ Assertions.assertTrue(
+ chunkReadSize >= bytesRead, "Unexpected number of read bytes");
prevDNInfo = currDNInfo;
currDNInfo = fin.getCurrentDatanode();
if (!this.enableBlkExpiration) {
@@ -266,8 +262,8 @@ public void testRead() throws Exception {
}
}
}
- assertEquals("InputStream exceeds expected position",
- thirdBlockMark, fin.getPos());
+ assertEquals(
+ thirdBlockMark, fin.getPos(), "InputStream exceeds expected position");
} finally {
if (fout != null) {
fout.close();
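
This class keeps @RunWith(Parameterized.class) and its org.junit.runner imports even though its lifecycle annotations and assertions now come from Jupiter; @RunWith is ignored by the Jupiter engine, so the parameterized variants only execute while the vintage engine remains on the classpath. The usual Jupiter rewrite is @ParameterizedTest with a parameter source, which assumes the junit-jupiter-params artifact is available. A minimal sketch of that shape, with a hypothetical boolean flag standing in for the enableExpiration constructor parameter:

    import static org.junit.jupiter.api.Assertions.assertNotNull;

    import org.junit.jupiter.params.ParameterizedTest;
    import org.junit.jupiter.params.provider.ValueSource;

    class ParameterizedSketch {

      // Runs once per supplied value; the parameter arrives as a method argument
      // instead of through a @Parameterized constructor.
      @ParameterizedTest
      @ValueSource(booleans = {true, false})
      void coversBothSettings(boolean enableExpiration) {
        assertNotNull(Boolean.toString(enableExpiration)); // placeholder check on the injected value
      }
    }
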
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSMkdirs.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSMkdirs.java
index e19f3281e207d..d0cd130ed5457 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSMkdirs.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSMkdirs.java
@@ -17,7 +17,7 @@
*/
package org.apache.hadoop.hdfs;
-import static org.junit.Assert.*;
+import static org.junit.jupiter.api.Assertions.*;
import java.io.FileNotFoundException;
import java.io.IOException;
@@ -30,7 +30,7 @@
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
import org.apache.hadoop.util.Time;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
/**
* This class tests that the DFS command mkdirs only creates valid
@@ -106,10 +106,10 @@ public void testMkdir() throws IOException {
} catch (IOException e) {
expectedException = e;
}
- assertTrue("Create a directory when parent dir exists as file using"
- + " mkdir() should throw ParentNotDirectoryException ",
- expectedException != null
- && expectedException instanceof ParentNotDirectoryException);
+      assertTrue(expectedException != null
+              && expectedException instanceof ParentNotDirectoryException,
+          "Create a directory when parent dir exists as file using"
+              + " mkdir() should throw ParentNotDirectoryException ");
// Create a dir in a non-exist directory, should fail
expectedException = null;
try {
@@ -118,10 +118,10 @@ public void testMkdir() throws IOException {
} catch (IOException e) {
expectedException = e;
}
- assertTrue("Create a directory in a non-exist parent dir using"
- + " mkdir() should throw FileNotFoundException ",
- expectedException != null
- && expectedException instanceof FileNotFoundException);
+      assertTrue(expectedException != null
+              && expectedException instanceof FileNotFoundException,
+          "Create a directory in a non-exist parent dir using"
+              + " mkdir() should throw FileNotFoundException ");
} finally {
dfs.close();
cluster.shutdown();
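
The testMkdir hunks above keep the JUnit 4 idiom of catching the exception into a local variable and asserting on it afterwards. Jupiter's assertThrows expresses the same expectation more directly and removes the need for the null check; a minimal sketch under hypothetical names, where mkdirUnderMissingParent stands in for the DFS call that is expected to fail:

    import static org.junit.jupiter.api.Assertions.assertThrows;
    import static org.junit.jupiter.api.Assertions.assertTrue;

    import java.io.FileNotFoundException;
    import java.io.IOException;

    import org.junit.jupiter.api.Test;

    class ExpectedExceptionSketch {

      // Stand-in for creating a directory under a parent that does not exist.
      private void mkdirUnderMissingParent() throws IOException {
        throw new FileNotFoundException("Parent directory does not exist");
      }

      @Test
      void mkdirUnderMissingParentFails() {
        // assertThrows fails the test if nothing (or the wrong type) is thrown
        // and returns the caught exception for follow-up checks.
        FileNotFoundException e =
            assertThrows(FileNotFoundException.class, this::mkdirUnderMissingParent);
        assertTrue(e.getMessage().contains("does not exist"));
      }
    }
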
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSOutputStream.java
index 432ac8e9a2e0c..6867faad34aae 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSOutputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSOutputStream.java
@@ -57,22 +57,19 @@
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.test.PathUtils;
import org.apache.hadoop.test.Whitebox;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.BeforeClass;
import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeAll;
import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.Write.RECOVER_LEASE_ON_CLOSE_EXCEPTION_KEY;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.*;
import static org.mockito.ArgumentMatchers.anyBoolean;
import static org.mockito.ArgumentMatchers.anyLong;
import org.mockito.Mockito;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
-import static org.junit.Assert.assertEquals;
import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.doThrow;
@@ -85,7 +82,7 @@
public class TestDFSOutputStream {
static MiniDFSCluster cluster;
- @BeforeClass
+ @BeforeAll
public static void setup() throws IOException {
Configuration conf = new Configuration();
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
@@ -107,7 +104,7 @@ public void testCloseTwice() throws IOException {
LastExceptionInStreamer ex = (LastExceptionInStreamer) Whitebox
.getInternalState(streamer, "lastException");
Throwable thrown = (Throwable) Whitebox.getInternalState(ex, "thrown");
- Assert.assertNull(thrown);
+ Assertions.assertNull(thrown);
dos.close();
@@ -119,7 +116,7 @@ public void testCloseTwice() throws IOException {
assertEquals(e, dummy);
}
thrown = (Throwable) Whitebox.getInternalState(ex, "thrown");
- Assert.assertNull(thrown);
+ Assertions.assertNull(thrown);
dos.close();
}
@@ -145,10 +142,10 @@ public void testComputePacketChunkSize() throws Exception {
Field field = dos.getClass().getDeclaredField("packetSize");
field.setAccessible(true);
- Assert.assertTrue((Integer) field.get(dos) + 33 < packetSize);
+ Assertions.assertTrue((Integer) field.get(dos) + 33 < packetSize);
// If PKT_MAX_HEADER_LEN is 257, actual packet size come to over 64KB
// without a fix on HDFS-7308.
- Assert.assertTrue((Integer) field.get(dos) + 257 < packetSize);
+ Assertions.assertTrue((Integer) field.get(dos) + 257 < packetSize);
}
/**
@@ -246,21 +243,21 @@ private void runAdjustChunkBoundary(
final Field writePacketSizeField = dos.getClass()
.getDeclaredField("writePacketSize");
writePacketSizeField.setAccessible(true);
- Assert.assertEquals(writePacketSizeField.getInt(dos),
+ Assertions.assertEquals(writePacketSizeField.getInt(dos),
finalWritePacketSize);
/* get and verify chunksPerPacket */
final Field chunksPerPacketField = dos.getClass()
.getDeclaredField("chunksPerPacket");
chunksPerPacketField.setAccessible(true);
- Assert.assertEquals(chunksPerPacketField.getInt(dos),
+ Assertions.assertEquals(chunksPerPacketField.getInt(dos),
(finalWritePacketSize - packateMaxHeaderLength) / chunkSize);
/* get and verify packetSize */
final Field packetSizeField = dos.getClass()
.getDeclaredField("packetSize");
packetSizeField.setAccessible(true);
- Assert.assertEquals(packetSizeField.getInt(dos),
+ Assertions.assertEquals(packetSizeField.getInt(dos),
chunksPerPacketField.getInt(dos) * chunkSize);
} finally {
if (dfsCluster != null) {
@@ -297,7 +294,7 @@ public void testCongestionBackoff() throws IOException {
DFSPacket packet = mock(DFSPacket.class);
dataQueue.add(packet);
stream.run();
- Assert.assertTrue(congestedNodes.isEmpty());
+ Assertions.assertTrue(congestedNodes.isEmpty());
}
@Test
@@ -359,11 +356,11 @@ public void testEndLeaseCall() throws Exception {
public void testStreamFlush() throws Exception {
FileSystem fs = cluster.getFileSystem();
FSDataOutputStream os = fs.create(new Path("/normal-file"));
- // Verify output stream supports hsync() and hflush().
- assertTrue("DFSOutputStream should support hflush()!",
- os.hasCapability(StreamCapability.HFLUSH.getValue()));
- assertTrue("DFSOutputStream should support hsync()!",
- os.hasCapability(StreamCapability.HSYNC.getValue()));
+    // Verify output stream supports hsync() and hflush().
+    assertTrue(os.hasCapability(StreamCapability.HFLUSH.getValue()),
+        "DFSOutputStream should support hflush()!");
+    assertTrue(os.hasCapability(StreamCapability.HSYNC.getValue()),
+        "DFSOutputStream should support hsync()!");
byte[] bytes = new byte[1024];
InputStream is = new ByteArrayInputStream(bytes);
IOUtils.copyBytes(is, os, bytes.length);
@@ -422,7 +419,7 @@ public void testExceptionInCloseWithoutRecoverLease() throws Exception {
}
}
- @AfterClass
+ @AfterAll
public static void tearDown() {
if (cluster != null) {
cluster.shutdown();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSPermission.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSPermission.java
index 15ce06b69fb6f..d813b2d281947 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSPermission.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSPermission.java
@@ -17,10 +17,7 @@
*/
package org.apache.hadoop.hdfs;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.*;
import java.io.DataOutputStream;
import java.io.FileNotFoundException;
@@ -45,9 +42,9 @@
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.Time;
-import org.junit.After;
-import org.junit.Before;
import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
/** Unit tests for permission */
public class TestDFSPermission {
@@ -115,13 +112,13 @@ public class TestDFSPermission {
}
}
- @Before
+ @BeforeEach
public void setUp() throws IOException {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
cluster.waitActive();
}
- @After
+ @AfterEach
public void tearDown() throws IOException {
if (cluster != null) {
cluster.shutdown();
@@ -185,7 +182,7 @@ private void testPermissionSetting(OpType op) throws Exception {
// case 5: test non-existent parent directory
uMask = DEFAULT_UMASK;
initFileSystem(uMask);
- assertFalse("File shouldn't exists", fs.exists(NON_EXISTENT_PATH));
+ assertFalse(fs.exists(NON_EXISTENT_PATH), "File shouldn't exists");
createAndCheckPermission(op, NON_EXISTENT_PATH, uMask, new FsPermission(
DEFAULT_PERMISSION), false);
Path parent = NON_EXISTENT_PATH.getParent();
@@ -324,8 +321,8 @@ public void testTrashPermission() throws Exception {
fail("User2 should not be allowed to delete user1's dir.");
} catch (AccessControlException e) {
e.printStackTrace();
- assertTrue("Permission denied messages must carry the username",
- e.getMessage().contains(USER2_NAME));
+      assertTrue(e.getMessage().contains(USER2_NAME),
+          "Permission denied messages must carry the username");
}
// ensure the /BSS/user1 still exists
@@ -357,8 +354,8 @@ public void testTrashPermission() throws Exception {
// expect the exception is caused by permission denied
assertTrue(e.getCause() instanceof AccessControlException);
e.printStackTrace();
- assertTrue("Permission denied messages must carry the username",
- e.getCause().getMessage().contains(USER2_NAME));
+      assertTrue(e.getCause().getMessage().contains(USER2_NAME),
+          "Permission denied messages must carry the username");
}
// ensure /BSS/user1 still exists
@@ -540,11 +537,11 @@ public FileSystem run() throws Exception {
fs.access(p1, FsAction.WRITE);
fail("The access call should have failed.");
} catch (AccessControlException e) {
- assertTrue("Permission denied messages must carry the username",
- e.getMessage().contains(USER1_NAME));
- assertTrue("Permission denied messages must carry the path parent",
- e.getMessage().contains(
- p1.getParent().toUri().getPath()));
+      assertTrue(e.getMessage().contains(USER1_NAME),
+          "Permission denied messages must carry the username");
+      assertTrue(
+          e.getMessage().contains(p1.getParent().toUri().getPath()),
+          "Permission denied messages must carry the path parent");
}
Path badPath = new Path("/bad/bad");
@@ -574,11 +571,11 @@ public FileSystem run() throws Exception {
fs.access(p2, FsAction.EXECUTE);
fail("The access call should have failed.");
} catch (AccessControlException e) {
- assertTrue("Permission denied messages must carry the username",
- e.getMessage().contains(USER1_NAME));
- assertTrue("Permission denied messages must carry the path parent",
- e.getMessage().contains(
- p2.getParent().toUri().getPath()));
+      assertTrue(e.getMessage().contains(USER1_NAME),
+          "Permission denied messages must carry the username");
+      assertTrue(
+          e.getMessage().contains(p2.getParent().toUri().getPath()),
+          "Permission denied messages must carry the path parent");
}
}
@@ -599,11 +596,11 @@ public FileSystem run() throws Exception {
fs.access(p3, FsAction.READ_WRITE);
fail("The access call should have failed.");
} catch (AccessControlException e) {
- assertTrue("Permission denied messages must carry the username",
- e.getMessage().contains(USER1_NAME));
- assertTrue("Permission denied messages must carry the path parent",
- e.getMessage().contains(
- p3.getParent().toUri().getPath()));
+      assertTrue(e.getMessage().contains(USER1_NAME),
+          "Permission denied messages must carry the username");
+      assertTrue(
+          e.getMessage().contains(p3.getParent().toUri().getPath()),
+          "Permission denied messages must carry the path parent");
}
}
@@ -636,11 +633,11 @@ public FileSystem run() throws Exception {
fs.exists(nfpath);
fail("The exists call should have failed.");
} catch (AccessControlException e) {
- assertTrue("Permission denied messages must carry file path",
- e.getMessage().contains(fpath.getName()));
- assertTrue("Permission denied messages must specify existing_file is not "
- + "a directory, when checked on /existing_file/non_existing_name",
- e.getMessage().contains("is not a directory"));
+      assertTrue(e.getMessage().contains(fpath.getName()),
+          "Permission denied messages must carry file path");
+      assertTrue(e.getMessage().contains("is not a directory"),
+          "Permission denied messages must specify existing_file is not "
+              + "a directory, when checked on /existing_file/non_existing_name");
}
rootFs.setPermission(p4, new FsPermission("600"));
@@ -648,13 +645,13 @@ public FileSystem run() throws Exception {
fs.exists(nfpath);
fail("The exists call should have failed.");
} catch (AccessControlException e) {
- assertFalse("Permission denied messages must not carry full file path,"
- + "since the user does not have permission on /p4: "
- + e.getMessage(),
- e.getMessage().contains(fpath.getName()));
- assertFalse("Permission denied messages must not specify /p4"
- + " is not a directory: " + e.getMessage(),
- e.getMessage().contains("is not a directory"));
+      assertFalse(e.getMessage().contains(fpath.getName()),
+          "Permission denied messages must not carry full file path,"
+              + " since the user does not have permission on /p4: "
+              + e.getMessage());
+      assertFalse(e.getMessage().contains("is not a directory"),
+          "Permission denied messages must not specify /p4"
+              + " is not a directory: " + e.getMessage());
}
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRemove.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRemove.java
index 7630dd650cebc..7aef45cf8eae6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRemove.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRemove.java
@@ -16,8 +16,8 @@
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
import java.io.DataOutputStream;
import java.io.IOException;
@@ -28,7 +28,7 @@
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
public class TestDFSRemove {
final Path dir = new Path("/test/remove/");
@@ -80,8 +80,8 @@ public void testRemove() throws Exception {
Thread.sleep(3 * DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_DEFAULT * 1000);
// all blocks should be gone now.
long dfsUsedFinal = getTotalDfsUsed(cluster);
- assertEquals("All blocks should be gone. start=" + dfsUsedStart
- + " max=" + dfsUsedMax + " final=" + dfsUsedFinal, dfsUsedStart, dfsUsedFinal);
+      assertEquals(dfsUsedStart, dfsUsedFinal, "All blocks should be gone. start="
+          + dfsUsedStart + " max=" + dfsUsedMax + " final=" + dfsUsedFinal);
}
fs.delete(dir, true);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRename.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRename.java
index fe2eee28b751e..2de42db77ad82 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRename.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRename.java
@@ -16,9 +16,7 @@
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.*;
import java.io.DataOutputStream;
import java.io.IOException;
@@ -191,9 +189,9 @@ public void testRename2Options() throws Exception {
dfs.rename(path, new Path("/dir1"),
new Rename[] {Rename.OVERWRITE, Rename.TO_TRASH});
String auditOut = auditLog.getOutput();
- assertTrue("Rename should have both OVERWRITE and TO_TRASH "
- + "flags at namenode but had only " + auditOut,
- auditOut.contains("options=[OVERWRITE, TO_TRASH]"));
+ assertTrue(
+ auditOut.contains("options=[OVERWRITE, TO_TRASH]"), "Rename should have both OVERWRITE and TO_TRASH "
+ + "flags at namenode but had only " + auditOut);
}
}
}
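The substring check above can also be expressed with AssertJ, which reports the full actual string on failure; a hedged sketch, assuming org.assertj:assertj-core is available on the test classpath and auditOut comes from the surrounding test:

    import static org.assertj.core.api.Assertions.assertThat;

    // Description is shown alongside the actual audit output if the check fails.
    assertThat(auditOut)
        .as("Rename should have both OVERWRITE and TO_TRASH flags at namenode")
        .contains("options=[OVERWRITE, TO_TRASH]");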
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRollback.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRollback.java
index bcb37e340f605..2f24671505322 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRollback.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRollback.java
@@ -19,7 +19,7 @@
import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType.DATA_NODE;
import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType.NAME_NODE;
-import static org.junit.Assert.*;
+import static org.junit.jupiter.api.Assertions.*;
import java.io.File;
import java.io.IOException;
@@ -38,16 +38,15 @@
import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.util.StringUtils;
-import org.junit.After;
-import org.junit.Test;
-
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Test;
import org.apache.hadoop.thirdparty.com.google.common.base.Charsets;
/**
-* This test ensures the appropriate response (successful or failure) from
-* the system when the system is rolled back under various storage state and
-* version conditions.
-*/
+ * This test ensures the appropriate response (successful or failure) from
+ * the system when the system is rolled back under various storage state and
+ * version conditions.
+ */
public class TestDFSRollback {
private static final Logger LOG = LoggerFactory.getLogger(
@@ -127,8 +126,8 @@ void startNameNodeShouldFail(String searchString) {
void startBlockPoolShouldFail(StartupOption operation, String bpid)
throws IOException {
cluster.startDataNodes(conf, 1, false, operation, null); // should fail
- assertFalse("Block pool " + bpid + " should have failed to start",
- cluster.getDataNodes().get(0).isBPServiceAlive(bpid));
+ assertFalse(
+ cluster.getDataNodes().get(0).isBPServiceAlive(bpid), "Block pool " + bpid + " should have failed to start");
}
/**
@@ -344,7 +343,7 @@ private void deleteMatchingFiles(File[] baseDirs, String regex) {
}
}
- @After
+ @AfterEach
public void tearDown() throws Exception {
LOG.info("Shutting down MiniDFSCluster");
if (cluster != null) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
index 0816c3f74fa22..6a523ac9970f3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
@@ -41,6 +41,9 @@
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeAll;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.*;
import org.apache.hadoop.fs.permission.AclEntry;
@@ -67,10 +70,7 @@
import org.apache.hadoop.util.Time;
import org.apache.hadoop.util.ToolRunner;
import org.junit.rules.Timeout;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
import org.junit.Rule;
-import org.junit.Assert;
import org.slf4j.event.Level;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY;
@@ -81,7 +81,8 @@
import static org.apache.hadoop.hdfs.server.namenode.AclTestHelpers.aclEntry;
import static org.hamcrest.CoreMatchers.is;
import static org.hamcrest.CoreMatchers.not;
-import static org.junit.Assert.*;
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.junit.jupiter.api.Assertions.*;
import static org.hamcrest.core.StringContains.containsString;
/**
@@ -106,7 +107,7 @@ public class TestDFSShell {
private static MiniDFSCluster miniCluster;
private static DistributedFileSystem dfs;
- @BeforeClass
+ @BeforeAll
public static void setup() throws IOException {
final Configuration conf = new Configuration();
conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, true);
@@ -124,7 +125,7 @@ public static void setup() throws IOException {
dfs = miniCluster.getFileSystem();
}
- @AfterClass
+ @AfterAll
public static void tearDown() {
if (miniCluster != null) {
miniCluster.shutdown(true, true);
@@ -321,7 +322,7 @@ public void testDu() throws IOException {
System.err.println("Exception raised from DFSShell.run " +
e.getLocalizedMessage());
}
- assertEquals("Return code should be 0.", 0, val);
+ assertEquals(0, val, "Return code should be 0.");
returnString = out.toString();
out.reset();
assertTrue(returnString.contains("1 2 " + myFile3.toString()));
@@ -641,74 +642,74 @@ public void testErrOutPut() throws Exception {
argv[0] = "-cat";
argv[1] = root.toUri().getPath();
int ret = ToolRunner.run(new FsShell(), argv);
- assertEquals(" -cat returned 1 ", 1, ret);
+ assertEquals(1, ret, " -cat returned 1 ");
String returned = out.toString();
- assertTrue("cat does not print exceptions ",
- (returned.lastIndexOf("Exception") == -1));
+ assertTrue(
+ (returned.lastIndexOf("Exception") == -1), "cat does not print exceptions ");
out.reset();
argv[0] = "-rm";
argv[1] = root.toString();
FsShell shell = new FsShell(dfs.getConf());
ret = ToolRunner.run(shell, argv);
- assertEquals(" -rm returned 1 ", 1, ret);
+ assertEquals(1, ret, " -rm returned 1 ");
returned = out.toString();
out.reset();
- assertTrue("rm prints reasonable error ",
- (returned.lastIndexOf("No such file or directory") != -1));
+ assertTrue(
+ (returned.lastIndexOf("No such file or directory") != -1), "rm prints reasonable error ");
argv[0] = "-rmr";
argv[1] = root.toString();
ret = ToolRunner.run(shell, argv);
- assertEquals(" -rmr returned 1", 1, ret);
+ assertEquals(1, ret, " -rmr returned 1");
returned = out.toString();
- assertTrue("rmr prints reasonable error ",
- (returned.lastIndexOf("No such file or directory") != -1));
+ assertTrue(
+ (returned.lastIndexOf("No such file or directory") != -1), "rmr prints reasonable error ");
out.reset();
argv[0] = "-du";
argv[1] = "/nonexistentfile";
ret = ToolRunner.run(shell, argv);
returned = out.toString();
- assertTrue(" -du prints reasonable error ",
- (returned.lastIndexOf("No such file or directory") != -1));
+ assertTrue(
+ (returned.lastIndexOf("No such file or directory") != -1), " -du prints reasonable error ");
out.reset();
argv[0] = "-dus";
argv[1] = "/nonexistentfile";
ret = ToolRunner.run(shell, argv);
returned = out.toString();
- assertTrue(" -dus prints reasonable error",
- (returned.lastIndexOf("No such file or directory") != -1));
+ assertTrue(
+ (returned.lastIndexOf("No such file or directory") != -1), " -dus prints reasonable error");
out.reset();
argv[0] = "-ls";
argv[1] = "/nonexistenfile";
ret = ToolRunner.run(shell, argv);
returned = out.toString();
- assertTrue(" -ls does not return Found 0 items",
- (returned.lastIndexOf("Found 0") == -1));
+ assertTrue(
+ (returned.lastIndexOf("Found 0") == -1), " -ls does not return Found 0 items");
out.reset();
argv[0] = "-ls";
argv[1] = "/nonexistentfile";
ret = ToolRunner.run(shell, argv);
- assertEquals(" -lsr should fail ", 1, ret);
+ assertEquals(1, ret, " -lsr should fail ");
out.reset();
dfs.mkdirs(new Path("/testdir"));
argv[0] = "-ls";
argv[1] = "/testdir";
ret = ToolRunner.run(shell, argv);
returned = out.toString();
- assertTrue(" -ls does not print out anything ",
- (returned.lastIndexOf("Found 0") == -1));
+ assertTrue(
+ (returned.lastIndexOf("Found 0") == -1), " -ls does not print out anything ");
out.reset();
argv[0] = "-ls";
argv[1] = "/user/nonxistant/*";
ret = ToolRunner.run(shell, argv);
- assertEquals(" -ls on nonexistent glob returns 1", 1, ret);
+ assertEquals(1, ret, " -ls on nonexistent glob returns 1");
out.reset();
argv[0] = "-mkdir";
argv[1] = "/testdir";
ret = ToolRunner.run(shell, argv);
returned = out.toString();
- assertEquals(" -mkdir returned 1 ", 1, ret);
- assertTrue(" -mkdir returned File exists",
- (returned.lastIndexOf("File exists") != -1));
+ assertEquals(1, ret, " -mkdir returned 1 ");
+ assertTrue(
+ (returned.lastIndexOf("File exists") != -1), " -mkdir returned File exists");
Path testFile = new Path("/testfile");
OutputStream outtmp = dfs.create(testFile);
outtmp.write(testFile.toString().getBytes());
@@ -718,24 +719,24 @@ public void testErrOutPut() throws Exception {
argv[1] = "/testfile";
ret = ToolRunner.run(shell, argv);
returned = out.toString();
- assertEquals(" -mkdir returned 1", 1, ret);
- assertTrue(" -mkdir returned this is a file ",
- (returned.lastIndexOf("not a directory") != -1));
+ assertEquals(1, ret, " -mkdir returned 1");
+ assertTrue(
+ (returned.lastIndexOf("not a directory") != -1), " -mkdir returned this is a file ");
out.reset();
argv[0] = "-mkdir";
argv[1] = "/testParent/testChild";
ret = ToolRunner.run(shell, argv);
returned = out.toString();
- assertEquals(" -mkdir returned 1", 1, ret);
- assertTrue(" -mkdir returned there is No file or directory but has testChild in the path",
- (returned.lastIndexOf("testChild") == -1));
+ assertEquals(1, ret, " -mkdir returned 1");
+ assertTrue(
+ (returned.lastIndexOf("testChild") == -1), " -mkdir returned there is No file or directory but has testChild in the path");
out.reset();
argv = new String[3];
argv[0] = "-mv";
argv[1] = "/testfile";
argv[2] = "/no-such-dir/file";
ret = ToolRunner.run(shell, argv);
- assertEquals("mv failed to rename", 1, ret);
+ assertEquals(1, ret, "mv failed to rename");
out.reset();
argv = new String[3];
argv[0] = "-mv";
@@ -743,25 +744,25 @@ public void testErrOutPut() throws Exception {
argv[2] = "/testfiletest";
ret = ToolRunner.run(shell, argv);
returned = out.toString();
- assertTrue("no output from rename",
- (returned.lastIndexOf("Renamed") == -1));
+ assertTrue(
+ (returned.lastIndexOf("Renamed") == -1), "no output from rename");
out.reset();
argv[0] = "-mv";
argv[1] = "/testfile";
argv[2] = "/testfiletmp";
ret = ToolRunner.run(shell, argv);
returned = out.toString();
- assertTrue(" unix like output",
- (returned.lastIndexOf("No such file or") != -1));
+ assertTrue(
+ (returned.lastIndexOf("No such file or") != -1), " unix like output");
out.reset();
argv = new String[1];
argv[0] = "-du";
dfs.mkdirs(dfs.getHomeDirectory());
ret = ToolRunner.run(shell, argv);
returned = out.toString();
- assertEquals(" no error ", 0, ret);
- assertTrue("empty path specified",
- (returned.lastIndexOf("empty string") == -1));
+ assertEquals(0, ret, " no error ");
+ assertTrue(
+ (returned.lastIndexOf("empty string") == -1), "empty path specified");
out.reset();
argv = new String[3];
argv[0] = "-test";
@@ -769,7 +770,7 @@ public void testErrOutPut() throws Exception {
argv[2] = "/no/such/dir";
ret = ToolRunner.run(shell, argv);
returned = out.toString();
- assertEquals(" -test -d wrong result ", 1, ret);
+ assertEquals(1, ret, " -test -d wrong result ");
assertTrue(returned.isEmpty());
} finally {
if (bak != null) {
@@ -802,7 +803,7 @@ public void testMoveWithTargetPortEmpty() throws Exception {
argv[1] = srcFs.getUri() + "/testfile";
argv[2] = "hdfs://" + srcFs.getUri().getHost() + "/testfile2";
int ret = ToolRunner.run(shell, argv);
- assertEquals("mv should have succeeded", 0, ret);
+ assertEquals(0, ret, "mv should have succeeded");
} finally {
if (cluster != null) {
cluster.shutdown();
@@ -831,19 +832,19 @@ public void testURIPaths() throws Exception {
argv[0] = "-ls";
argv[1] = dstFs.getUri().toString() + "/";
int ret = ToolRunner.run(shell, argv);
- assertEquals("ls works on remote uri ", 0, ret);
+ assertEquals(0, ret, "ls works on remote uri ");
//check for rm -r
dstFs.mkdirs(new Path("/hadoopdir"));
argv = new String[2];
argv[0] = "-rmr";
argv[1] = dstFs.getUri().toString() + "/hadoopdir";
ret = ToolRunner.run(shell, argv);
- assertEquals("-rmr works on remote uri " + argv[1], 0, ret);
+ assertEquals(0, ret, "-rmr works on remote uri " + argv[1]);
//check du
argv[0] = "-du";
argv[1] = dstFs.getUri().toString() + "/";
ret = ToolRunner.run(shell, argv);
- assertEquals("du works on remote uri ", 0, ret);
+ assertEquals(0, ret, "du works on remote uri ");
//check put
File furi = new File(TEST_ROOT_DIR, "furi");
createLocalFile(furi);
@@ -852,20 +853,20 @@ public void testURIPaths() throws Exception {
argv[1] = furi.toURI().toString();
argv[2] = dstFs.getUri().toString() + "/furi";
ret = ToolRunner.run(shell, argv);
- assertEquals(" put is working ", 0, ret);
+ assertEquals(0, ret, " put is working ");
//check cp
argv[0] = "-cp";
argv[1] = dstFs.getUri().toString() + "/furi";
argv[2] = srcFs.getUri().toString() + "/furi";
ret = ToolRunner.run(shell, argv);
- assertEquals(" cp is working ", 0, ret);
+ assertEquals(0, ret, " cp is working ");
assertTrue(srcFs.exists(new Path("/furi")));
//check cat
argv = new String[2];
argv[0] = "-cat";
argv[1] = dstFs.getUri().toString() + "/furi";
ret = ToolRunner.run(shell, argv);
- assertEquals(" cat is working ", 0, ret);
+ assertEquals(0, ret, " cat is working ");
//check chown
dstFs.delete(new Path("/furi"), true);
dstFs.delete(new Path("/hadoopdir"), true);
@@ -882,15 +883,15 @@ public void testURIPaths() throws Exception {
argv[0] = "-cat";
argv[1] = "hdfs:///furi";
ret = ToolRunner.run(shell, argv);
- assertEquals(" default works for cat", 0, ret);
+ assertEquals(0, ret, " default works for cat");
argv[0] = "-ls";
argv[1] = "hdfs:///";
ret = ToolRunner.run(shell, argv);
- assertEquals("default works for ls ", 0, ret);
+ assertEquals(0, ret, "default works for ls ");
argv[0] = "-rmr";
argv[1] = "hdfs:///furi";
ret = ToolRunner.run(shell, argv);
- assertEquals("default works for rm/rmr", 0, ret);
+ assertEquals(0, ret, "default works for rm/rmr");
} finally {
if (null != srcCluster) {
srcCluster.shutdown();
@@ -919,12 +920,12 @@ public void testHead() throws Exception {
final String[] argv = new String[]{"-head", testFile.toString()};
final int ret = ToolRunner.run(new FsShell(dfs.getConf()), argv);
- assertEquals(Arrays.toString(argv) + " returned " + ret, 0, ret);
- assertEquals("-head returned " + out.size() + " bytes data, expected 1KB",
- 1024, out.size());
- // tailed out last 1KB of the file content
- assertArrayEquals("Head output doesn't match input",
- text.substring(0, 1024).getBytes(), out.toByteArray());
+ assertEquals(0, ret, Arrays.toString(argv) + " returned " + ret);
+ assertEquals(
+ 1024, out.size(), "-head returned " + out.size() + " bytes data, expected 1KB");
+ // tailed out last 1KB of the file content
+ assertArrayEquals(
+ text.substring(0, 1024).getBytes(), out.toByteArray(), "Head output doesn't match input");
out.reset();
}
@@ -946,12 +947,12 @@ public void testTail() throws Exception {
final String[] argv = new String[]{"-tail", testFile.toString()};
final int ret = ToolRunner.run(new FsShell(dfs.getConf()), argv);
- assertEquals(Arrays.toString(argv) + " returned " + ret, 0, ret);
- assertEquals("-tail returned " + out.size() + " bytes data, expected 1KB",
- 1024, out.size());
- // tailed out last 1KB of the file content
- assertArrayEquals("Tail output doesn't match input",
- text.substring(fileLen - 1024).getBytes(), out.toByteArray());
+ assertEquals(0, ret, Arrays.toString(argv) + " returned " + ret);
+ assertEquals(
+ 1024, out.size(), "-tail returned " + out.size() + " bytes data, expected 1KB");
+ // tailed out last 1KB of the file content
+ assertArrayEquals(
+ text.substring(fileLen - 1024).getBytes(), out.toByteArray(), "Tail output doesn't match input");
out.reset();
}
@@ -1038,9 +1039,9 @@ private void textTest(Path root, Configuration conf) throws Exception {
argv[0] = "-text";
argv[1] = new Path(root, "file.gz").toString();
int ret = ToolRunner.run(new FsShell(conf), argv);
- assertEquals("'-text " + argv[1] + " returned " + ret, 0, ret);
- assertTrue("Output doesn't match input",
- Arrays.equals(file.toByteArray(), out.toByteArray()));
+ assertEquals(0, ret, "'-text " + argv[1] + " returned " + ret);
+ assertTrue(
+ Arrays.equals(file.toByteArray(), out.toByteArray()), "Output doesn't match input");
// Create a sequence file with a gz extension, to test proper
// container detection. Magic detection.
@@ -1057,9 +1058,9 @@ private void textTest(Path root, Configuration conf) throws Exception {
argv[0] = "-text";
argv[1] = new Path(root, "file.gz").toString();
ret = ToolRunner.run(new FsShell(conf), argv);
- assertEquals("'-text " + argv[1] + " returned " + ret, 0, ret);
- assertTrue("Output doesn't match input",
- Arrays.equals("Foo\tBar\n".getBytes(), out.toByteArray()));
+ assertEquals(0, ret, "'-text " + argv[1] + " returned " + ret);
+ assertTrue(
+ Arrays.equals("Foo\tBar\n".getBytes(), out.toByteArray()), "Output doesn't match input");
out.reset();
// Test deflate. Extension-based detection.
@@ -1074,9 +1075,9 @@ private void textTest(Path root, Configuration conf) throws Exception {
argv[0] = "-text";
argv[1] = new Path(root, "file.deflate").toString();
ret = ToolRunner.run(new FsShell(conf), argv);
- assertEquals("'-text " + argv[1] + " returned " + ret, 0, ret);
- assertTrue("Output doesn't match input",
- Arrays.equals(outbytes, out.toByteArray()));
+ assertEquals(0, ret, "'-text " + argv[1] + " returned " + ret);
+ assertTrue(
+ Arrays.equals(outbytes, out.toByteArray()), "Output doesn't match input");
out.reset();
// Test a simple codec. Extension based detection. We use
@@ -1095,9 +1096,9 @@ private void textTest(Path root, Configuration conf) throws Exception {
argv[0] = "-text";
argv[1] = new Path(root, p).toString();
ret = ToolRunner.run(new FsShell(conf), argv);
- assertEquals("'-text " + argv[1] + " returned " + ret, 0, ret);
- assertTrue("Output doesn't match input",
- Arrays.equals(writebytes, out.toByteArray()));
+ assertEquals(0, ret, "'-text " + argv[1] + " returned " + ret);
+ assertTrue(
+ Arrays.equals(writebytes, out.toByteArray()), "Output doesn't match input");
out.reset();
// Test a plain text.
@@ -1111,9 +1112,9 @@ private void textTest(Path root, Configuration conf) throws Exception {
argv[0] = "-text";
argv[1] = new Path(root, "file.txt").toString();
ret = ToolRunner.run(new FsShell(conf), argv);
- assertEquals("'-text " + argv[1] + " returned " + ret, 0, ret);
- assertTrue("Output doesn't match input",
- Arrays.equals(writebytes, out.toByteArray()));
+ assertEquals(0, ret, "'-text " + argv[1] + " returned " + ret);
+ assertTrue(
+ Arrays.equals(writebytes, out.toByteArray()), "Output doesn't match input");
out.reset();
} finally {
if (null != bak) {
@@ -1142,7 +1143,7 @@ public void testChecksum() throws Exception {
assertTrue(out.toString().contains(StringUtils
.byteToHexString(checksum.getBytes(), 0, checksum.getLength())));
} finally {
- Assert.assertNotNull(printStream);
+ Assertions.assertNotNull(printStream);
System.setOut(printStream);
}
}
@@ -1167,22 +1168,22 @@ public void testCopyToLocal() throws IOException {
File localroot2 = new File(TEST_ROOT_DIR, "copyToLocal2");
File f1 = new File(localroot, "f1");
- assertTrue("Copying failed.", f1.isFile());
+ assertTrue(f1.isFile(), "Copying failed.");
File f2 = new File(localroot, "f2");
- assertTrue("Copying failed.", f2.isFile());
+ assertTrue(f2.isFile(), "Copying failed.");
File sub = new File(localroot, "sub");
- assertTrue("Copying failed.", sub.isDirectory());
+ assertTrue(sub.isDirectory(), "Copying failed.");
File f3 = new File(sub, "f3");
- assertTrue("Copying failed.", f3.isFile());
+ assertTrue(f3.isFile(), "Copying failed.");
File f4 = new File(sub, "f4");
- assertTrue("Copying failed.", f4.isFile());
+ assertTrue(f4.isFile(), "Copying failed.");
File f5 = new File(localroot2, "f1");
- assertTrue("Copying failed.", f5.isFile());
+ assertTrue(f5.isFile(), "Copying failed.");
f1.delete();
f2.delete();
@@ -1945,10 +1946,10 @@ public Object run() throws Exception {
args[0] = "-ls";
args[1] = "/foo";
int ret = ToolRunner.run(fshell, args);
- assertEquals("returned should be 1", 1, ret);
+ assertEquals(1, ret, "returned should be 1");
String str = out.toString();
- assertTrue("permission denied printed",
- str.indexOf("Permission denied") != -1);
+ assertTrue(
+ str.indexOf("Permission denied") != -1, "permission denied printed");
out.reset();
return null;
}
@@ -1989,7 +1990,7 @@ public String run(int exitcode, String... options) throws IOException {
try {
assertEquals(exitcode, shell.run(args));
} catch (Exception e) {
- assertTrue(StringUtils.stringifyException(e), false);
+ assertTrue(false, StringUtils.stringifyException(e));
}
return exitcode == 0? DFSTestUtil.readFile(new File(dst)): null;
}
@@ -2079,43 +2080,43 @@ public void testStat() throws Exception {
out.reset();
doFsStat(dfs.getConf(), null, testDir1);
- assertEquals("Unexpected -stat output: " + out,
- out.toString(), String.format("%s%n", mtime1));
+ assertEquals(
+ out.toString(), String.format("%s%n", mtime1), "Unexpected -stat output: " + out);
out.reset();
doFsStat(dfs.getConf(), null, testDir1, testFile2);
- assertEquals("Unexpected -stat output: " + out,
- out.toString(), String.format("%s%n%s%n", mtime1, mtime2));
+ assertEquals(
+ out.toString(), String.format("%s%n%s%n", mtime1, mtime2), "Unexpected -stat output: " + out);
doFsStat(dfs.getConf(), "%F %u:%g %b %y %n");
out.reset();
doFsStat(dfs.getConf(), "%F %a %A %u:%g %b %y %n", testDir1);
- assertTrue(out.toString(), out.toString().contains(mtime1));
- assertTrue(out.toString(), out.toString().contains("directory"));
- assertTrue(out.toString(), out.toString().contains(status1.getGroup()));
- assertTrue(out.toString(),
- out.toString().contains(status1.getPermission().toString()));
+ assertTrue(out.toString().contains(mtime1), out.toString());
+ assertTrue(out.toString().contains("directory"), out.toString());
+ assertTrue(out.toString().contains(status1.getGroup()), out.toString());
+ assertTrue(
+ out.toString().contains(status1.getPermission().toString()), out.toString());
int n = status1.getPermission().toShort();
int octal = (n>>>9&1)*1000 + (n>>>6&7)*100 + (n>>>3&7)*10 + (n&7);
- assertTrue(out.toString(),
- out.toString().contains(String.valueOf(octal)));
+ assertTrue(
+ out.toString().contains(String.valueOf(octal)), out.toString());
out.reset();
doFsStat(dfs.getConf(), "%F %a %A %u:%g %b %x %y %n", testDir1, testFile2);
n = status2.getPermission().toShort();
octal = (n>>>9&1)*1000 + (n>>>6&7)*100 + (n>>>3&7)*10 + (n&7);
- assertTrue(out.toString(), out.toString().contains(mtime1));
- assertTrue(out.toString(), out.toString().contains(atime1));
- assertTrue(out.toString(), out.toString().contains("regular file"));
- assertTrue(out.toString(),
- out.toString().contains(status2.getPermission().toString()));
- assertTrue(out.toString(),
- out.toString().contains(String.valueOf(octal)));
- assertTrue(out.toString(), out.toString().contains(mtime2));
- assertTrue(out.toString(), out.toString().contains(atime2));
+ assertTrue(out.toString().contains(mtime1), out.toString());
+ assertTrue(out.toString().contains(atime1), out.toString());
+ assertTrue(out.toString().contains("regular file"), out.toString());
+ assertTrue(
+ out.toString().contains(status2.getPermission().toString()), out.toString());
+ assertTrue(
+ out.toString().contains(String.valueOf(octal)), out.toString());
+ assertTrue(out.toString().contains(mtime2), out.toString());
+ assertTrue(out.toString().contains(atime2), out.toString());
}
private static void doFsStat(Configuration conf, String format, Path... files)
@@ -2123,8 +2124,8 @@ private static void doFsStat(Configuration conf, String format, Path... files)
if (files == null || files.length == 0) {
final String[] argv = (format == null ? new String[] {"-stat"} :
new String[] {"-stat", format});
- assertEquals("Should have failed with missing arguments",
- -1, ToolRunner.run(new FsShell(conf), argv));
+ assertEquals(
+ -1, ToolRunner.run(new FsShell(conf), argv), "Should have failed with missing arguments");
} else {
List<String> argv = new LinkedList<>();
argv.add("-stat");
@@ -2136,7 +2137,7 @@ private static void doFsStat(Configuration conf, String format, Path... files)
}
int ret = ToolRunner.run(new FsShell(conf), argv.toArray(new String[0]));
- assertEquals(argv + " returned non-zero status " + ret, 0, ret);
+ assertEquals(0, ret, argv + " returned non-zero status " + ret);
}
}
@@ -2198,7 +2199,7 @@ public void testInvalidShell() throws Exception {
DFSAdmin admin = new DFSAdmin();
admin.setConf(conf);
int res = admin.run(new String[] {"-refreshNodes"});
- assertEquals("expected to fail -1", res , -1);
+ assertEquals(res, -1, "expected to fail -1");
}
// Preserve Copy Option is -ptopxa (timestamps, ownership, permission, XATTR,
@@ -2238,7 +2239,7 @@ public void testCopyCommandsWithPreserveOption() throws Exception {
String[] argv = new String[] { "-cp", "-p", src.toUri().toString(),
target1.toUri().toString() };
int ret = ToolRunner.run(shell, argv);
- assertEquals("cp -p is not working", SUCCESS, ret);
+ assertEquals(SUCCESS, ret, "cp -p is not working");
FileStatus targetStatus = dfs.getFileStatus(target1);
assertEquals(mtime, targetStatus.getModificationTime());
assertEquals(atime, targetStatus.getAccessTime());
@@ -2257,7 +2258,7 @@ public void testCopyCommandsWithPreserveOption() throws Exception {
argv = new String[] { "-cp", "-ptop", src.toUri().toString(),
target2.toUri().toString() };
ret = ToolRunner.run(shell, argv);
- assertEquals("cp -ptop is not working", SUCCESS, ret);
+ assertEquals(SUCCESS, ret, "cp -ptop is not working");
targetStatus = dfs.getFileStatus(target2);
assertEquals(mtime, targetStatus.getModificationTime());
assertEquals(atime, targetStatus.getAccessTime());
@@ -2276,7 +2277,7 @@ public void testCopyCommandsWithPreserveOption() throws Exception {
argv = new String[] { "-cp", "-ptopx", src.toUri().toString(),
target3.toUri().toString() };
ret = ToolRunner.run(shell, argv);
- assertEquals("cp -ptopx is not working", SUCCESS, ret);
+ assertEquals(SUCCESS, ret, "cp -ptopx is not working");
targetStatus = dfs.getFileStatus(target3);
assertEquals(mtime, targetStatus.getModificationTime());
assertEquals(atime, targetStatus.getAccessTime());
@@ -2297,7 +2298,7 @@ public void testCopyCommandsWithPreserveOption() throws Exception {
argv = new String[] { "-cp", "-ptopa", src.toUri().toString(),
target4.toUri().toString() };
ret = ToolRunner.run(shell, argv);
- assertEquals("cp -ptopa is not working", SUCCESS, ret);
+ assertEquals(SUCCESS, ret, "cp -ptopa is not working");
targetStatus = dfs.getFileStatus(target4);
assertEquals(mtime, targetStatus.getModificationTime());
assertEquals(atime, targetStatus.getAccessTime());
@@ -2317,7 +2318,7 @@ public void testCopyCommandsWithPreserveOption() throws Exception {
argv = new String[] { "-cp", "-ptoa", src.toUri().toString(),
target5.toUri().toString() };
ret = ToolRunner.run(shell, argv);
- assertEquals("cp -ptoa is not working", SUCCESS, ret);
+ assertEquals(SUCCESS, ret, "cp -ptoa is not working");
targetStatus = dfs.getFileStatus(target5);
assertEquals(mtime, targetStatus.getModificationTime());
assertEquals(atime, targetStatus.getAccessTime());
@@ -2460,7 +2461,7 @@ private Path doCopyAndTest(FsShell shell, Path dest, Path src,
new String[] { "-cp", cpArgs, src.toUri().toString(),
target.toUri().toString() };
final int ret = ToolRunner.run(shell, argv);
- assertEquals("cp -p is not working", expectedExitCode, ret);
+ assertEquals(expectedExitCode, ret, "cp -p is not working");
return target;
}
@@ -2469,16 +2470,16 @@ private void checkXAttrs(FileSystem fs, Path target, boolean expectRaw,
final Map<String, byte[]> xattrs = fs.getXAttrs(target);
int expectedCount = 0;
if (expectRaw) {
- assertArrayEquals("raw.a1 has incorrect value",
- RAW_A1_VALUE, xattrs.get(RAW_A1));
+ assertArrayEquals(
+ RAW_A1_VALUE, xattrs.get(RAW_A1), "raw.a1 has incorrect value");
expectedCount++;
}
if (expectVanillaXAttrs) {
- assertArrayEquals("user.a1 has incorrect value",
- USER_A1_VALUE, xattrs.get(USER_A1));
+ assertArrayEquals(
+ USER_A1_VALUE, xattrs.get(USER_A1), "user.a1 has incorrect value");
expectedCount++;
}
- assertEquals("xattrs size mismatch", expectedCount, xattrs.size());
+ assertEquals(expectedCount, xattrs.size(), "xattrs size mismatch");
}
// verify cp -ptopxa option will preserve directory attributes.
@@ -2529,7 +2530,7 @@ public void testCopyCommandsToDirectoryWithPreserveOption()
String[] argv = new String[] { "-cp", "-p", srcDir.toUri().toString(),
targetDir1.toUri().toString() };
int ret = ToolRunner.run(shell, argv);
- assertEquals("cp -p is not working", SUCCESS, ret);
+ assertEquals(SUCCESS, ret, "cp -p is not working");
FileStatus targetStatus = dfs.getFileStatus(targetDir1);
assertEquals(mtime, targetStatus.getModificationTime());
assertEquals(atime, targetStatus.getAccessTime());
@@ -2548,7 +2549,7 @@ public void testCopyCommandsToDirectoryWithPreserveOption()
argv = new String[] { "-cp", "-ptop", srcDir.toUri().toString(),
targetDir2.toUri().toString() };
ret = ToolRunner.run(shell, argv);
- assertEquals("cp -ptop is not working", SUCCESS, ret);
+ assertEquals(SUCCESS, ret, "cp -ptop is not working");
targetStatus = dfs.getFileStatus(targetDir2);
assertEquals(mtime, targetStatus.getModificationTime());
assertEquals(atime, targetStatus.getAccessTime());
@@ -2567,7 +2568,7 @@ public void testCopyCommandsToDirectoryWithPreserveOption()
argv = new String[] { "-cp", "-ptopx", srcDir.toUri().toString(),
targetDir3.toUri().toString() };
ret = ToolRunner.run(shell, argv);
- assertEquals("cp -ptopx is not working", SUCCESS, ret);
+ assertEquals(SUCCESS, ret, "cp -ptopx is not working");
targetStatus = dfs.getFileStatus(targetDir3);
assertEquals(mtime, targetStatus.getModificationTime());
assertEquals(atime, targetStatus.getAccessTime());
@@ -2588,7 +2589,7 @@ public void testCopyCommandsToDirectoryWithPreserveOption()
argv = new String[] { "-cp", "-ptopa", srcDir.toUri().toString(),
targetDir4.toUri().toString() };
ret = ToolRunner.run(shell, argv);
- assertEquals("cp -ptopa is not working", SUCCESS, ret);
+ assertEquals(SUCCESS, ret, "cp -ptopa is not working");
targetStatus = dfs.getFileStatus(targetDir4);
assertEquals(mtime, targetStatus.getModificationTime());
assertEquals(atime, targetStatus.getAccessTime());
@@ -2608,7 +2609,7 @@ public void testCopyCommandsToDirectoryWithPreserveOption()
argv = new String[] { "-cp", "-ptoa", srcDir.toUri().toString(),
targetDir5.toUri().toString() };
ret = ToolRunner.run(shell, argv);
- assertEquals("cp -ptoa is not working", SUCCESS, ret);
+ assertEquals(SUCCESS, ret, "cp -ptoa is not working");
targetStatus = dfs.getFileStatus(targetDir5);
assertEquals(mtime, targetStatus.getModificationTime());
assertEquals(atime, targetStatus.getAccessTime());
@@ -2666,7 +2667,7 @@ public void testCopyCommandsPreserveAclAndStickyBit() throws Exception {
String[] argv = new String[] { "-cp", "-p", src.toUri().toString(),
target1.toUri().toString() };
int ret = ToolRunner.run(shell, argv);
- assertEquals("cp is not working", SUCCESS, ret);
+ assertEquals(SUCCESS, ret, "cp is not working");
FileStatus targetStatus = dfs.getFileStatus(target1);
assertEquals(mtime, targetStatus.getModificationTime());
assertEquals(atime, targetStatus.getAccessTime());
@@ -2683,7 +2684,7 @@ public void testCopyCommandsPreserveAclAndStickyBit() throws Exception {
argv = new String[] { "-cp", "-ptopa", src.toUri().toString(),
target2.toUri().toString() };
ret = ToolRunner.run(shell, argv);
- assertEquals("cp -ptopa is not working", SUCCESS, ret);
+ assertEquals(SUCCESS, ret, "cp -ptopa is not working");
targetStatus = dfs.getFileStatus(target2);
assertEquals(mtime, targetStatus.getModificationTime());
assertEquals(atime, targetStatus.getAccessTime());
@@ -2720,33 +2721,33 @@ public void testCopyCommandsWithForceOption() throws Exception {
// Tests for put
String[] argv = new String[] { "-put", "-f", localfilepath, testdir };
int res = ToolRunner.run(shell, argv);
- assertEquals("put -f is not working", SUCCESS, res);
+ assertEquals(SUCCESS, res, "put -f is not working");
argv = new String[] { "-put", localfilepath, testdir };
res = ToolRunner.run(shell, argv);
- assertEquals("put command itself is able to overwrite the file", ERROR,
- res);
+ assertEquals(ERROR,
+ res, "put command itself is able to overwrite the file");
// Tests for copyFromLocal
argv = new String[] { "-copyFromLocal", "-f", localfilepath, testdir };
res = ToolRunner.run(shell, argv);
- assertEquals("copyFromLocal -f is not working", SUCCESS, res);
+ assertEquals(SUCCESS, res, "copyFromLocal -f is not working");
argv = new String[] { "-copyFromLocal", localfilepath, testdir };
res = ToolRunner.run(shell, argv);
- assertEquals(
- "copyFromLocal command itself is able to overwrite the file", ERROR,
- res);
+ assertEquals(ERROR,
+ res,
+ "copyFromLocal command itself is able to overwrite the file");
// Tests for cp
argv = new String[] { "-cp", "-f", localfilepath, testdir };
res = ToolRunner.run(shell, argv);
- assertEquals("cp -f is not working", SUCCESS, res);
+ assertEquals(SUCCESS, res, "cp -f is not working");
argv = new String[] { "-cp", localfilepath, testdir };
res = ToolRunner.run(shell, argv);
- assertEquals("cp command itself is able to overwrite the file", ERROR,
- res);
+ assertEquals(ERROR,
+ res, "cp command itself is able to overwrite the file");
} finally {
if (null != shell)
shell.close();
@@ -2788,18 +2789,18 @@ public void testCopyFromLocalWithPermissionDenied() throws Exception {
// Tests for put
String[] argv = new String[] { "-put", localfilepath, testdir };
int res = ToolRunner.run(shell, argv);
- assertEquals("put is working", ERROR, res);
+ assertEquals(ERROR, res, "put is working");
String returned = out.toString();
- assertTrue(" outputs Permission denied error message",
- (returned.lastIndexOf("Permission denied") != -1));
+ assertTrue(
+ (returned.lastIndexOf("Permission denied") != -1), " outputs Permission denied error message");
// Tests for copyFromLocal
argv = new String[] { "-copyFromLocal", localfilepath, testdir };
res = ToolRunner.run(shell, argv);
- assertEquals("copyFromLocal -f is working", ERROR, res);
+ assertEquals(ERROR, res, "copyFromLocal -f is working");
returned = out.toString();
- assertTrue(" outputs Permission denied error message",
- (returned.lastIndexOf("Permission denied") != -1));
+ assertTrue(
+ (returned.lastIndexOf("Permission denied") != -1), " outputs Permission denied error message");
} finally {
if (bak != null) {
@@ -2841,8 +2842,8 @@ public void testSetrepLow() throws Exception {
try {
final FileSystem fs = cluster.getFileSystem();
- assertTrue("Unable to create test directory",
- fs.mkdirs(new Path(testdir)));
+ assertTrue(
+ fs.mkdirs(new Path(testdir)), "Unable to create test directory");
fs.create(hdfsFile, true).close();
@@ -2856,18 +2857,18 @@ public void testSetrepLow() throws Exception {
final String[] argv = new String[] { "-setrep", "1", hdfsFile.toString() };
try {
- assertEquals("Command did not return the expected exit code",
- 1, shell.run(argv));
+ assertEquals(
+ 1, shell.run(argv), "Command did not return the expected exit code");
} finally {
System.setOut(origOut);
System.setErr(origErr);
}
- assertTrue("Error message is not the expected error message"
- + bao.toString(), bao.toString().startsWith(
- "setrep: Requested replication factor of 1 is less than "
- + "the required minimum of 2 for /tmp/TestDFSShell-"
- + "testSetrepLow/testFileForSetrepLow"));
+ assertTrue(bao.toString().startsWith(
+ "setrep: Requested replication factor of 1 is less than "
+ + "the required minimum of 2 for /tmp/TestDFSShell-"
+ + "testSetrepLow/testFileForSetrepLow"), "Error message is not the expected error message"
+ + bao.toString());
} finally {
shell.close();
cluster.shutdown();
@@ -2951,19 +2952,19 @@ private void deleteFileUsingTrash(
final String trashFile = shell.getCurrentTrashDir() + "/" + testFile;
String[] argv = new String[] { "-rm", testFile };
int res = ToolRunner.run(shell, argv);
- assertEquals("rm failed", 0, res);
+ assertEquals(0, res, "rm failed");
if (serverTrash) {
- // If the server config was set we should use it unconditionally
- assertTrue("File not in trash", fs.exists(new Path(trashFile)));
+ // If the server config was set we should use it unconditionally
+ assertTrue(fs.exists(new Path(trashFile)), "File not in trash");
} else if (clientTrash) {
- // If the server config was not set but the client config was
- // set then we should use it
- assertTrue("File not in trashed", fs.exists(new Path(trashFile)));
+ // If the server config was not set but the client config was
+ // set then we should use it
+ assertTrue(fs.exists(new Path(trashFile)), "File not in trashed");
} else {
- // If neither was set then we should not have trashed the file
- assertFalse("File was not removed", fs.exists(new Path(testFile)));
- assertFalse("File was trashed", fs.exists(new Path(trashFile)));
+ // If neither was set then we should not have trashed the file
+ assertFalse(fs.exists(new Path(testFile)), "File was not removed");
+ assertFalse(fs.exists(new Path(trashFile)), "File was trashed");
}
} finally {
if (fs != null) {
@@ -2993,8 +2994,8 @@ public void testAppendToFile() throws Exception {
try {
FileSystem dfs = cluster.getFileSystem();
- assertTrue("Not a HDFS: " + dfs.getUri(),
- dfs instanceof DistributedFileSystem);
+ assertTrue(
+ dfs instanceof DistributedFileSystem, "Not a HDFS: " + dfs.getUri());
// Run appendToFile once, make sure that the target file is
// created and is of the right size.
@@ -3063,10 +3064,10 @@ public void testSetXAttrPermission() throws Exception {
public Object run() throws Exception {
int ret = ToolRunner.run(fshell, new String[]{
"-setfattr", "-n", "user.a1", "-v", "1234", "/foo"});
- assertEquals("Returned should be 1", 1, ret);
+ assertEquals(1, ret, "Returned should be 1");
String str = out.toString();
- assertTrue("Permission denied printed",
- str.indexOf("Permission denied") != -1);
+ assertTrue(
+ str.indexOf("Permission denied") != -1, "Permission denied printed");
out.reset();
return null;
}
@@ -3074,7 +3075,7 @@ public Object run() throws Exception {
int ret = ToolRunner.run(fshell, new String[]{
"-setfattr", "-n", "user.a1", "-v", "1234", "/foo"});
- assertEquals("Returned should be 0", 0, ret);
+ assertEquals(0, ret, "Returned should be 0");
out.reset();
// No permission to read and remove
@@ -3085,18 +3086,18 @@ public Object run() throws Exception {
// Read
int ret = ToolRunner.run(fshell, new String[]{
"-getfattr", "-n", "user.a1", "/foo"});
- assertEquals("Returned should be 1", 1, ret);
+ assertEquals(1, ret, "Returned should be 1");
String str = out.toString();
- assertTrue("Permission denied printed",
- str.indexOf("Permission denied") != -1);
+ assertTrue(
+ str.indexOf("Permission denied") != -1, "Permission denied printed");
out.reset();
// Remove
ret = ToolRunner.run(fshell, new String[]{
"-setfattr", "-x", "user.a1", "/foo"});
- assertEquals("Returned should be 1", 1, ret);
+ assertEquals(1, ret, "Returned should be 1");
str = out.toString();
- assertTrue("Permission denied printed",
- str.indexOf("Permission denied") != -1);
+ assertTrue(
+ str.indexOf("Permission denied") != -1, "Permission denied printed");
out.reset();
return null;
}
@@ -3183,8 +3184,8 @@ private void doSetXattr(ByteArrayOutputStream out, FsShell fshell,
("Incorrect results from getfattr. Expected: ");
sb.append(expect).append(" Full Result: ");
sb.append(str);
- assertTrue(sb.toString(),
- str.indexOf(expect) != -1);
+ assertTrue(
+ str.indexOf(expect) != -1, sb.toString());
}
for (int i = 0; i < dontExpectArr.length; i++) {
@@ -3193,8 +3194,8 @@ private void doSetXattr(ByteArrayOutputStream out, FsShell fshell,
("Incorrect results from getfattr. Didn't Expect: ");
sb.append(dontExpect).append(" Full Result: ");
sb.append(str);
- assertTrue(sb.toString(),
- str.indexOf(dontExpect) == -1);
+ assertTrue(
+ str.indexOf(dontExpect) == -1, sb.toString());
}
out.reset();
}
@@ -3250,7 +3251,7 @@ public void testSetXAttrPermissionAsDifferentOwner() throws Exception {
public Object run() throws Exception {
final int ret = ToolRunner.run(fshell, new String[]{
"-mkdir", root + "/foo"});
- assertEquals("Return should be 0", 0, ret);
+ assertEquals(0, ret, "Return should be 0");
out.reset();
return null;
}
@@ -3263,7 +3264,7 @@ public Object run() throws Exception {
// Give access to "other"
final int ret = ToolRunner.run(fshell, new String[]{
"-chmod", "707", root + "/foo"});
- assertEquals("Return should be 0", 0, ret);
+ assertEquals(0, ret, "Return should be 0");
out.reset();
return null;
}
@@ -3276,7 +3277,7 @@ public Object run() throws Exception {
public Object run() throws Exception {
final int ret = ToolRunner.run(fshell, new String[]{
"-setfattr", "-n", "user.a1", "-v", "1234", root + "/foo"});
- assertEquals("Returned should be 0", 0, ret);
+ assertEquals(0, ret, "Returned should be 0");
out.reset();
return null;
}
@@ -3289,7 +3290,7 @@ public Object run() throws Exception {
public Object run() throws Exception {
final int ret = ToolRunner.run(fshell, new String[]{
"-setfattr", "-n", "user.a1", "-v", "1234", root + "/foo"});
- assertEquals("Returned should be 0", 0, ret);
+ assertEquals(0, ret, "Returned should be 0");
out.reset();
return null;
}
@@ -3303,12 +3304,12 @@ public Object run() throws Exception {
// Read
int ret = ToolRunner.run(fshell, new String[] { "-getfattr", "-n",
"user.a1", root + "/foo" });
- assertEquals("Returned should be 0", 0, ret);
+ assertEquals(0, ret, "Returned should be 0");
out.reset();
// Remove
ret = ToolRunner.run(fshell, new String[] { "-setfattr", "-x",
"user.a1", root + "/foo" });
- assertEquals("Returned should be 0", 0, ret);
+ assertEquals(0, ret, "Returned should be 0");
out.reset();
return null;
}
@@ -3330,7 +3331,7 @@ public Object run() throws Exception {
// Give access to "other"
final int ret = ToolRunner.run(fshell, new String[]{
"-chmod", "700", root + "/foo"});
- assertEquals("Return should be 0", 0, ret);
+ assertEquals(0, ret, "Return should be 0");
out.reset();
return null;
}
@@ -3344,10 +3345,10 @@ public Object run() throws Exception {
// set
int ret = ToolRunner.run(fshell, new String[] { "-setfattr", "-n",
"user.a2", root + "/foo" });
- assertEquals("Returned should be 1", 1, ret);
+ assertEquals(1, ret, "Returned should be 1");
final String str = out.toString();
- assertTrue("Permission denied printed",
- str.indexOf("Permission denied") != -1);
+ assertTrue(
+ str.indexOf("Permission denied") != -1, "Permission denied printed");
out.reset();
return null;
}
@@ -3361,10 +3362,10 @@ public Object run() throws Exception {
// set
int ret = ToolRunner.run(fshell, new String[] { "-setfattr", "-x",
"user.a2", root + "/foo" });
- assertEquals("Returned should be 1", 1, ret);
+ assertEquals(1, ret, "Returned should be 1");
final String str = out.toString();
- assertTrue("Permission denied printed",
- str.indexOf("Permission denied") != -1);
+ assertTrue(
+ str.indexOf("Permission denied") != -1, "Permission denied printed");
out.reset();
return null;
}
@@ -3377,7 +3378,7 @@ public Object run() throws Exception {
// set
int ret = ToolRunner.run(fshell, new String[] { "-setfattr", "-n",
"trusted.a3", root + "/foo" });
- assertEquals("Returned should be 0", 0, ret);
+ assertEquals(0, ret, "Returned should be 0");
out.reset();
return null;
}
@@ -3415,7 +3416,7 @@ public void testGetFAttrErrors() throws Exception {
{
final int ret = ToolRunner.run(fshell, new String[] {
"-setfattr", "-n", "user.a1", "-v", "1234", p.toString()});
- assertEquals("Returned should be 0", 0, ret);
+ assertEquals(0, ret, "Returned should be 0");
out.reset();
}
@@ -3425,8 +3426,8 @@ public Object run() throws Exception {
int ret = ToolRunner.run(fshell, new String[] {
"-getfattr", "-n", "user.a1", p.toString()});
String str = out.toString();
- assertTrue("xattr value was incorrectly returned",
- str.indexOf("1234") == -1);
+ assertTrue(
+ str.indexOf("1234") == -1, "xattr value was incorrectly returned");
out.reset();
return null;
}
@@ -3436,10 +3437,10 @@ public Object run() throws Exception {
final int ret = ToolRunner.run(fshell, new String[]{
"-getfattr", "-n", "user.nonexistent", p.toString()});
String str = out.toString();
- assertTrue("xattr value was incorrectly returned",
- str.indexOf(
- "getfattr: At least one of the attributes provided was not found")
- >= 0);
+ assertTrue(
+ str.indexOf(
+ "getfattr: At least one of the attributes provided was not found")
+ >= 0, "xattr value was incorrectly returned");
out.reset();
}
} finally {
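One conversion above, assertTrue(false, StringUtils.stringifyException(e)), is a mechanical reordering of assertTrue(message, false). Jupiter's fail() states the intent directly and can carry the original exception as the cause; a minimal sketch against the surrounding run(int exitcode, String... options) helper, illustrative only:

    import static org.junit.jupiter.api.Assertions.assertEquals;
    import static org.junit.jupiter.api.Assertions.fail;

    try {
      assertEquals(exitcode, shell.run(args));
    } catch (Exception e) {
      // fail(message, cause) keeps the original stack trace attached to the failure.
      fail("shell.run threw " + e, e);
    }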
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShellGenericOptions.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShellGenericOptions.java
index 282dcf7b0066c..fd638944d5742 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShellGenericOptions.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShellGenericOptions.java
@@ -17,7 +17,7 @@
*/
package org.apache.hadoop.hdfs;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertTrue;
import java.io.File;
import java.io.FileNotFoundException;
@@ -29,7 +29,7 @@
import org.apache.hadoop.fs.FsShell;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.util.ToolRunner;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
public class TestDFSShellGenericOptions {
@@ -103,8 +103,8 @@ private void execute(String [] args, String namenode) {
ToolRunner.run(shell, args);
fs = FileSystem.get(DFSUtilClient.getNNUri(
DFSUtilClient.getNNAddress(namenode)), shell.getConf());
- assertTrue("Directory does not get created",
- fs.isDirectory(new Path("/data")));
+ assertTrue(
+ fs.isDirectory(new Path("/data")), "Directory does not get created");
fs.delete(new Path("/data"), true);
} catch (Exception e) {
System.err.println(e.getMessage());
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStartupVersions.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStartupVersions.java
index 860794c083e0e..0ae8aa73e6b74 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStartupVersions.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStartupVersions.java
@@ -19,8 +19,8 @@
import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType.DATA_NODE;
import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType.NAME_NODE;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
import java.io.File;
@@ -33,8 +33,8 @@
import org.apache.hadoop.hdfs.server.common.Storage;
import org.apache.hadoop.hdfs.server.common.StorageInfo;
import org.apache.hadoop.hdfs.server.datanode.DataNodeLayoutVersion;
-import org.junit.After;
import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
/**
* This test ensures the appropriate response (successful or failure) from
@@ -280,7 +280,7 @@ public void testVersions() throws Exception {
}
}
- @After
+ @AfterEach
public void tearDown() throws Exception {
LOG.info("Shutting down MiniDFSCluster");
if (cluster != null) {
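The hunk above moves tearDown() to @AfterEach while the @Test methods in this file stay on org.junit.Test, so the class is still discovered by the vintage engine, which does not run Jupiter lifecycle annotations. A minimal sketch of the pairing under which @AfterEach takes effect, assuming a full migration of the test methods is the eventual goal (class name is hypothetical):

    import org.junit.jupiter.api.AfterEach;
    import org.junit.jupiter.api.Test;

    public class TestDFSStartupVersionsSketch {
      @Test
      public void testVersions() throws Exception { /* ... */ }

      @AfterEach   // runs after each Jupiter @Test; ignored when methods use org.junit.Test
      public void tearDown() throws Exception { /* shut down the MiniDFSCluster */ }
    }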
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStorageStateRecovery.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStorageStateRecovery.java
index 60839dc17bd1b..aa496696425e4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStorageStateRecovery.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStorageStateRecovery.java
@@ -19,10 +19,7 @@
import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType.DATA_NODE;
import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType.NAME_NODE;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.*;
import java.io.File;
import java.io.IOException;
@@ -33,15 +30,15 @@
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
import org.apache.hadoop.hdfs.server.common.Storage;
import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
/**
-* This test ensures the appropriate response (successful or failure) from
-* the system when the system is started under various storage state and
-* version conditions.
-*/
+ * This test ensures the appropriate response (successful or failure) from
+ * the system when the system is started under various storage state and
+ * version conditions.
+ */
public class TestDFSStorageStateRecovery {
private static final Logger LOG = LoggerFactory.getLogger(
@@ -443,13 +440,13 @@ public void testBlockPoolStorageStates() throws Exception {
} // end numDirs loop
}
- @Before
+ @BeforeEach
public void setUp() throws Exception {
LOG.info("Setting up the directory structures.");
UpgradeUtilities.initialize();
}
- @After
+ @AfterEach
public void tearDown() throws Exception {
LOG.info("Shutting down MiniDFSCluster");
if (cluster != null) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedInputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedInputStream.java
index aedea3c8acde4..c72ff01b78b7d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedInputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedInputStream.java
@@ -40,11 +40,11 @@
import org.apache.hadoop.io.erasurecode.rawcoder.NativeRSRawErasureCoderFactory;
import org.apache.hadoop.io.erasurecode.rawcoder.RawErasureDecoder;
import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
import org.junit.Rule;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
import org.junit.rules.Timeout;
import java.io.IOException;
@@ -54,11 +54,7 @@
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY;
-import static org.junit.Assert.assertArrayEquals;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.*;
import static org.mockito.ArgumentMatchers.anyLong;
import static org.mockito.Mockito.doThrow;
import static org.mockito.Mockito.spy;
@@ -88,7 +84,7 @@ public ErasureCodingPolicy getEcPolicy() {
return StripedFileTestUtil.getDefaultECPolicy();
}
- @Before
+ @BeforeEach
public void setup() throws IOException {
/*
* Initialize erasure coding policy.
@@ -128,7 +124,7 @@ private void startUp() throws IOException {
.setErasureCodingPolicy(dirPath.toString(), ecPolicy.getName());
}
- @After
+ @AfterEach
public void tearDown() {
if (cluster != null) {
cluster.shutdown();
@@ -214,9 +210,9 @@ public void testPread() throws Exception {
int ret = in.read(startOffset, buf, 0, fileLen);
assertEquals(remaining, ret);
for (int i = 0; i < remaining; i++) {
- Assert.assertEquals("Byte at " + (startOffset + i) + " should be the " +
- "same",
- expected[startOffset + i], buf[i]);
+ Assertions.assertEquals(
+ expected[startOffset + i], buf[i], "Byte at " + (startOffset + i) + " should be the " +
+ "same");
}
}
in.close();
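This file keeps the JUnit 4 @Rule org.junit.rules.Timeout while the rest moves to Jupiter; the Jupiter-native counterpart is the @Timeout annotation, usable at method or class level. A hedged sketch with an illustrative 300-second budget; the actual limit is whatever the existing rule configures:

    import java.util.concurrent.TimeUnit;

    import org.junit.jupiter.api.Test;
    import org.junit.jupiter.api.Timeout;

    @Timeout(value = 300, unit = TimeUnit.SECONDS) // Jupiter replacement for @Rule Timeout
    @Test
    public void testPread() throws Exception {
      // unchanged test body
    }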
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStream.java
index 9044a6d0cb066..d83e0df7cfb6e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStream.java
@@ -18,9 +18,7 @@
package org.apache.hadoop.hdfs;
import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.Write.RECOVER_LEASE_ON_CLOSE_EXCEPTION_KEY;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.*;
import static org.mockito.Mockito.doThrow;
import java.io.ByteArrayInputStream;
@@ -47,10 +45,10 @@
import org.apache.hadoop.io.erasurecode.ErasureCodeNative;
import org.apache.hadoop.io.erasurecode.rawcoder.NativeRSRawErasureCoderFactory;
import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.After;
-import org.junit.Before;
import org.junit.Rule;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
import org.junit.rules.Timeout;
import org.slf4j.event.Level;
@@ -81,7 +79,7 @@ public ErasureCodingPolicy getEcPolicy() {
return StripedFileTestUtil.getDefaultECPolicy();
}
- @Before
+ @BeforeEach
public void setup() throws IOException {
/*
* Initialize erasure coding policy.
@@ -110,7 +108,7 @@ public void setup() throws IOException {
fs.getClient().setErasureCodingPolicy("/", ecPolicy.getName());
}
- @After
+ @AfterEach
public void tearDown() {
if (cluster != null) {
cluster.shutdown();
@@ -206,12 +204,12 @@ public void testStreamFlush() throws Exception {
final byte[] bytes = StripedFileTestUtil.generateBytes(blockSize *
dataBlocks * 3 + cellSize * dataBlocks + cellSize + 123);
try (FSDataOutputStream os = fs.create(new Path("/ec-file-1"))) {
- assertFalse(
- "DFSStripedOutputStream should not have hflush() capability yet!",
- os.hasCapability(StreamCapability.HFLUSH.getValue()));
- assertFalse(
- "DFSStripedOutputStream should not have hsync() capability yet!",
- os.hasCapability(StreamCapability.HSYNC.getValue()));
+ assertFalse(
+ os.hasCapability(StreamCapability.HFLUSH.getValue()),
+ "DFSStripedOutputStream should not have hflush() capability yet!");
+ assertFalse(
+ os.hasCapability(StreamCapability.HSYNC.getValue()),
+ "DFSStripedOutputStream should not have hsync() capability yet!");
try (InputStream is = new ByteArrayInputStream(bytes)) {
IOUtils.copyBytes(is, os, bytes.length);
os.hflush();
@@ -219,8 +217,8 @@ public void testStreamFlush() throws Exception {
os.hsync();
IOUtils.copyBytes(is, os, bytes.length);
}
- assertTrue("stream is not a DFSStripedOutputStream",
- os.getWrappedStream() instanceof DFSStripedOutputStream);
+ assertTrue(
+ os.getWrappedStream() instanceof DFSStripedOutputStream, "stream is not a DFSStripedOutputStream");
final DFSStripedOutputStream dfssos =
(DFSStripedOutputStream) os.getWrappedStream();
dfssos.hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
@@ -265,8 +263,8 @@ public void testExceptionInCloseECFileWithRecoverLease() throws Exception {
spyClient.create("/testExceptionInCloseECFileWithRecoverLease",
FsPermission.getFileDefault(), EnumSet.of(CreateFlag.CREATE),
(short) 3, 1024*1024, null, 1024, null);
- assertTrue("stream should be a DFSStripedOutputStream",
- dfsOutputStream instanceof DFSStripedOutputStream);
+ assertTrue(
+ dfsOutputStream instanceof DFSStripedOutputStream, "stream should be a DFSStripedOutputStream");
DFSOutputStream spyDFSOutputStream = Mockito.spy(dfsOutputStream);
doThrow(new IOException("Emulated IOException in close"))
.when(spyDFSOutputStream).completeFile(Mockito.any());
@@ -290,8 +288,8 @@ public void testExceptionInCloseECFileWithoutRecoverLease() throws Exception {
spyClient.create("/testExceptionInCloseECFileWithoutRecoverLease",
FsPermission.getFileDefault(), EnumSet.of(CreateFlag.CREATE),
(short) 3, 1024*1024, null, 1024, null);
- assertTrue("stream should be a DFSStripedOutputStream",
- dfsOutputStream instanceof DFSStripedOutputStream);
+ assertTrue(
+ dfsOutputStream instanceof DFSStripedOutputStream, "stream should be a DFSStripedOutputStream");
DFSOutputStream spyDFSOutputStream = Mockito.spy(dfsOutputStream);
doThrow(new IOException("Emulated IOException in close"))
.when(spyDFSOutputStream).completeFile(Mockito.any());
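The hunks above move this file's lifecycle annotations and assertions to JUnit Jupiter while the `org.junit.Rule` and `org.junit.rules.Timeout` imports stay behind; the JUnit 4 rule mechanism is not applied by the Jupiter engine, so a Jupiter-based run would normally switch to the `@Timeout` annotation. A minimal sketch of that replacement, assuming JUnit Jupiter 5.5+; the class name and the 90-second budget are illustrative, not taken from the patch:

    import java.util.concurrent.TimeUnit;
    import org.junit.jupiter.api.Test;
    import org.junit.jupiter.api.Timeout;

    // Hypothetical sketch: a class-level @Timeout applies to every test method,
    // mirroring what a global Timeout @Rule used to do for the whole class.
    @Timeout(value = 90, unit = TimeUnit.SECONDS)
    class StripedOutputStreamTimeoutSketch {
      @Test
      void writeCompletesWithinBudget() {
        // test body elided; the engine fails the test if it runs longer than 90 seconds
      }
    }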
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure.java
index ff521463f6d8c..0053f80dd0722 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure.java
@@ -33,8 +33,8 @@
import java.util.ArrayList;
import java.util.Arrays;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
/**
* Test striped file write operation with data node failures with fixed
@@ -105,7 +105,7 @@ public void testAddBlockWhenNoSufficientDataBlockNumOfNodes()
cluster.triggerHeartbeats();
DatanodeInfo[] info = dfs.getClient().datanodeReport(
DatanodeReportType.LIVE);
- assertEquals("Mismatches number of live Dns", numDatanodes, info.length);
+ assertEquals(numDatanodes, info.length, "Mismatches number of live Dns");
final Path dirFile = new Path(dir, "ecfile");
LambdaTestUtils.intercept(
IOException.class,
@@ -218,7 +218,7 @@ public void testAddBlockWhenNoSufficientParityNumOfNodes()
cluster.triggerHeartbeats();
DatanodeInfo[] info = dfs.getClient().datanodeReport(
DatanodeReportType.LIVE);
- assertEquals("Mismatches number of live Dns", numDatanodes, info.length);
+ assertEquals(numDatanodes, info.length, "Mismatches number of live Dns");
Path srcPath = new Path(dir, "testAddBlockWhenNoSufficientParityNodes");
int fileLength = cellSize - 1000;
final byte[] expected = StripedFileTestUtil.generateBytes(fileLength);
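The assertEquals changes in this file follow the general rule of the migration: the failure message moves from the first parameter in JUnit 4 to the last parameter in Jupiter, while the expected and actual values keep their positions. A minimal sketch of the new signature, with hypothetical values standing in for the datanode report:

    import static org.junit.jupiter.api.Assertions.assertEquals;
    import org.junit.jupiter.api.Test;

    class MessageLastSketch {
      @Test
      void messageIsTheLastArgument() {
        int numDatanodes = 9;    // illustrative values only
        int liveReported = 9;
        // JUnit 4: assertEquals("Mismatches number of live Dns", numDatanodes, liveReported);
        assertEquals(numDatanodes, liveReported, "Mismatches number of live Dns");
      }
    }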
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailureBase.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailureBase.java
index bbe991dacc781..2f2878c1b716e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailureBase.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailureBase.java
@@ -41,8 +41,8 @@
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.slf4j.event.Level;
-import org.junit.Assert;
-import org.junit.Before;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeEach;
import java.io.IOException;
import java.util.ArrayList;
@@ -54,8 +54,8 @@
import java.util.Stack;
import java.util.concurrent.atomic.AtomicInteger;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
/**
* Base class for test striped file write operation.
@@ -95,7 +95,7 @@ public ECSchema getEcSchema() {
/*
* Initialize erasure coding policy.
*/
- @Before
+ @BeforeEach
public void init() {
ecPolicy = new ErasureCodingPolicy(getEcSchema(), cellSize);
dataBlocks = ecPolicy.getNumDataUnits();
@@ -245,7 +245,7 @@ void runTest(final int length) {
final String err = "failed, dn=" + dn + ", length=" + length
+ StringUtils.stringifyException(e);
LOG.error(err);
- Assert.fail(err);
+ Assertions.fail(err);
} finally {
tearDown();
}
@@ -389,15 +389,15 @@ static DatanodeInfo getDatanodes(StripedDataStreamer streamer) {
}
if (datanodes != null) {
- Assert.assertEquals(1, datanodes.length);
- Assert.assertNotNull(datanodes[0]);
+ Assertions.assertEquals(1, datanodes.length);
+ Assertions.assertNotNull(datanodes[0]);
return datanodes[0];
}
try {
Thread.sleep(100);
} catch (InterruptedException ie) {
- Assert.fail(StringUtils.stringifyException(ie));
+ Assertions.fail(StringUtils.stringifyException(ie));
return null;
}
}
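The base class now calls `Assertions.fail(err)` with a stringified exception. Jupiter also offers `fail(String message, Throwable cause)`, which keeps the original stack trace attached to the failure; adopting it would be a follow-up choice, not something this patch does. A small sketch under that assumption, with an illustrative exception standing in for the real write path:

    import static org.junit.jupiter.api.Assertions.fail;
    import java.io.IOException;
    import org.junit.jupiter.api.Test;

    class FailWithCauseSketch {
      @Test
      void attachesTheCauseToTheFailure() {
        try {
          throw new IOException("simulated datanode failure"); // stand-in for the striped write
        } catch (Exception e) {
          // The cause travels with the AssertionFailedError instead of being
          // flattened into the message via StringUtils.stringifyException.
          fail("write with datanode failure did not succeed", e);
        }
      }
    }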
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgrade.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgrade.java
index ddf5461181b1f..a3972bbfc4cf2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgrade.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgrade.java
@@ -22,10 +22,7 @@
import static org.apache.hadoop.hdfs.server.namenode.NNStorage.getImageFileName;
import static org.apache.hadoop.hdfs.server.namenode.NNStorage.getInProgressEditsFileName;
import static org.apache.hadoop.test.GenericTestUtils.assertExists;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.*;
import java.io.File;
import java.io.IOException;
@@ -45,18 +42,17 @@
import org.apache.hadoop.hdfs.server.namenode.TestParallelImageWrite;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.util.StringUtils;
-import org.junit.BeforeClass;
-import org.junit.Ignore;
import org.junit.Test;
-
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Disabled;
import org.apache.hadoop.thirdparty.com.google.common.base.Charsets;
import org.apache.hadoop.thirdparty.com.google.common.base.Joiner;
/**
-* This test ensures the appropriate response (successful or failure) from
-* the system when the system is upgraded under various storage state and
-* version conditions.
-*/
+ * This test ensures the appropriate response (successful or failure) from
+ * the system when the system is upgraded under various storage state and
+ * version conditions.
+ */
public class TestDFSUpgrade {
// TODO: Avoid hard-coding expected_txid. The test should be more robust.
@@ -171,16 +167,16 @@ void startNameNodeShouldFail(StartupOption operation,
} catch (Exception e) {
// expect exception
if (exceptionClass != null) {
- assertTrue("Caught exception is not of expected class "
- + exceptionClass.getSimpleName() + ": "
- + StringUtils.stringifyException(e),
- exceptionClass.isInstance(e));
+ assertTrue(exceptionClass.isInstance(e),
+ "Caught exception is not of expected class "
+ + exceptionClass.getSimpleName() + ": "
+ + StringUtils.stringifyException(e));
}
if (messagePattern != null) {
- assertTrue("Caught exception message string does not match expected pattern \""
- + messagePattern.pattern() + "\" : "
- + StringUtils.stringifyException(e),
- messagePattern.matcher(e.getMessage()).find());
+ assertTrue(messagePattern.matcher(e.getMessage()).find(),
+ "Caught exception message string does not match expected pattern \""
+ + messagePattern.pattern() + "\" : "
+ + StringUtils.stringifyException(e));
}
LOG.info("Successfully detected expected NameNode startup failure.");
}
@@ -195,8 +191,8 @@ void startNameNodeShouldFail(StartupOption operation,
*/
void startBlockPoolShouldFail(StartupOption operation, String bpid) throws IOException {
cluster.startDataNodes(conf, 1, false, operation, null); // should fail
- assertFalse("Block pool " + bpid + " should have failed to start",
- cluster.getDataNodes().get(0).isBPServiceAlive(bpid));
+ assertFalse(cluster.getDataNodes().get(0).isBPServiceAlive(bpid),
+ "Block pool " + bpid + " should have failed to start");
}
/**
@@ -212,7 +208,7 @@ private MiniDFSCluster createCluster() throws IOException {
.build();
}
- @BeforeClass
+ @BeforeAll
public static void initialize() throws Exception {
UpgradeUtilities.initialize();
}
@@ -404,7 +400,7 @@ public void testUpgrade() throws Exception {
* Stand-alone test to detect failure of one SD during parallel upgrade.
* At this time, can only be done with manual hack of {@link FSImage.doUpgrade()}
*/
- @Ignore
+ @Disabled
public void testUpgrade4() throws Exception {
int numDirs = 4;
conf = new HdfsConfiguration();
@@ -432,7 +428,7 @@ private void deleteStorageFilesWithPrefix(String[] nameNodeDirs, String prefix)
File currentDir = new File(baseDir, "current");
for (File f : currentDir.listFiles()) {
if (f.getName().startsWith(prefix)) {
- assertTrue("Deleting " + f, f.delete());
+ assertTrue(f.delete(), "Deleting " + f);
}
}
}
@@ -446,7 +442,7 @@ public void testUpgradeFromPreUpgradeLVFails() throws IOException {
fail("Expected IOException is not thrown");
}
- @Ignore
+ @Disabled
public void test203LayoutVersion() {
for (int lv : Storage.LAYOUT_VERSIONS_203) {
assertTrue(Storage.is203LayoutVersion(lv));
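This file swaps `@BeforeClass` for `@BeforeAll` and `@Ignore` for `@Disabled` but still imports `org.junit.Test`; the Jupiter annotations only take effect once the test methods themselves run on the Jupiter engine, so the remaining JUnit 4 `@Test` import is worth watching in a follow-up. A minimal all-Jupiter sketch of the same lifecycle shape, with illustrative class and method names:

    import org.junit.jupiter.api.BeforeAll;
    import org.junit.jupiter.api.Disabled;
    import org.junit.jupiter.api.Test;

    class UpgradeLifecycleSketch {
      @BeforeAll
      static void initialize() {
        // one-time setup, formerly annotated with @BeforeClass
      }

      @Disabled("requires a manual FSImage.doUpgrade() hack")
      @Test
      void upgradeWithFailedStorageDirectory() {
        // body elided; @Disabled replaces @Ignore and is honoured by the Jupiter engine
      }
    }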
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java
index 5469ebbb757c2..27a23967e79d6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java
@@ -48,9 +48,9 @@
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.util.StringUtils;
import org.apache.log4j.Logger;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
-import static org.junit.Assert.*;
+import static org.junit.jupiter.api.Assertions.*;
/**
* This tests data transfer protocol handling in the Datanode. It sends
@@ -161,7 +161,7 @@ private void verifyChecksum(String path, long checksum) throws IOException {
// The paths are expected to be listed in the same order
// as they are traversed here.
assertEquals(info.path, path);
- assertEquals("Checking checksum for " + path, info.checksum, checksum);
+ assertEquals(info.checksum, checksum, "Checking checksum for " + path);
}
}
@@ -251,9 +251,9 @@ public void testFailOnPreUpgradeImage() throws IOException {
// Set up a fake NN storage that looks like an ancient Hadoop dir circa 0.3.0
FileUtil.fullyDelete(namenodeStorage);
- assertTrue("Make " + namenodeStorage, namenodeStorage.mkdirs());
+ assertTrue(namenodeStorage.mkdirs(), "Make " + namenodeStorage);
File imageDir = new File(namenodeStorage, "image");
- assertTrue("Make " + imageDir, imageDir.mkdirs());
+ assertTrue(imageDir.mkdirs(), "Make " + imageDir);
// Hex dump of a formatted image from Hadoop 0.3.0
File imageFile = new File(imageDir, "fsimage");
@@ -333,7 +333,7 @@ public void testUpgradeFromCorruptRel22Image() throws IOException {
}
int md5failures = appender.countExceptionsWithMessage(
" is corrupt with MD5 checksum of ");
- assertEquals("Upgrade did not fail with bad MD5", 1, md5failures);
+ assertEquals(1, md5failures, "Upgrade did not fail with bad MD5");
}
}
@@ -395,10 +395,10 @@ public void testUpgradeFromRel1ReservedImage() throws Exception {
}
}
for (String s: expected) {
- assertTrue("Did not find expected path " + s, found.contains(s));
+ assertTrue(found.contains(s), "Did not find expected path " + s);
}
- assertEquals("Found an unexpected path while listing filesystem",
- found.size(), expected.length);
+ assertEquals(found.size(), expected.length,
+ "Found an unexpected path while listing filesystem");
}
} finally {
if (cluster != null) {
@@ -459,10 +459,10 @@ public void testUpgradeFromRel023ReservedImage() throws Exception {
}
}
for (String s: expected) {
- assertTrue("Did not find expected path " + s, found.contains(s));
+ assertTrue(found.contains(s), "Did not find expected path " + s);
}
- assertEquals("Found an unexpected path while listing filesystem",
- found.size(), expected.length);
+ assertEquals(found.size(), expected.length,
+ "Found an unexpected path while listing filesystem");
}
} finally {
if (cluster != null) {
@@ -554,10 +554,10 @@ public void testUpgradeFromRel2ReservedImage() throws Exception {
}
}
for (String s: expected) {
- assertTrue("Did not find expected path " + s, found.contains(s));
+ assertTrue(found.contains(s), "Did not find expected path " + s);
}
- assertEquals("Found an unexpected path while listing filesystem",
- found.size(), expected.length);
+ assertEquals(found.size(), expected.length,
+ "Found an unexpected path while listing filesystem");
}
} finally {
if (cluster != null) {
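In the reordered assertEquals calls above, the first two arguments keep their original positions, so `found.size()` still sits in the slot that Jupiter reports as the expected value. The Jupiter convention is `assertEquals(expected, actual, message)`; a small sketch with hypothetical data showing that ordering:

    import static org.junit.jupiter.api.Assertions.assertEquals;
    import java.util.Arrays;
    import java.util.HashSet;
    import java.util.Set;
    import org.junit.jupiter.api.Test;

    class ExpectedActualOrderSketch {
      @Test
      void expectedValueComesFirst() {
        String[] expectedPaths = {"/.reserved", "/edits"};           // illustrative paths
        Set<String> found = new HashSet<>(Arrays.asList(expectedPaths));
        // expected first, actual second, message last
        assertEquals(expectedPaths.length, found.size(),
            "Found an unexpected path while listing filesystem");
      }
    }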
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java
index 9a024c3084586..3e5a76d4f16e2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java
@@ -38,13 +38,8 @@
import static org.apache.hadoop.test.GenericTestUtils.assertExceptionContains;
import static org.apache.hadoop.test.PlatformAssumptions.assumeNotWindows;
import static org.hamcrest.CoreMatchers.not;
-import static org.junit.Assert.assertArrayEquals;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertThat;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.junit.jupiter.api.Assertions.*;
import java.io.File;
import java.io.IOException;
@@ -81,9 +76,9 @@
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.test.LambdaTestUtils;
import org.apache.hadoop.util.Sets;
-import org.junit.Assert;
-import org.junit.Before;
import org.junit.Test;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeEach;
public class TestDFSUtil {
@@ -96,7 +91,7 @@ public class TestDFSUtil {
/**
* Reset to default UGI settings since some tests change them.
*/
- @Before
+ @BeforeEach
public void resetUGI() {
UserGroupInformation.setConfiguration(new Configuration());
}
@@ -127,8 +122,8 @@ public void testLocatedBlocks2Locations() {
BlockLocation[] bs = DFSUtilClient.locatedBlocks2Locations(lbs);
- assertTrue("expected 2 blocks but got " + bs.length,
- bs.length == 2);
+ assertTrue(bs.length == 2,
+ "expected 2 blocks but got " + bs.length);
int corruptCount = 0;
for (BlockLocation b: bs) {
@@ -137,8 +132,8 @@ public void testLocatedBlocks2Locations() {
}
}
- assertTrue("expected 1 corrupt files but got " + corruptCount,
- corruptCount == 1);
+ assertTrue(corruptCount == 1,
+ "expected 1 corrupt files but got " + corruptCount);
// test an empty location
bs = DFSUtilClient.locatedBlocks2Locations(new LocatedBlocks());
@@ -684,11 +679,11 @@ public void testGetNNUris() throws Exception {
conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, "hdfs://" + NN1_ADDR);
Collection uris = DFSUtil.getInternalNsRpcUris(conf);
- assertEquals("Incorrect number of URIs returned", 2, uris.size());
- assertTrue("Missing URI for name service ns1",
- uris.contains(new URI("hdfs://" + NS1_NN1_ADDR)));
- assertTrue("Missing URI for service address",
- uris.contains(new URI("hdfs://" + NN2_ADDR)));
+ assertEquals(2, uris.size(), "Incorrect number of URIs returned");
+ assertTrue(uris.contains(new URI("hdfs://" + NS1_NN1_ADDR)),
+ "Missing URI for name service ns1");
+ assertTrue(uris.contains(new URI("hdfs://" + NN2_ADDR)),
+ "Missing URI for service address");
conf = new HdfsConfiguration();
conf.set(DFS_NAMESERVICES, "ns1,ns2");
@@ -721,15 +716,15 @@ public void testGetNNUris() throws Exception {
+ "IPFailoverProxyProvider");
uris = DFSUtil.getInternalNsRpcUris(conf);
- assertEquals("Incorrect number of URIs returned", 3, uris.size());
- assertTrue("Missing URI for RPC address",
- uris.contains(new URI("hdfs://" + NN1_ADDR)));
- assertTrue("Missing URI for name service ns2",
- uris.contains(new URI(HdfsConstants.HDFS_URI_SCHEME + "://" +
- NS1_NN_ADDR)));
- assertTrue("Missing URI for name service ns2",
- uris.contains(new URI(HdfsConstants.HDFS_URI_SCHEME + "://" +
- NS2_NN_ADDR)));
+ assertEquals(3, uris.size(), "Incorrect number of URIs returned");
+ assertTrue(
+ uris.contains(new URI("hdfs://" + NN1_ADDR)), "Missing URI for RPC address");
+ assertTrue(
+ uris.contains(new URI(HdfsConstants.HDFS_URI_SCHEME + "://" +
+ NS1_NN_ADDR)), "Missing URI for name service ns2");
+ assertTrue(
+ uris.contains(new URI(HdfsConstants.HDFS_URI_SCHEME + "://" +
+ NS2_NN_ADDR)), "Missing URI for name service ns2");
/**
* Second, test ns1 with {@link ConfiguredFailoverProxyProvider} which does
@@ -740,57 +735,57 @@ public void testGetNNUris() throws Exception {
+ "ConfiguredFailoverProxyProvider");
uris = DFSUtil.getInternalNsRpcUris(conf);
- assertEquals("Incorrect number of URIs returned", 3, uris.size());
- assertTrue("Missing URI for name service ns1",
- uris.contains(new URI("hdfs://ns1")));
- assertTrue("Missing URI for name service ns2",
- uris.contains(new URI("hdfs://" + NS2_NN_ADDR)));
- assertTrue("Missing URI for RPC address",
- uris.contains(new URI("hdfs://" + NN1_ADDR)));
+ assertEquals(3, uris.size(), "Incorrect number of URIs returned");
+ assertTrue(uris.contains(new URI("hdfs://ns1")),
+ "Missing URI for name service ns1");
+ assertTrue(uris.contains(new URI("hdfs://" + NS2_NN_ADDR)),
+ "Missing URI for name service ns2");
+ assertTrue(uris.contains(new URI("hdfs://" + NN1_ADDR)),
+ "Missing URI for RPC address");
// Make sure that non-HDFS URIs in fs.defaultFS don't get included.
conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY,
"viewfs://vfs-name.example.com");
uris = DFSUtil.getInternalNsRpcUris(conf);
- assertEquals("Incorrect number of URIs returned", 3, uris.size());
- assertTrue("Missing URI for name service ns1",
- uris.contains(new URI("hdfs://ns1")));
- assertTrue("Missing URI for name service ns2",
- uris.contains(new URI("hdfs://" + NS2_NN_ADDR)));
- assertTrue("Missing URI for RPC address",
- uris.contains(new URI("hdfs://" + NN1_ADDR)));
+ assertEquals(3, uris.size(), "Incorrect number of URIs returned");
+ assertTrue(uris.contains(new URI("hdfs://ns1")),
+ "Missing URI for name service ns1");
+ assertTrue(uris.contains(new URI("hdfs://" + NS2_NN_ADDR)),
+ "Missing URI for name service ns2");
+ assertTrue(uris.contains(new URI("hdfs://" + NN1_ADDR)),
+ "Missing URI for RPC address");
// Make sure that an HA URI being the default URI doesn't result in multiple
// entries being returned.
conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, "hdfs://ns1");
uris = DFSUtil.getInternalNsRpcUris(conf);
- assertEquals("Incorrect number of URIs returned", 3, uris.size());
- assertTrue("Missing URI for name service ns1",
- uris.contains(new URI("hdfs://ns1")));
- assertTrue("Missing URI for name service ns2",
- uris.contains(new URI("hdfs://" + NS2_NN_ADDR)));
- assertTrue("Missing URI for RPC address",
- uris.contains(new URI("hdfs://" + NN1_ADDR)));
+ assertEquals(3, uris.size(), "Incorrect number of URIs returned");
+ assertTrue(uris.contains(new URI("hdfs://ns1")),
+ "Missing URI for name service ns1");
+ assertTrue(uris.contains(new URI("hdfs://" + NS2_NN_ADDR)),
+ "Missing URI for name service ns2");
+ assertTrue(uris.contains(new URI("hdfs://" + NN1_ADDR)),
+ "Missing URI for RPC address");
// Check that the default URI is returned if there's nothing else to return.
conf = new HdfsConfiguration();
conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, "hdfs://" + NN1_ADDR);
uris = DFSUtil.getInternalNsRpcUris(conf);
- assertEquals("Incorrect number of URIs returned", 1, uris.size());
- assertTrue("Missing URI for RPC address (defaultFS)",
- uris.contains(new URI("hdfs://" + NN1_ADDR)));
+ assertEquals(1, uris.size(), "Incorrect number of URIs returned");
+ assertTrue(uris.contains(new URI("hdfs://" + NN1_ADDR)),
+ "Missing URI for RPC address (defaultFS)");
// Check that the RPC address is the only address returned when the RPC
// and the default FS is given.
conf.set(DFS_NAMENODE_RPC_ADDRESS_KEY, NN2_ADDR);
uris = DFSUtil.getInternalNsRpcUris(conf);
- assertEquals("Incorrect number of URIs returned", 1, uris.size());
- assertTrue("Missing URI for RPC address",
- uris.contains(new URI("hdfs://" + NN2_ADDR)));
+ assertEquals(1, uris.size(), "Incorrect number of URIs returned");
+ assertTrue(uris.contains(new URI("hdfs://" + NN2_ADDR)),
+ "Missing URI for RPC address");
// Make sure that when a service RPC address is used that is distinct from
// the client RPC address, and that client RPC address is also used as the
@@ -799,9 +794,9 @@ public void testGetNNUris() throws Exception {
conf.set(DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, NN1_ADDR);
uris = DFSUtil.getInternalNsRpcUris(conf);
- assertEquals("Incorrect number of URIs returned", 1, uris.size());
- assertTrue("Missing URI for service ns1",
- uris.contains(new URI("hdfs://" + NN1_ADDR)));
+ assertEquals(1, uris.size(), "Incorrect number of URIs returned");
+ assertTrue(uris.contains(new URI("hdfs://" + NN1_ADDR)),
+ "Missing URI for service ns1");
// Check that when the default FS and service address are given, but
// the RPC address isn't, that only the service address is returned.
@@ -810,9 +805,9 @@ public void testGetNNUris() throws Exception {
conf.set(DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, NN1_SRVC_ADDR);
uris = DFSUtil.getInternalNsRpcUris(conf);
- assertEquals("Incorrect number of URIs returned", 1, uris.size());
- assertTrue("Missing URI for service address",
- uris.contains(new URI("hdfs://" + NN1_SRVC_ADDR)));
+ assertEquals(1, uris.size(), "Incorrect number of URIs returned");
+ assertTrue(uris.contains(new URI("hdfs://" + NN1_SRVC_ADDR)),
+ "Missing URI for service address");
}
@Test
@@ -840,9 +835,9 @@ public void testGetNNUris2() throws Exception {
Collection uris = DFSUtil.getInternalNsRpcUris(conf);
- assertEquals("Incorrect number of URIs returned", 1, uris.size());
- assertTrue("Missing URI for name service ns1",
- uris.contains(new URI("hdfs://ns1")));
+ assertEquals(1, uris.size(), "Incorrect number of URIs returned");
+ assertTrue(uris.contains(new URI("hdfs://ns1")),
+ "Missing URI for name service ns1");
}
@Test (timeout=15000)
@@ -877,19 +872,19 @@ public void testGetSpnegoKeytabKey() {
HdfsConfiguration conf = new HdfsConfiguration();
String defaultKey = "default.spengo.key";
conf.unset(DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_KEYTAB_KEY);
- assertEquals("Test spnego key in config is null", defaultKey,
- DFSUtil.getSpnegoKeytabKey(conf, defaultKey));
+ assertEquals(defaultKey,
+ DFSUtil.getSpnegoKeytabKey(conf, defaultKey), "Test spnego key in config is null");
conf.set(DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_KEYTAB_KEY, "");
- assertEquals("Test spnego key is empty", defaultKey,
- DFSUtil.getSpnegoKeytabKey(conf, defaultKey));
+ assertEquals(defaultKey,
+ DFSUtil.getSpnegoKeytabKey(conf, defaultKey), "Test spnego key is empty");
String spengoKey = "spengo.key";
conf.set(DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_KEYTAB_KEY,
spengoKey);
- assertEquals("Test spnego key is NOT null",
- DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_KEYTAB_KEY,
- DFSUtil.getSpnegoKeytabKey(conf, defaultKey));
+ assertEquals(DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_KEYTAB_KEY,
+ DFSUtil.getSpnegoKeytabKey(conf, defaultKey),
+ "Test spnego key is NOT null");
}
@Test(timeout=10000)
@@ -1016,15 +1011,15 @@ public void testGetPassword() throws Exception {
DFS_SERVER_HTTPS_TRUSTSTORE_PASSWORD_KEY).getCredential());
// use WebAppUtils as would be used by loadSslConfiguration
- Assert.assertEquals("keypass",
+ Assertions.assertEquals("keypass",
DFSUtil.getPassword(conf, DFS_SERVER_HTTPS_KEYPASSWORD_KEY));
- Assert.assertEquals("storepass",
+ Assertions.assertEquals("storepass",
DFSUtil.getPassword(conf, DFS_SERVER_HTTPS_KEYSTORE_PASSWORD_KEY));
- Assert.assertEquals("trustpass",
+ Assertions.assertEquals("trustpass",
DFSUtil.getPassword(conf, DFS_SERVER_HTTPS_TRUSTSTORE_PASSWORD_KEY));
// let's make sure that a password that doesn't exist returns null
- Assert.assertEquals(null, DFSUtil.getPassword(conf,"invalid-alias"));
+ Assertions.assertEquals(null, DFSUtil.getPassword(conf,"invalid-alias"));
}
@Test
@@ -1065,20 +1060,20 @@ public void testGetNNServiceRpcAddressesForNsIds() throws IOException {
public void testEncryptionProbe() throws Throwable {
Configuration conf = new Configuration(false);
conf.unset(CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH);
- assertFalse("encryption enabled on no provider key",
- DFSUtilClient.isHDFSEncryptionEnabled(conf));
+ assertFalse(DFSUtilClient.isHDFSEncryptionEnabled(conf),
+ "encryption enabled on no provider key");
conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH,
"");
- assertFalse("encryption enabled on empty provider key",
- DFSUtilClient.isHDFSEncryptionEnabled(conf));
+ assertFalse(DFSUtilClient.isHDFSEncryptionEnabled(conf),
+ "encryption enabled on empty provider key");
conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH,
"\n\t\n");
- assertFalse("encryption enabled on whitespace provider key",
- DFSUtilClient.isHDFSEncryptionEnabled(conf));
+ assertFalse(DFSUtilClient.isHDFSEncryptionEnabled(conf),
+ "encryption enabled on whitespace provider key");
conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH,
"http://hadoop.apache.org");
- assertTrue("encryption disabled on valid provider key",
- DFSUtilClient.isHDFSEncryptionEnabled(conf));
+ assertTrue(DFSUtilClient.isHDFSEncryptionEnabled(conf),
+ "encryption disabled on valid provider key");
}
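`assertThat` is not part of the Jupiter `Assertions` class, so this file now takes it from Hamcrest's `MatcherAssert` while keeping the existing `not(...)` matchers. A minimal sketch of the relocated import in use, assuming Hamcrest stays on the test classpath; the values are illustrative:

    import static org.hamcrest.CoreMatchers.is;
    import static org.hamcrest.CoreMatchers.not;
    import static org.hamcrest.MatcherAssert.assertThat;
    import org.junit.jupiter.api.Test;

    class MatcherAssertSketch {
      @Test
      void assertThatNowComesFromHamcrest() {
        String nameservice = "ns1";                  // illustrative value
        assertThat(nameservice, is("ns1"));          // same matcher style as before
        assertThat(nameservice, is(not("ns2")));
      }
    }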
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataStream.java
index c57ef941f0ef1..0cb732c8b27b4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataStream.java
@@ -27,15 +27,15 @@
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
public class TestDataStream {
static MiniDFSCluster cluster;
static int PACKET_SIZE = 1024;
- @BeforeClass
+ @BeforeAll
public static void setup() throws IOException {
Configuration conf = new Configuration();
conf.setInt(HdfsClientConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY,
@@ -77,7 +77,7 @@ public void testDfsClient() throws IOException, InterruptedException {
"Slow ReadProcessor read fields for block");
}
- @AfterClass
+ @AfterAll
public static void tearDown() {
cluster.shutdown();
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferKeepalive.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferKeepalive.java
index 9881f9250b2ae..6d4297d385d2a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferKeepalive.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferKeepalive.java
@@ -23,10 +23,7 @@
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SOCKET_REUSE_KEEPALIVE_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SOCKET_REUSE_KEEPALIVE_KEY;
import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.*;
import java.io.InputStream;
@@ -40,9 +37,9 @@
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.util.ReflectionUtils;
-import org.junit.After;
-import org.junit.Before;
import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
import java.util.function.Supplier;
@@ -55,7 +52,7 @@ public class TestDataTransferKeepalive {
private static final int KEEPALIVE_TIMEOUT = 1000;
private static final int WRITE_TIMEOUT = 3000;
- @Before
+ @BeforeEach
public void setup() throws Exception {
conf.setInt(DFS_DATANODE_SOCKET_REUSE_KEEPALIVE_KEY,
KEEPALIVE_TIMEOUT);
@@ -67,7 +64,7 @@ public void setup() throws Exception {
dn = cluster.getDataNodes().get(0);
}
- @After
+ @AfterEach
public void teardown() {
if (cluster != null) {
cluster.shutdown();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferProtocol.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferProtocol.java
index b1a675c77b62a..2e5097c419be3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferProtocol.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferProtocol.java
@@ -17,9 +17,7 @@
*/
package org.apache.hadoop.hdfs;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.*;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeConfig.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeConfig.java
index 989e9fc0a5583..0a56677dd773a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeConfig.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeConfig.java
@@ -18,10 +18,8 @@
package org.apache.hadoop.hdfs;
import static org.apache.hadoop.hdfs.server.common.Util.fileAsURI;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-import static org.junit.Assume.assumeTrue;
+import static org.junit.jupiter.api.Assertions.*;
+import static org.junit.jupiter.api.Assumptions.assumeTrue;
import java.io.File;
import java.io.IOException;
@@ -34,9 +32,9 @@
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.io.nativeio.NativeIO;
import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
/**
* Tests if a data-node can startup depending on configuration parameters.
@@ -47,7 +45,7 @@ public class TestDatanodeConfig {
private static MiniDFSCluster cluster;
- @BeforeClass
+ @BeforeAll
public static void setUp() throws Exception {
clearBaseDir();
Configuration conf = new HdfsConfiguration();
@@ -59,7 +57,7 @@ public static void setUp() throws Exception {
cluster.waitActive();
}
- @AfterClass
+ @AfterAll
public static void tearDown() throws Exception {
if(cluster != null)
cluster.shutdown();
@@ -95,7 +93,7 @@ public void testDataDirectories() throws IOException {
dn.shutdown();
}
}
- assertNull("Data-node startup should have failed.", dn);
+ assertNull(dn, "Data-node startup should have failed.");
// 2. Test "file:" ecPolicy and no ecPolicy (path-only). Both should work.
String dnDir1 = fileAsURI(dataDir).toString() + "1";
@@ -106,7 +104,7 @@ public void testDataDirectories() throws IOException {
dnDir1 + "," + dnDir2 + "," + dnDir3);
try {
cluster.startDataNodes(conf, 1, false, StartupOption.REGULAR, null);
- assertTrue("Data-node should startup.", cluster.isDataNodeUp());
+ assertTrue(cluster.isDataNodeUp(), "Data-node should startup.");
} finally {
if (cluster != null) {
cluster.shutdownDataNodes();
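The assumption import also changes engine here: `org.junit.Assume.assumeTrue` becomes `org.junit.jupiter.api.Assumptions.assumeTrue`, and as with the assertions the optional message moves to the last parameter. A small sketch of the Jupiter form; the operating-system check below is a hypothetical stand-in for the native-I/O guard used in the test:

    import static org.junit.jupiter.api.Assumptions.assumeTrue;
    import org.junit.jupiter.api.Test;

    class AssumptionSketch {
      @Test
      void runsOnlyWhenTheAssumptionHolds() {
        // condition first, message last; a failed assumption aborts (skips) the test
        assumeTrue(System.getProperty("os.name").toLowerCase().contains("linux"),
            "skipping: platform-specific prerequisites are not met");
        // the rest of the test would run only on Linux in this sketch
      }
    }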
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeDeath.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeDeath.java
index c5141f34ee961..ff94deecb540f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeDeath.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeDeath.java
@@ -17,8 +17,8 @@
*/
package org.apache.hadoop.hdfs;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
import java.io.IOException;
@@ -34,7 +34,7 @@
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol;
import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
import org.slf4j.event.Level;
/**
@@ -96,7 +96,7 @@ public void run() {
checkFile(fs, filename, replication, numBlocks, fileSize, myseed);
} catch (Throwable e) {
System.out.println("Workload exception " + e);
- assertTrue(e.toString(), false);
+ assertTrue(false, e.toString());
}
// increment the stamp to indicate that another file is done.
@@ -148,9 +148,9 @@ static private void checkFile(FileSystem fileSys, Path name, int repl,
int attempt = 0;
long len = fileSys.getFileStatus(name).getLen();
- assertTrue(name + " should be of size " + filesize +
- " but found to be of size " + len,
- len == filesize);
+ assertTrue(len == filesize,
+ name + " should be of size " + filesize +
+ " but found to be of size " + len);
// wait till all full blocks are confirmed by the datanodes.
while (!done) {
@@ -198,9 +198,9 @@ static private void checkFile(FileSystem fileSys, Path name, int repl,
private static void checkData(byte[] actual, int from, byte[] expected, String message) {
for (int idx = 0; idx < actual.length; idx++) {
- assertEquals(message+" byte "+(from+idx)+" differs. expected "+
- expected[from+idx]+" actual "+actual[idx],
- actual[idx], expected[from+idx]);
+ assertEquals(actual[idx], expected[from + idx],
+ message + " byte " + (from + idx) + " differs. expected " +
+ expected[from + idx] + " actual " + actual[idx]);
actual[idx] = 0;
}
}
@@ -259,7 +259,7 @@ public void run() {
// cluster.startDataNodes(conf, 1, true, null, null);
} catch (IOException e) {
System.out.println("TestDatanodeDeath Modify exception " + e);
- assertTrue("TestDatanodeDeath Modify exception " + e, false);
+ assertTrue(false, "TestDatanodeDeath Modify exception " + e);
running = false;
}
}
@@ -399,7 +399,7 @@ private void simpleTest(int datanodeToKill) throws IOException {
} catch (Throwable e) {
System.out.println("Simple Workload exception " + e);
e.printStackTrace();
- assertTrue(e.toString(), false);
+ assertTrue(false, e.toString());
} finally {
fs.close();
cluster.shutdown();
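Several catch blocks in this file end up as `assertTrue(false, e.toString())` after the mechanical argument swap. That is behaviourally the same as failing outright, and `fail(...)` states the intent directly; the sketch below shows that alternative for a possible later cleanup, with a hypothetical helper standing in for the real workload:

    import static org.junit.jupiter.api.Assertions.fail;
    import org.junit.jupiter.api.Test;

    class FailInsteadOfAssertTrueFalse {
      @Test
      void workloadErrorsFailTheTest() {
        try {
          runWorkload();                       // stand-in for the write/checkFile workload
        } catch (Throwable e) {
          // same effect as assertTrue(false, e.toString()), with the cause preserved
          fail("Workload exception " + e, e);
        }
      }

      private void runWorkload() { /* elided in this sketch */ }
    }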
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeLayoutUpgrade.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeLayoutUpgrade.java
index 0e2f4e4ee09ab..6d23f3a6d13f4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeLayoutUpgrade.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeLayoutUpgrade.java
@@ -20,7 +20,7 @@
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
import java.io.File;
import java.io.IOException;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeRegistration.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeRegistration.java
index f029ee5298eeb..ed9798b331091 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeRegistration.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeRegistration.java
@@ -39,7 +39,7 @@
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.util.VersionInfo;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
import java.util.function.Supplier;
@@ -47,7 +47,7 @@
import java.security.Permission;
import java.util.concurrent.TimeoutException;
-import static org.junit.Assert.*;
+import static org.junit.jupiter.api.Assertions.*;
import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.mock;
@@ -88,7 +88,7 @@ public void testDNSLookups() throws Exception {
cluster.waitActive();
int initialLookups = sm.lookups;
- assertTrue("dns security manager is active", initialLookups != 0);
+ assertTrue(initialLookups != 0, "dns security manager is active");
DatanodeManager dm =
cluster.getNamesystem().getBlockManager().getDatanodeManager();
@@ -198,7 +198,7 @@ public void testChangeStorageID() throws Exception {
rpcServer.registerDatanode(dnReg);
DatanodeInfo[] report = client.datanodeReport(DatanodeReportType.ALL);
- assertEquals("Expected a registered datanode", 1, report.length);
+ assertEquals(1, report.length, "Expected a registered datanode");
// register the same datanode again with a different storage ID
dnId = new DatanodeID(DN_IP_ADDR, DN_HOSTNAME,
@@ -209,8 +209,8 @@ public void testChangeStorageID() throws Exception {
rpcServer.registerDatanode(dnReg);
report = client.datanodeReport(DatanodeReportType.ALL);
- assertEquals("Datanode with changed storage ID not recognized",
- 1, report.length);
+ assertEquals(1, report.length,
+ "Datanode with changed storage ID not recognized");
} finally {
if (cluster != null) {
cluster.shutdown();
@@ -366,16 +366,16 @@ public void testForcedRegistration() throws Exception {
waitForHeartbeat(dn, dnd);
assertTrue(dnd.isRegistered());
assertSame(lastReg, dn.getDNRegistrationForBP(bpId));
- assertTrue("block report is not processed for DN " + dnd,
- waitForBlockReport(dn, dnd));
+ assertTrue(waitForBlockReport(dn, dnd),
+ "block report is not processed for DN " + dnd);
assertTrue(dnd.isRegistered());
assertSame(lastReg, dn.getDNRegistrationForBP(bpId));
// check that block report is not processed and registration didn't
// change.
dnd.setForceRegistration(true);
- assertFalse("block report is processed for DN " + dnd,
- waitForBlockReport(dn, dnd));
+ assertFalse(waitForBlockReport(dn, dnd),
+ "block report is processed for DN " + dnd);
assertFalse(dnd.isRegistered());
assertSame(lastReg, dn.getDNRegistrationForBP(bpId));
@@ -386,8 +386,8 @@ public void testForcedRegistration() throws Exception {
newReg = dn.getDNRegistrationForBP(bpId);
assertNotSame(lastReg, newReg);
lastReg = newReg;
- assertTrue("block report is not processed for DN " + dnd,
- waitForBlockReport(dn, dnd));
+ assertTrue(waitForBlockReport(dn, dnd),
+ "block report is not processed for DN " + dnd);
assertTrue(dnd.isRegistered());
assertSame(lastReg, dn.getDNRegistrationForBP(bpId));
@@ -407,7 +407,7 @@ public void testForcedRegistration() throws Exception {
} catch (NullPointerException npe) {
failed = true;
}
- assertTrue("didn't fail", failed);
+ assertTrue(failed, "didn't fail");
assertFalse(dnd.isRegistered());
// should remain unregistered until next heartbeat.
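The forced-registration test still uses the JUnit 4 idiom of catching the expected `NullPointerException`, setting a flag, and then asserting `assertTrue(failed, "didn't fail")`. Jupiter's `assertThrows` expresses the same expectation in one call; a hedged sketch with a hypothetical stand-in for the heartbeat call:

    import static org.junit.jupiter.api.Assertions.assertThrows;
    import org.junit.jupiter.api.Test;

    class AssertThrowsSketch {
      @Test
      void heartbeatWithoutRegistrationThrows() {
        // assertThrows fails the test if no NullPointerException is thrown
        assertThrows(NullPointerException.class, this::heartbeatWithoutRegistration);
      }

      private void heartbeatWithoutRegistration() {
        throw new NullPointerException("no registration for this block pool");
      }
    }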
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeReport.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeReport.java
index de738eef177a3..defd59158bb16 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeReport.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeReport.java
@@ -19,7 +19,7 @@
import static org.apache.hadoop.test.MetricsAsserts.assertCounter;
import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
-import static org.junit.Assert.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
import java.io.IOException;
import java.util.Arrays;
@@ -42,8 +42,8 @@
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport;
import org.apache.hadoop.hdfs.server.protocol.StorageReport;
import org.apache.hadoop.hdfs.util.HostsFileWriter;
-import org.junit.Assert;
-import org.junit.Test;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.Test;
/**
* This test ensures the all types of data node report work correctly.
@@ -167,7 +167,7 @@ public void testDatanodeReportMissingBlock() throws Exception {
cluster.corruptBlockOnDataNodesByDeletingBlockFile(b);
try {
DFSTestUtil.readFile(fs, p);
- Assert.fail("Must throw exception as the block doesn't exists on disk");
+ Assertions.fail("Must throw exception as the block doesn't exists on disk");
} catch (IOException e) {
// all bad datanodes
}
@@ -178,7 +178,7 @@ public void testDatanodeReportMissingBlock() throws Exception {
if (0 != lb.getLocations().length) {
retries++;
if (retries > 7) {
- Assert.fail("getLocatedBlocks failed after 7 retries");
+ Assertions.fail("getLocatedBlocks failed after 7 retries");
}
Thread.sleep(2000);
} else {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeStartupFixesLegacyStorageIDs.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeStartupFixesLegacyStorageIDs.java
index 659a8c162b12b..0cd7db0faea97 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeStartupFixesLegacyStorageIDs.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeStartupFixesLegacyStorageIDs.java
@@ -29,9 +29,9 @@
import org.apache.hadoop.hdfs.TestDFSUpgradeFromImage.ClusterVerifier;
+import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.core.Is.is;
-import static org.junit.Assert.assertThat;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertTrue;
/**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDeadNodeDetection.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDeadNodeDetection.java
index e8da918e1f7cc..c757a53d2b46d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDeadNodeDetection.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDeadNodeDetection.java
@@ -26,10 +26,10 @@
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
import org.mockito.Mockito;
import java.io.IOException;
@@ -44,12 +44,7 @@
import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_DEAD_NODE_DETECTION_PROBE_SUSPECT_NODE_INTERVAL_MS_KEY;
import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_KEY;
import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_DEAD_NODE_DETECTION_IDLE_SLEEP_MS_KEY;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertSame;
-import static org.junit.Assert.assertNotSame;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.*;
/**
* Tests for dead node detection in DFSClient.
@@ -59,7 +54,7 @@ public class TestDeadNodeDetection {
private MiniDFSCluster cluster;
private Configuration conf;
- @Before
+ @BeforeEach
public void setUp() {
cluster = null;
conf = new HdfsConfiguration();
@@ -77,7 +72,7 @@ public void setUp() {
conf.setLong(DFS_CLIENT_DEAD_NODE_DETECTION_IDLE_SLEEP_MS_KEY, 100);
}
- @After
+ @AfterEach
public void tearDown() {
if (cluster != null) {
cluster.shutdown();
@@ -328,15 +323,15 @@ public void testDeadNodeDetectionSuspectNode() throws Exception {
}
waitForSuspectNode(din.getDFSClient());
cluster.restartDataNode(one, true);
- Assert.assertEquals(1,
+ Assertions.assertEquals(1,
deadNodeDetector.getSuspectNodesProbeQueue().size());
- Assert.assertEquals(0,
+ Assertions.assertEquals(0,
deadNodeDetector.clearAndGetDetectedDeadNodes().size());
deadNodeDetector.startProbeScheduler();
Thread.sleep(1000);
- Assert.assertEquals(0,
+ Assertions.assertEquals(0,
deadNodeDetector.getSuspectNodesProbeQueue().size());
- Assert.assertEquals(0,
+ Assertions.assertEquals(0,
deadNodeDetector.clearAndGetDetectedDeadNodes().size());
} finally {
in.close();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java
index f7e6dce003311..8af1e134e92c4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java
@@ -17,11 +17,7 @@
*/
package org.apache.hadoop.hdfs;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.*;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
@@ -77,9 +73,9 @@
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.util.Lists;
import org.apache.hadoop.util.ToolRunner;
-import org.junit.Assert;
-import org.junit.Ignore;
import org.junit.Test;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.Disabled;
import org.eclipse.jetty.util.ajax.JSON;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -106,9 +102,9 @@ public class TestDecommission extends AdminStatesBaseTest {
private static String checkFile(FileSystem fileSys, Path name, int repl,
String downnode, int numDatanodes) throws IOException {
boolean isNodeDown = (downnode != null);
- // need a raw stream
- assertTrue("Not HDFS:"+fileSys.getUri(),
- fileSys instanceof DistributedFileSystem);
+ // need a raw stream
+ assertTrue(fileSys instanceof DistributedFileSystem,
+ "Not HDFS:" + fileSys.getUri());
HdfsDataInputStream dis = (HdfsDataInputStream)
fileSys.open(name);
Collection dinfo = dis.getAllBlocks();
@@ -223,8 +219,8 @@ public void testDecommission2() throws IOException {
// Ensure decommissioned datanode is not automatically shutdown
DFSClient client = getDfsClient(0);
- assertEquals("All datanodes must be alive", numDatanodes,
- client.datanodeReport(DatanodeReportType.LIVE).length);
+ assertEquals(numDatanodes,
+ client.datanodeReport(DatanodeReportType.LIVE).length, "All datanodes must be alive");
assertNull(checkFile(fileSys, file1, replicas, decomNode.getXferAddr(),
numDatanodes));
cleanupFile(fileSys, file1);
@@ -397,8 +393,8 @@ private void testDecommission(int numNamenodes, int numDatanodes)
// Ensure decommissioned datanode is not automatically shutdown
DFSClient client = getDfsClient(i);
- assertEquals("All datanodes must be alive", numDatanodes,
- client.datanodeReport(DatanodeReportType.LIVE).length);
+ assertEquals(numDatanodes,
+ client.datanodeReport(DatanodeReportType.LIVE).length, "All datanodes must be alive");
// wait for the block to be replicated
int tries = 0;
while (tries++ < 20) {
@@ -411,8 +407,8 @@ private void testDecommission(int numNamenodes, int numDatanodes)
} catch (InterruptedException ie) {
}
}
- assertTrue("Checked if block was replicated after decommission, tried "
- + tries + " times.", tries < 20);
+ assertTrue(tries < 20, "Checked if block was replicated after decommission, tried "
+ + tries + " times.");
cleanupFile(fileSys, file1);
}
}
@@ -445,8 +441,8 @@ public void testRecommission() throws Exception {
// Decommission one of the datanodes with a replica
BlockLocation loc = fileSys.getFileBlockLocations(file1, 0, 1)[0];
- assertEquals("Unexpected number of replicas from getFileBlockLocations",
- replicas, loc.getHosts().length);
+ assertEquals(replicas, loc.getHosts().length,
+ "Unexpected number of replicas from getFileBlockLocations");
final String toDecomHost = loc.getNames()[0];
String toDecomUuid = null;
for (DataNode d : getCluster().getDataNodes()) {
@@ -455,7 +451,7 @@ public void testRecommission() throws Exception {
break;
}
}
- assertNotNull("Could not find a dn with the block!", toDecomUuid);
+ assertNotNull(toDecomUuid, "Could not find a dn with the block!");
final DatanodeInfo decomNode = takeNodeOutofService(0, toDecomUuid,
0, decommissionedNodes, AdminStates.DECOMMISSIONED);
decommissionedNodes.add(decomNode);
@@ -467,8 +463,8 @@ public void testRecommission() throws Exception {
// Ensure decommissioned datanode is not automatically shutdown
DFSClient client = getDfsClient(0);
- assertEquals("All datanodes must be alive", numDatanodes,
- client.datanodeReport(DatanodeReportType.LIVE).length);
+ assertEquals(numDatanodes,
+ client.datanodeReport(DatanodeReportType.LIVE).length, "All datanodes must be alive");
// wait for the block to be replicated
final ExtendedBlock b = DFSTestUtil.getFirstBlock(fileSys, file1);
@@ -557,7 +553,7 @@ private DataNode getDataNode(DatanodeInfo decomInfo) {
break;
}
}
- assertNotNull("Could not find decomNode in cluster!", decomNode);
+ assertNotNull(decomNode, "Could not find decomNode in cluster!");
return decomNode;
}
@@ -603,14 +599,14 @@ public void testHostsFile(int numNameNodes) throws IOException,
Thread.sleep(HEARTBEAT_INTERVAL * 1000);
info = client.datanodeReport(DatanodeReportType.LIVE);
}
- assertEquals("Number of live nodes should be 0", 0, info.length);
+ assertEquals(0, info.length, "Number of live nodes should be 0");
// Test that bogus hostnames are considered "dead".
// The dead report should have an entry for the bogus entry in the hosts
// file. The original datanode is excluded from the report because it
// is no longer in the included list.
info = client.datanodeReport(DatanodeReportType.DEAD);
- assertEquals("There should be 1 dead node", 1, info.length);
+ assertEquals(1, info.length, "There should be 1 dead node");
assertEquals(bogusIp, info[0].getHostName());
}
}
@@ -926,7 +922,7 @@ public void testDecommissionWithCloseFileAndListOpenFiles()
OpenFilesIterator.FILTER_PATH_DEFAULT);
assertEquals(0, batchedListEntries.size());
} catch (NullPointerException e) {
- Assert.fail("Should not throw NPE when the file is not under " +
+ Assertions.fail("Should not throw NPE when the file is not under " +
"construction but has lease!");
}
initExcludeHost("");
@@ -1152,20 +1148,20 @@ public void testDecommissionWithNamenodeRestart()
getCluster().startDataNodes(getConf(), 1, true, null, null, null, null);
numDatanodes+=1;
- assertEquals("Number of datanodes should be 2 ", 2,
- getCluster().getDataNodes().size());
+ assertEquals(2,
+ getCluster().getDataNodes().size(), "Number of datanodes should be 2 ");
//Restart the namenode
getCluster().restartNameNode();
DatanodeInfo datanodeInfo = NameNodeAdapter.getDatanode(
getCluster().getNamesystem(), excludedDatanodeID);
waitNodeState(datanodeInfo, AdminStates.DECOMMISSIONED);
- // Ensure decommissioned datanode is not automatically shutdown
- assertEquals("All datanodes must be alive", numDatanodes,
- client.datanodeReport(DatanodeReportType.LIVE).length);
- assertTrue("Checked if block was replicated after decommission.",
- checkFile(fileSys, file1, replicas, datanodeInfo.getXferAddr(),
- numDatanodes) == null);
+ // Ensure decommissioned datanode is not automatically shutdown
+ assertEquals(numDatanodes, client.datanodeReport(DatanodeReportType.LIVE).length,
+ "All datanodes must be alive");
+ assertTrue(checkFile(fileSys, file1, replicas, datanodeInfo.getXferAddr(),
+ numDatanodes) == null,
+ "Checked if block was replicated after decommission.");
cleanupFile(fileSys, file1);
// Restart the cluster and ensure recommissioned datanodes
@@ -1203,10 +1199,10 @@ public void testDeadNodeCountAfterNamenodeRestart()throws Exception {
//Restart the namenode
getCluster().restartNameNode();
- assertEquals("There should be one node alive", 1,
- client.datanodeReport(DatanodeReportType.LIVE).length);
- assertEquals("There should be one node dead", 1,
- client.datanodeReport(DatanodeReportType.DEAD).length);
+ assertEquals(1,
+ client.datanodeReport(DatanodeReportType.LIVE).length, "There should be one node alive");
+ assertEquals(1,
+ client.datanodeReport(DatanodeReportType.DEAD).length, "There should be one node dead");
}
/**
@@ -1223,7 +1219,7 @@ public void testDeadNodeCountAfterNamenodeRestart()throws Exception {
* It is not recommended to use a registration name which is not also a
* valid DNS hostname for the DataNode. See HDFS-5237 for background.
*/
- @Ignore
+ @Disabled
@Test(timeout=360000)
public void testIncludeByRegistrationName() throws Exception {
// Any IPv4 address starting with 127 functions as a "loopback" address
@@ -1277,8 +1273,8 @@ public Boolean get() {
try {
DatanodeInfo info[] = client.datanodeReport(DatanodeReportType.LIVE);
if (info.length == 1) {
- Assert.assertFalse(info[0].isDecommissioned());
- Assert.assertFalse(info[0].isDecommissionInProgress());
+ Assertions.assertFalse(info[0].isDecommissioned());
+ Assertions.assertFalse(info[0].isDecommissionInProgress());
assertEquals(registrationName, info[0].getHostName());
return true;
}
@@ -1334,8 +1330,8 @@ private void doDecomCheck(DatanodeManager datanodeManager,
}
// Run decom scan and check
BlockManagerTestUtil.recheckDecommissionState(datanodeManager);
- assertEquals("Unexpected # of nodes checked", expectedNumCheckedNodes,
- decomManager.getNumNodesChecked());
+ assertEquals(expectedNumCheckedNodes,
+ decomManager.getNumNodesChecked(), "Unexpected # of nodes checked");
// Recommission all nodes
for (DatanodeInfo dn : decommissionedNodes) {
putNodeInService(0, dn);
@@ -1446,10 +1442,10 @@ public void testPendingNodes() throws Exception {
private void assertTrackedAndPending(DatanodeAdminManager decomManager,
int tracked, int pending) {
- assertEquals("Unexpected number of tracked nodes", tracked,
- decomManager.getNumTrackedNodes());
- assertEquals("Unexpected number of pending nodes", pending,
- decomManager.getNumPendingNodes());
+ assertEquals(tracked,
+ decomManager.getNumTrackedNodes(), "Unexpected number of tracked nodes");
+ assertEquals(pending,
+ decomManager.getNumPendingNodes(), "Unexpected number of pending nodes");
}
/**
@@ -1604,12 +1600,12 @@ public void testUsedCapacity() throws Exception {
long newTotalCapacity = datanodeStatistics.getCapacityTotal();
long newBlockPoolUsed = datanodeStatistics.getBlockPoolUsed();
- assertTrue("DfsUsedCapacity should not be the same after a node has " +
- "been decommissioned!", initialUsedCapacity != newUsedCapacity);
- assertTrue("TotalCapacity should not be the same after a node has " +
- "been decommissioned!", initialTotalCapacity != newTotalCapacity);
- assertTrue("BlockPoolUsed should not be the same after a node has " +
- "been decommissioned!",initialBlockPoolUsed != newBlockPoolUsed);
+ assertTrue(initialUsedCapacity != newUsedCapacity,
+ "DfsUsedCapacity should not be the same after a node has been decommissioned!");
+ assertTrue(initialTotalCapacity != newTotalCapacity,
+ "TotalCapacity should not be the same after a node has been decommissioned!");
+ assertTrue(initialBlockPoolUsed != newBlockPoolUsed,
+ "BlockPoolUsed should not be the same after a node has been decommissioned!");
}
/**
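checkFile() above keeps the `assertTrue(fileSys instanceof DistributedFileSystem, ...)` pattern with the arguments swapped. If the project baseline allows JUnit Jupiter 5.8+, `assertInstanceOf` does the type check and the cast in one step; a sketch under that assumption, using a plain collection as a stand-in object:

    import static org.junit.jupiter.api.Assertions.assertInstanceOf;
    import java.util.ArrayList;
    import java.util.List;
    import org.junit.jupiter.api.Test;

    class AssertInstanceOfSketch {
      @Test
      void checksTheTypeAndReturnsTheCastValue() {
        Object candidate = new ArrayList<String>();   // stand-in, not a FileSystem
        // fails with the given message if candidate is not an ArrayList
        List<?> asList = assertInstanceOf(ArrayList.class, candidate, "Not the expected type");
        asList.size();                                // the cast result is directly usable
      }
    }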
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommissionWithBackoffMonitor.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommissionWithBackoffMonitor.java
index 9c37a197b7ea7..99659c577347a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommissionWithBackoffMonitor.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommissionWithBackoffMonitor.java
@@ -22,7 +22,7 @@
.DatanodeAdminBackoffMonitor;
import org.apache.hadoop.hdfs.server.blockmanagement
.DatanodeAdminMonitorInterface;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
import java.io.IOException;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommissionWithStriped.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommissionWithStriped.java
index c68cb1707c2fe..59c1743156e59 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommissionWithStriped.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommissionWithStriped.java
@@ -17,10 +17,7 @@
*/
package org.apache.hadoop.hdfs;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertNull;
+import static org.junit.jupiter.api.Assertions.assertTrue;
import java.io.IOException;
import java.util.ArrayList;
@@ -63,10 +60,10 @@
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.test.PathUtils;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeEach;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -113,7 +110,7 @@ protected Configuration createConfiguration() {
return new HdfsConfiguration();
}
- @Before
+ @BeforeEach
public void setup() throws IOException {
conf = createConfiguration();
// Set up the hosts/exclude files.
@@ -162,7 +159,7 @@ public void setup() throws IOException {
StripedFileTestUtil.getDefaultECPolicy().getName());
}
- @After
+ @AfterEach
public void teardown() throws IOException {
cleanupFile(localFileSys, decommissionDir);
if (cluster != null) {
@@ -209,7 +206,7 @@ public void testDecommissionWithURBlockForSameBlockGroup() throws Exception {
final Path ecFile = new Path(ecDir, "testDecommissionWithCorruptBlocks");
int writeBytes = cellSize * dataBlocks * 2;
writeStripedFile(dfs, ecFile, writeBytes);
- Assert.assertEquals(0, bm.numOfUnderReplicatedBlocks());
+ Assertions.assertEquals(0, bm.numOfUnderReplicatedBlocks());
final List decommisionNodes = new ArrayList();
LocatedBlock lb = dfs.getClient().getLocatedBlocks(ecFile.toString(), 0)
@@ -254,7 +251,7 @@ public void run() {
decommissionNode(0, decommisionNodes, AdminStates.DECOMMISSIONED);
} catch (Exception e) {
LOG.error("Exception while decommissioning", e);
- Assert.fail("Shouldn't throw exception!");
+ Assertions.fail("Shouldn't throw exception!");
}
};
};
@@ -279,9 +276,9 @@ public void run() {
assertEquals(liveDecommissioned + decommisionNodes.size(),
fsn.getNumDecomLiveDataNodes());
- // Ensure decommissioned datanode is not automatically shutdown
- assertEquals("All datanodes must be alive", numDNs,
- client.datanodeReport(DatanodeReportType.LIVE).length);
+ // Ensure decommissioned datanode is not automatically shutdown
+ assertEquals(numDNs,
+ client.datanodeReport(DatanodeReportType.LIVE).length,
+ "All datanodes must be alive");
assertNull(checkFile(dfs, ecFile, 9, decommisionNodes, numDNs));
StripedFileTestUtil.checkData(dfs, ecFile, writeBytes, decommisionNodes,
@@ -301,7 +298,7 @@ public void testDecommissionWithBusyNode() throws Exception {
final Path ecFile = new Path(ecDir, "testDecommissionWithBusyNode");
int writeBytes = cellSize * dataBlocks;
writeStripedFile(dfs, ecFile, writeBytes);
- Assert.assertEquals(0, bm.numOfUnderReplicatedBlocks());
+ Assertions.assertEquals(0, bm.numOfUnderReplicatedBlocks());
FileChecksum fileChecksum1 = dfs.getFileChecksum(ecFile, writeBytes);
//2. make once DN busy
@@ -325,9 +322,9 @@ public void testDecommissionWithBusyNode() throws Exception {
//4. wait for decommission block to replicate
Thread.sleep(3000);
DatanodeStorageInfo[] newDnStorageInfos = bm.getStorages(firstBlock);
- Assert.assertEquals("Busy DN shouldn't be reconstructed",
- dnStorageInfos[busyDNIndex].getStorageID(),
- newDnStorageInfos[busyDNIndex].getStorageID());
+ Assertions.assertEquals(
+ dnStorageInfos[busyDNIndex].getStorageID(),
+ newDnStorageInfos[busyDNIndex].getStorageID(),
+ "Busy DN shouldn't be reconstructed");
//5. check decommission DN block index, it should be reconstructed again
LocatedBlocks lbs = cluster.getNameNodeRpc().getBlockLocations(
@@ -340,12 +337,12 @@ public void testDecommissionWithBusyNode() throws Exception {
}
}
- Assert.assertEquals("Decommission DN block should be reconstructed", 2,
- decommissionBlockIndexCount);
+ Assertions.assertEquals(2, decommissionBlockIndexCount,
+ "Decommission DN block should be reconstructed");
FileChecksum fileChecksum2 = dfs.getFileChecksum(ecFile, writeBytes);
- Assert.assertTrue("Checksum mismatches!",
- fileChecksum1.equals(fileChecksum2));
+ Assertions.assertTrue(
+ fileChecksum1.equals(fileChecksum2), "Checksum mismatches!");
}
/**
@@ -363,7 +360,7 @@ public void testDecommission2NodeWithBusyNode() throws Exception {
int writeBytes = cellSize * dataBlocks;
writeStripedFile(dfs, ecFile, writeBytes);
- Assert.assertEquals(0, bm.numOfUnderReplicatedBlocks());
+ Assertions.assertEquals(0, bm.numOfUnderReplicatedBlocks());
FileChecksum fileChecksum1 = dfs.getFileChecksum(ecFile, writeBytes);
//2. make once DN busy
@@ -398,13 +395,13 @@ public void testDecommission2NodeWithBusyNode() throws Exception {
//7. Busy DN shouldn't be reconstructed
DatanodeStorageInfo[] newDnStorageInfos = bm.getStorages(firstBlock);
- Assert.assertEquals("Busy DN shouldn't be reconstructed",
- dnStorageInfos[busyDNIndex].getStorageID(),
- newDnStorageInfos[busyDNIndex].getStorageID());
+ Assertions.assertEquals(
+ dnStorageInfos[busyDNIndex].getStorageID(),
+ newDnStorageInfos[busyDNIndex].getStorageID(),
+ "Busy DN shouldn't be reconstructed");
//8. check the checksum of a file
FileChecksum fileChecksum2 = dfs.getFileChecksum(ecFile, writeBytes);
- Assert.assertEquals("Checksum mismatches!", fileChecksum1, fileChecksum2);
+ Assertions.assertEquals(fileChecksum1, fileChecksum2, "Checksum mismatches!");
//9. check the data is correct
StripedFileTestUtil.checkData(dfs, ecFile, writeBytes, decommissionNodes,
@@ -430,7 +427,7 @@ public void testFileChecksumAfterDecommission() throws Exception {
final Path ecFile = new Path(ecDir, "testFileChecksumAfterDecommission");
int writeBytes = cellSize * dataBlocks;
writeStripedFile(dfs, ecFile, writeBytes);
- Assert.assertEquals(0, bm.numOfUnderReplicatedBlocks());
+ Assertions.assertEquals(0, bm.numOfUnderReplicatedBlocks());
FileChecksum fileChecksum1 = dfs.getFileChecksum(ecFile, writeBytes);
final List decommisionNodes = new ArrayList();
@@ -453,8 +450,8 @@ public void testFileChecksumAfterDecommission() throws Exception {
LOG.info("fileChecksum1:" + fileChecksum1);
LOG.info("fileChecksum2:" + fileChecksum2);
- Assert.assertTrue("Checksum mismatches!",
- fileChecksum1.equals(fileChecksum2));
+ Assertions.assertTrue(
+ fileChecksum1.equals(fileChecksum2), "Checksum mismatches!");
}
private void testDecommission(int writeBytes, int storageCount,
@@ -484,8 +481,8 @@ private void testDecommission(int writeBytes, int storageCount,
// Ensure decommissioned datanode is not automatically shutdown
DFSClient client = getDfsClient(cluster.getNameNode(0), conf);
- assertEquals("All datanodes must be alive", numDNs,
- client.datanodeReport(DatanodeReportType.LIVE).length);
+ assertEquals(numDNs,
+ client.datanodeReport(DatanodeReportType.LIVE).length,
+ "All datanodes must be alive");
assertNull(checkFile(dfs, ecFile, storageCount, decommisionNodes, numDNs));
StripedFileTestUtil.checkData(dfs, ecFile, writeBytes, decommisionNodes,
@@ -531,10 +528,10 @@ private void assertBlockIndexAndTokenPosition(List lbs,
locToTokenList.get(i);
DatanodeInfo[] di = lb.getLocations();
for (int j = 0; j < di.length; j++) {
- Assert.assertEquals("Block index value mismatches after sorting",
- (byte) locToIndex.get(di[j]), stripedBlk.getBlockIndices()[j]);
- Assert.assertEquals("Block token value mismatches after sorting",
- locToToken.get(di[j]), stripedBlk.getBlockTokens()[j]);
+ Assertions.assertEquals(
+ (byte) locToIndex.get(di[j]), stripedBlk.getBlockIndices()[j],
+ "Block index value mismatches after sorting");
+ Assertions.assertEquals(
+ locToToken.get(di[j]), stripedBlk.getBlockTokens()[j],
+ "Block token value mismatches after sorting");
}
}
}
@@ -620,7 +617,7 @@ private void decommissionNode(int nnIndex,
break;
}
}
- assertTrue("Datanode: " + dn + " is not LIVE", nodeExists);
+ assertTrue(nodeExists, "Datanode: " + dn + " is not LIVE");
excludeNodes.add(dn.getName());
LOG.info("Decommissioning node: " + dn.getName());
}
@@ -671,9 +668,9 @@ private static String checkFile(FileSystem fileSys, Path name, int repl,
List decommissionedNodes, int numDatanodes)
throws IOException {
boolean isNodeDown = decommissionedNodes.size() > 0;
- // need a raw stream
- assertTrue("Not HDFS:" + fileSys.getUri(),
- fileSys instanceof DistributedFileSystem);
+ // need a raw stream
+ assertTrue(fileSys instanceof DistributedFileSystem,
+ "Not HDFS:" + fileSys.getUri());
HdfsDataInputStream dis = (HdfsDataInputStream) fileSys.open(name);
Collection dinfo = dis.getAllBlocks();
for (LocatedBlock blk : dinfo) { // for each block
@@ -931,7 +928,7 @@ public void testDecommissionWithMissingBlock() throws Exception {
decommissionNode(0, decommisionNodes, AdminStates.DECOMMISSIONED);
} catch (Exception e) {
LOG.error("Exception while decommissioning", e);
- Assert.fail("Shouldn't throw exception!");
+ Assertions.fail("Shouldn't throw exception!");
}
}).start();
decomStarted.await(5, TimeUnit.SECONDS);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDeprecatedKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDeprecatedKeys.java
index ad5c2a8e2a7cb..f91e4d0eb1888 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDeprecatedKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDeprecatedKeys.java
@@ -18,11 +18,11 @@
package org.apache.hadoop.hdfs;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertFalse;
import org.apache.hadoop.conf.Configuration;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
public class TestDeprecatedKeys {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDisableConnCache.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDisableConnCache.java
index 51a28d294f9ba..567f89778904c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDisableConnCache.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDisableConnCache.java
@@ -17,7 +17,7 @@
*/
package org.apache.hadoop.hdfs;
-import static org.junit.Assert.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -25,7 +25,7 @@
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
import org.apache.hadoop.hdfs.client.impl.BlockReaderTestUtil;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
/**
* This class tests disabling client connection caching in a single node
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
index f7dcaef03959f..3e13295c794f5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
@@ -22,12 +22,7 @@
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_FILE_CLOSE_NUM_COMMITTED_ALLOWED_KEY;
import static org.apache.hadoop.hdfs.client.HdfsAdmin.TRASH_PERMISSION;
import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_CONTEXT;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertNull;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assertions.fail;
import static org.mockito.ArgumentMatchers.eq;
import static org.mockito.Mockito.inOrder;
import static org.mockito.Mockito.mock;
@@ -125,8 +120,8 @@
import org.apache.hadoop.util.DataChecksum;
import org.apache.hadoop.util.Time;
import org.apache.hadoop.util.concurrent.HadoopExecutors;
-import org.junit.Assert;
import org.junit.Test;
+import org.junit.jupiter.api.Assertions;
import org.mockito.InOrder;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -226,13 +221,13 @@ public void testDFSClose() throws Exception {
types.add(OpenFilesIterator.OpenFilesType.ALL_OPEN_FILES);
RemoteIterator listOpenFiles =
fileSys.listOpenFiles(EnumSet.copyOf(types));
- assertTrue("Two files should be open", listOpenFiles.hasNext());
+ assertTrue(listOpenFiles.hasNext(), "Two files should be open");
int countOpenFiles = 0;
while (listOpenFiles.hasNext()) {
listOpenFiles.next();
++countOpenFiles;
}
- assertEquals("Mismatch of open files count", 2, countOpenFiles);
+ assertEquals(2, countOpenFiles, "Mismatch of open files count");
// create another file, close it, and read it, so
// the client gets a socket in its SocketCache
@@ -444,7 +439,7 @@ public void testDFSSeekExceptions() throws IOException {
// success
threw = true;
}
- assertTrue("Failed to throw IOE when seeking past end", threw);
+ assertTrue(threw, "Failed to throw IOE when seeking past end");
input.close();
threw = false;
try {
@@ -453,7 +448,7 @@ public void testDFSSeekExceptions() throws IOException {
//success
threw = true;
}
- assertTrue("Failed to throw IOE when seeking after close", threw);
+ assertTrue(threw, "Failed to throw IOE when seeking after close");
fileSys.close();
}
finally {
@@ -562,7 +557,7 @@ public void testDFSClient() throws Exception {
// Check to see if opening a non-existent file triggers a FNF
FileSystem fs = cluster.getFileSystem();
Path dir = new Path("/wrwelkj");
- assertFalse("File should not exist for test.", fs.exists(dir));
+ assertFalse(fs.exists(dir), "File should not exist for test.");
try {
FSDataInputStream in = fs.open(dir);
@@ -570,8 +565,8 @@ public void testDFSClient() throws Exception {
in.close();
fs.close();
} finally {
- assertTrue("Did not get a FileNotFoundException for non-existing" +
- " file.", false);
+ assertTrue(false, "Did not get a FileNotFoundException for non-existing" +
+ " file.");
}
} catch (FileNotFoundException fnf) {
// This is the proper exception to catch; move on.
@@ -621,11 +616,11 @@ public void testDFSClient() throws Exception {
fs.create(new Path("/tmp/nonEmptyDir/emptyFile")).close();
try {
fs.delete(new Path("/tmp/nonEmptyDir"), false);
- Assert.fail("Expecting PathIsNotEmptyDirectoryException");
+ Assertions.fail("Expecting PathIsNotEmptyDirectoryException");
} catch (PathIsNotEmptyDirectoryException ex) {
// This is the proper exception to catch; move on.
}
- Assert.assertTrue(fs.exists(new Path("/test/nonEmptyDir")));
+ Assertions.assertTrue(fs.exists(new Path("/test/nonEmptyDir")));
fs.delete(new Path("/tmp/nonEmptyDir"), true);
}
@@ -1055,8 +1050,8 @@ public void run() {
// wait until all threads are done
allDone.await();
- assertNull("Child failed with exception " + childError.get(),
- childError.get());
+ assertNull(
+ childError.get(), "Child failed with exception " + childError.get());
checkStatistics(fs, 0, numThreads, 0);
// check the single operation count stat
@@ -1068,8 +1063,8 @@ public void run() {
opCountIter.hasNext();) {
final LongStatistic opCount = opCountIter.next();
if (OpType.MKDIRS.getSymbol().equals(opCount.getName())) {
- assertEquals("Unexpected op count from iterator!",
- numThreads + oldMkdirOpCount, opCount.getValue());
+ assertEquals(numThreads + oldMkdirOpCount, opCount.getValue(),
+ "Unexpected op count from iterator!");
}
LOG.info(opCount.getName() + "\t" + opCount.getValue());
}
@@ -1187,8 +1182,8 @@ private void testReadFileSystemStatistics(int expectedDistance,
}
public static void checkOpStatistics(OpType op, long count) {
- assertEquals("Op " + op.getSymbol() + " has unexpected count!",
- count, getOpStatistics(op));
+ assertEquals(count, getOpStatistics(op),
+ "Op " + op.getSymbol() + " has unexpected count!");
}
public static long getOpStatistics(OpType op) {
@@ -1219,8 +1214,8 @@ public void testFileChecksum() throws Exception {
"/test/TestNonExistingFile"));
fail("Expecting FileNotFoundException");
} catch (FileNotFoundException e) {
- assertTrue("Not throwing the intended exception message", e.getMessage()
- .contains("File does not exist: /test/TestNonExistingFile"));
+ assertTrue(e.getMessage()
+ .contains("File does not exist: /test/TestNonExistingFile"),
+ "Not throwing the intended exception message");
}
try {
@@ -1229,8 +1224,8 @@ public void testFileChecksum() throws Exception {
hdfs.getFileChecksum(path);
fail("Expecting FileNotFoundException");
} catch (FileNotFoundException e) {
- assertTrue("Not throwing the intended exception message", e.getMessage()
- .contains("Path is not a file: /test/TestExistingDir"));
+ assertTrue(e.getMessage()
+ .contains("Path is not a file: /test/TestExistingDir"),
+ "Not throwing the intended exception message");
}
//webhdfs
@@ -1383,10 +1378,10 @@ public void testLocatedFileStatusStorageIdsTypes() throws Exception {
DFSTestUtil.waitForReplication(fs, testFile, (short) repl, 30000);
// Get the listing
RemoteIterator it = fs.listLocatedStatus(testFile);
- assertTrue("Expected file to be present", it.hasNext());
+ assertTrue(it.hasNext(), "Expected file to be present");
LocatedFileStatus stat = it.next();
BlockLocation[] locs = stat.getBlockLocations();
- assertEquals("Unexpected number of locations", numBlocks, locs.length);
+ assertEquals(numBlocks, locs.length, "Unexpected number of locations");
Set dnStorageIds = new HashSet<>();
for (DataNode d : cluster.getDataNodes()) {
@@ -1403,15 +1398,15 @@ public void testLocatedFileStatusStorageIdsTypes() throws Exception {
// Run it through a set to deduplicate, since there should be no dupes
Set storageIds = new HashSet<>();
Collections.addAll(storageIds, ids);
- assertEquals("Unexpected num storage ids", repl, storageIds.size());
- // Make sure these are all valid storage IDs
- assertTrue("Unknown storage IDs found!", dnStorageIds.containsAll
- (storageIds));
+ assertEquals(repl, storageIds.size(), "Unexpected num storage ids");
+ // Make sure these are all valid storage IDs
+ assertTrue(dnStorageIds.containsAll(storageIds),
+ "Unknown storage IDs found!");
// Check storage types are the default, since we didn't set any
StorageType[] types = loc.getStorageTypes();
- assertEquals("Unexpected num storage types", repl, types.length);
+ assertEquals(repl, types.length, "Unexpected num storage types");
for (StorageType t: types) {
- assertEquals("Unexpected storage type", StorageType.DEFAULT, t);
+ assertEquals(StorageType.DEFAULT, t, "Unexpected storage type");
}
}
} finally {
@@ -1489,9 +1484,9 @@ public void testFileCloseStatus() throws IOException {
// write to file
output.writeBytes("Some test data");
output.flush();
- assertFalse("File status should be open", fs.isFileClosed(file));
+ assertFalse(fs.isFileClosed(file), "File status should be open");
output.close();
- assertTrue("File status should be closed", fs.isFileClosed(file));
+ assertTrue(fs.isFileClosed(file), "File status should be closed");
} finally {
cluster.shutdown();
}
@@ -1596,7 +1591,7 @@ public void testDFSClientPeerReadTimeout() throws IOException {
long start = Time.now();
try {
peer.getInputStream().read();
- Assert.fail("read should timeout");
+ Assertions.fail("read should timeout");
} catch (SocketTimeoutException ste) {
long delta = Time.now() - start;
if (delta < timeout*0.9) {
@@ -1647,7 +1642,7 @@ public void testDFSClientPeerWriteTimeout() throws IOException {
byte[] buf = new byte[10 * 1024 * 1024];
peer.getOutputStream().write(buf);
long delta = Time.now() - start;
- Assert.fail("write finish in " + delta + " ms" + "but should timedout");
+ Assertions.fail("write finished in " + delta + " ms, but should have timed out");
} catch (SocketTimeoutException ste) {
long delta = Time.now() - start;
@@ -1805,15 +1800,15 @@ public void testDFSDataOutputStreamBuilderForCreation() throws Exception {
} catch (FileNotFoundException e) {
// As expected.
}
- assertFalse("parent directory should not be created",
- fs.exists(new Path("/parent")));
+ assertFalse(
+ fs.exists(new Path("/parent")), "parent directory should not be created");
try (FSDataOutputStream out = fs.createFile(nonParentFile).recursive()
.build()) {
out.write(1);
}
- assertTrue("parent directory has not been created",
- fs.exists(new Path("/parent")));
+ assertTrue(
+ fs.exists(new Path("/parent")), "parent directory has not been created");
}
}
@@ -2007,7 +2002,7 @@ public void testEnableAndDisableErasureCodingPolicy() throws Exception {
//test enable a policy that doesn't exist
try {
fs.enableErasureCodingPolicy("notExistECName");
- Assert.fail("enable the policy that doesn't exist should fail");
+ Assertions.fail("enable the policy that doesn't exist should fail");
} catch (Exception e) {
GenericTestUtils.assertExceptionContains("does not exist", e);
// pass
@@ -2016,7 +2011,7 @@ public void testEnableAndDisableErasureCodingPolicy() throws Exception {
//test disable a policy that doesn't exist
try {
fs.disableErasureCodingPolicy("notExistECName");
- Assert.fail("disable the policy that doesn't exist should fail");
+ Assertions.fail("disable the policy that doesn't exist should fail");
} catch (Exception e) {
GenericTestUtils.assertExceptionContains("does not exist", e);
// pass
@@ -2078,7 +2073,7 @@ public void testStorageFavouredNodes()
.getBlockLocations(file1.toUri().getPath(), 0, Long.MAX_VALUE);
int numSSD = Collections.frequency(
Arrays.asList(locations[0].getStorageTypes()), StorageType.SSD);
- assertEquals("Number of SSD should be 1 but was : " + numSSD, 1, numSSD);
+ assertEquals(1, numSSD, "Number of SSD should be 1 but was : " + numSSD);
}
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystemWithECFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystemWithECFile.java
index 1a2c4de3974f6..265dd1cfa1fd1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystemWithECFile.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystemWithECFile.java
@@ -28,10 +28,10 @@
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.apache.hadoop.hdfs.protocol.SystemErasureCodingPolicies;
import org.apache.hadoop.io.IOUtils;
-import org.junit.After;
-import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
import org.junit.rules.Timeout;
import java.io.IOException;
@@ -39,9 +39,7 @@
import java.util.ArrayList;
import java.util.List;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNull;
+import static org.junit.jupiter.api.Assertions.assertTrue;
/**
* Testing correctness of FileSystem.getFileBlockLocations and
@@ -69,7 +67,7 @@ public ErasureCodingPolicy getEcPolicy() {
@Rule
public final Timeout globalTimeout = new Timeout(60000 * 3);
- @Before
+ @BeforeEach
public void setup() throws IOException {
ecPolicy = getEcPolicy();
cellSize = ecPolicy.getCellSize();
@@ -92,7 +90,7 @@ public void setup() throws IOException {
ecPolicy.getName());
}
- @After
+ @AfterEach
public void tearDown() throws IOException {
if (cluster != null) {
cluster.shutdown();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptedTransfer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptedTransfer.java
index f9336fcfdc74e..e7ef41f03ea49 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptedTransfer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptedTransfer.java
@@ -17,10 +17,7 @@
*/
package org.apache.hadoop.hdfs;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assertions.fail;
import static org.mockito.Mockito.times;
import java.io.IOException;
@@ -53,11 +50,11 @@
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
-import org.junit.After;
-import org.junit.Before;
import org.junit.Rule;
-import org.junit.Test;
-import org.junit.Assert;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
import org.junit.rules.Timeout;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
@@ -118,12 +115,12 @@ public TestEncryptedTransfer(String resolverClazz){
this.resolverClazz = resolverClazz;
}
- @Before
+ @BeforeEach
public void setup() throws IOException {
conf = new Configuration();
}
- @After
+ @AfterEach
public void teardown() throws IOException {
if (fs != null) {
fs.close();
@@ -365,11 +362,11 @@ public Boolean get() {
LOG.info("The encryption key is invalid on all nodes now.");
fs.getFileChecksum(TEST_PATH);
// verify that InvalidEncryptionKeyException is handled properly
- Assert.assertTrue(client.getEncryptionKey() == null);
+ Assertions.assertTrue(client.getEncryptionKey() == null);
Mockito.verify(spyClient, times(1)).clearDataEncryptionKey();
// Retry the operation after clearing the encryption key
FileChecksum verifyChecksum = fs.getFileChecksum(TEST_PATH);
- Assert.assertEquals(checksum, verifyChecksum);
+ Assertions.assertEquals(checksum, verifyChecksum);
}
@Test
@@ -428,8 +425,8 @@ public Boolean get() {
// write data to induce pipeline recovery
out.write(PLAIN_TEXT.getBytes());
out.hflush();
- assertFalse("The first datanode in the pipeline was not replaced.",
- Arrays.asList(dfstream.getPipeline()).contains(targets[0]));
+ assertFalse(
+ Arrays.asList(dfstream.getPipeline()).contains(targets[0]),
+ "The first datanode in the pipeline was not replaced.");
}
// verify that InvalidEncryptionKeyException is handled properly
Mockito.verify(spyClient, times(1)).clearDataEncryptionKey();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java
index 0775e0426d0f2..78250fa7640e7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java
@@ -17,40 +17,14 @@
*/
package org.apache.hadoop.hdfs;
-import java.io.ByteArrayOutputStream;
-import java.io.File;
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.OutputStream;
-import java.io.PrintStream;
-import java.io.RandomAccessFile;
-import java.io.StringReader;
-import java.net.HttpURLConnection;
-import java.net.InetSocketAddress;
-import java.net.URI;
-import java.net.URL;
-import java.security.PrivilegedExceptionAction;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.EnumSet;
-import java.util.List;
-import java.util.Map;
-import java.util.concurrent.Callable;
-import java.util.concurrent.CountDownLatch;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.Future;
-
-import org.apache.hadoop.test.GenericTestUtils;
-
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.crypto.CipherSuite;
import org.apache.hadoop.crypto.CryptoInputStream;
import org.apache.hadoop.crypto.CryptoProtocolVersion;
import org.apache.hadoop.crypto.key.JavaKeyStoreProvider;
import org.apache.hadoop.crypto.key.KeyProvider;
+import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension.CryptoExtension;
+import org.apache.hadoop.crypto.key.KeyProviderDelegationTokenExtension.DelegationTokenExtension;
import org.apache.hadoop.crypto.key.KeyProviderFactory;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.fs.FSDataInputStream;
@@ -74,9 +48,9 @@
import org.apache.hadoop.hdfs.client.HdfsAdmin;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.EncryptionZone;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport.DiffReportEntry;
import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport.DiffType;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
@@ -91,35 +65,60 @@
import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
import org.apache.hadoop.hdfs.web.WebHdfsTestUtil;
import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.io.Text;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authorize.AuthorizationException;
-import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.DelegationTokenIssuer;
+import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.util.DataChecksum;
import org.apache.hadoop.util.Lists;
import org.apache.hadoop.util.ToolRunner;
-import org.apache.hadoop.crypto.key.KeyProviderDelegationTokenExtension.DelegationTokenExtension;
-import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension.CryptoExtension;
-import org.apache.hadoop.io.Text;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
import org.junit.Rule;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
import org.junit.rules.Timeout;
import org.mockito.Mockito;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.slf4j.event.Level;
+import org.xml.sax.InputSource;
+import org.xml.sax.helpers.DefaultHandler;
+
+import javax.xml.parsers.SAXParser;
+import javax.xml.parsers.SAXParserFactory;
+import java.io.ByteArrayOutputStream;
+import java.io.File;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.io.PrintStream;
+import java.io.RandomAccessFile;
+import java.io.StringReader;
+import java.net.HttpURLConnection;
+import java.net.InetSocketAddress;
+import java.net.URI;
+import java.net.URL;
+import java.security.PrivilegedExceptionAction;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.EnumSet;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.Callable;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
import static org.apache.hadoop.fs.CommonConfigurationKeys.DFS_CLIENT_IGNORE_NAMENODE_DEFAULT_KMS_URI;
-import static org.junit.Assert.assertNotNull;
-import static org.mockito.ArgumentMatchers.any;
-import static org.mockito.ArgumentMatchers.anyBoolean;
-import static org.mockito.ArgumentMatchers.anyLong;
-import static org.mockito.ArgumentMatchers.anyShort;
-import static org.mockito.Mockito.withSettings;
-import static org.mockito.Mockito.anyString;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_DEFAULT;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT;
@@ -138,21 +137,19 @@
import static org.apache.hadoop.test.GenericTestUtils.assertExceptionContains;
import static org.apache.hadoop.test.MetricsAsserts.assertGauge;
import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotEquals;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.slf4j.event.Level;
-import org.xml.sax.InputSource;
-import org.xml.sax.helpers.DefaultHandler;
-
-import javax.xml.parsers.SAXParser;
-import javax.xml.parsers.SAXParserFactory;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertNotEquals;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertNull;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assertions.fail;
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.ArgumentMatchers.anyBoolean;
+import static org.mockito.ArgumentMatchers.anyLong;
+import static org.mockito.ArgumentMatchers.anyShort;
+import static org.mockito.Mockito.anyString;
+import static org.mockito.Mockito.withSettings;
public class TestEncryptionZones {
static final Logger LOG = LoggerFactory.getLogger(TestEncryptionZones.class);
@@ -184,7 +181,7 @@ protected String getKeyProviderURI() {
@Rule
public Timeout globalTimeout = new Timeout(120 * 1000);
- @Before
+ @BeforeEach
public void setup() throws Exception {
conf = new HdfsConfiguration();
fsHelper = new FileSystemTestHelper();
@@ -218,7 +215,7 @@ protected void setProvider() {
.getProvider());
}
- @After
+ @AfterEach
public void teardown() {
if (cluster != null) {
cluster.shutdown();
@@ -234,7 +231,7 @@ public void assertNumZones(final int numZones) throws IOException {
count++;
it.next();
}
- assertEquals("Unexpected number of encryption zones!", numZones, count);
+ assertEquals(numZones, count, "Unexpected number of encryption zones!");
}
/**
@@ -261,9 +258,9 @@ public void assertZonePresent(String keyName, String path) throws IOException {
break;
}
}
- assertTrue("Did not find expected encryption zone with keyName " + keyName +
- " path " + path, match
- );
+ assertTrue(match,
+ "Did not find expected encryption zone with keyName " + keyName +
+ " path " + path);
}
/**
@@ -298,7 +295,7 @@ public void testTrashStickyBit() throws Exception {
final FsShell shell = new FsShell(clientConf);
String[] argv = new String[]{"-rm", ezfile1.toString()};
int res = ToolRunner.run(shell, argv);
- assertEquals("Can't remove a file in EZ as superuser", 0, res);
+ assertEquals(0, res, "Can't remove a file in EZ as superuser");
final Path trashDir = new Path(zone1, FileSystem.TRASH_PREFIX);
assertTrue(fsWrapper.exists(trashDir));
@@ -323,7 +320,7 @@ public Object run() throws Exception {
// /zones/zone1/.Trash/user/Current/zones/zone1/file2
String[] argv = new String[]{"-rm", ezfile2.toString()};
int res = ToolRunner.run(shell, argv);
- assertEquals("Can't remove a file in EZ as user:mygroup", 0, res);
+ assertEquals(0, res, "Can't remove a file in EZ as user:mygroup");
return null;
}
});
@@ -370,23 +367,23 @@ public void testEncryptionZonesDictCp() throws Exception {
fail("Exception should be thrown while setting: " +
xattrName + " on file:" + raw2File);
} catch (RemoteException e) {
- Assert.assertEquals(e.getClassName(),
+ Assertions.assertEquals(e.getClassName(),
IllegalArgumentException.class.getCanonicalName());
- Assert.assertTrue(e.getMessage().
+ Assertions.assertTrue(e.getMessage().
contains("does not belong to the key"));
}
}
}
- assertEquals("File can be created on the root encryption zone " +
- "with correct length", len, fs.getFileStatus(zone1File).getLen());
- assertTrue("/zone1 dir is encrypted",
- fs.getFileStatus(zone1).isEncrypted());
- assertTrue("File is encrypted", fs.getFileStatus(zone1File).isEncrypted());
+ assertEquals(len, fs.getFileStatus(zone1File).getLen(),
+ "File can be created on the root encryption zone " +
+ "with correct length");
+ assertTrue(
+ fs.getFileStatus(zone1).isEncrypted(), "/zone1 dir is encrypted");
+ assertTrue(fs.getFileStatus(zone1File).isEncrypted(), "File is encrypted");
- assertTrue("/zone2 dir is encrypted",
- fs.getFileStatus(zone2).isEncrypted());
- assertTrue("File is encrypted", fs.getFileStatus(zone2File).isEncrypted());
+ assertTrue(
+ fs.getFileStatus(zone2).isEncrypted(), "/zone2 dir is encrypted");
+ assertTrue(fs.getFileStatus(zone2File).isEncrypted(), "File is encrypted");
// 4. Now the decrypted contents of the files should be different.
DFSTestUtil.verifyFilesNotEqual(fs, zone1File, zone2File, len);
@@ -418,7 +415,7 @@ public void testProvisionTrash() throws Exception {
final Path trashDir = new Path(zone1, FileSystem.TRASH_PREFIX);
String[] argv = new String[]{"-rmdir", trashDir.toUri().getPath()};
int res = ToolRunner.run(shell, argv);
- assertEquals("Unable to delete trash directory.", 0, res);
+ assertEquals(0, res, "Unable to delete trash directory.");
assertFalse(fsWrapper.exists(trashDir));
// execute -provisionTrash command option and make sure the trash
@@ -436,11 +433,11 @@ public void testProvisionTrash() throws Exception {
@Test
public void testBasicOperations() throws Exception {
- assertNotNull("key provider is not present", dfsAdmin.getKeyProvider());
+ assertNotNull(dfsAdmin.getKeyProvider(), "key provider is not present");
int numZones = 0;
- /* Number of EZs should be 0 if no EZ is created */
- assertEquals("Unexpected number of encryption zones!", numZones,
- cluster.getNamesystem().getNumEncryptionZones());
+ /* Number of EZs should be 0 if no EZ is created */
+ assertEquals(numZones,
+ cluster.getNamesystem().getNumEncryptionZones(),
+ "Unexpected number of encryption zones!");
/* Test failure of create EZ on a directory that doesn't exist. */
final Path zoneParent = new Path("/zones");
final Path zone1 = new Path(zoneParent, "zone1");
@@ -577,8 +574,8 @@ public Object run() throws Exception {
fs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
cluster.restartNameNode(true);
assertNumZones(numZones);
- assertEquals("Unexpected number of encryption zones!", numZones, cluster
- .getNamesystem().getNumEncryptionZones());
+ assertEquals(numZones,
+ cluster.getNamesystem().getNumEncryptionZones(),
+ "Unexpected number of encryption zones!");
assertGauge("NumEncryptionZones", numZones, getMetrics(NS_METRICS));
assertZonePresent(null, zone1.toString());
@@ -635,14 +632,14 @@ public void testEZwithFullyQualifiedPath() throws Exception {
assertZonePresent(TEST_KEY, zone1.toString());
// Check that zone1 contains a .Trash directory
final Path zone1Trash = new Path(zone1, fs.TRASH_PREFIX);
- assertTrue("CreateEncryptionZone with trash enabled should create a " +
- ".Trash directory in the EZ", fs.exists(zone1Trash));
+ assertTrue(fs.exists(zone1Trash),
+ "CreateEncryptionZone with trash enabled should create a " +
+ ".Trash directory in the EZ");
// getEncryptionZoneForPath for FQP should return the path component
EncryptionZone ezForZone1 = dfsAdmin.getEncryptionZoneForPath(zone1FQP);
- assertTrue("getEncryptionZoneForPath for fully qualified path should " +
- "return the path component",
- ezForZone1.getPath().equals(zone1.toString()));
+ assertTrue(ezForZone1.getPath().equals(zone1.toString()),
+ "getEncryptionZoneForPath for fully qualified path should " +
+ "return the path component");
// Create EZ without Trash
fsWrapper.mkdir(zone2FQP, FsPermission.getDirDefault(), true);
@@ -655,8 +652,9 @@ public void testEZwithFullyQualifiedPath() throws Exception {
EncryptionZone ezForZone2 = dfsAdmin.getEncryptionZoneForPath(zone2FQP);
Path ezTrashForZone2 = new Path(ezForZone2.getPath(),
FileSystem.TRASH_PREFIX);
- assertTrue("provisionEZTrash with fully qualified path should create " +
- "trash directory ", fsWrapper.exists(ezTrashForZone2));
+ assertTrue(fsWrapper.exists(ezTrashForZone2),
+ "provisionEZTrash with fully qualified path should create "
+ + "trash directory ");
}
/**
@@ -741,13 +739,13 @@ public Object run() throws Exception {
*/
}
- // Check operation with accessible paths
- assertEquals("expected ez path", allPath.toString(),
- userAdmin.getEncryptionZoneForPath(allPath).getPath().
- toString());
- assertEquals("expected ez path", allPath.toString(),
- userAdmin.getEncryptionZoneForPath(allPathFile).getPath().
- toString());
+ // Check operation with accessible paths
+ assertEquals(allPath.toString(),
+ userAdmin.getEncryptionZoneForPath(allPath).getPath().
+ toString(), "expected ez path");
+ assertEquals(allPath.toString(),
+ userAdmin.getEncryptionZoneForPath(allPathFile).getPath().
+ toString(), "expected ez path");
// Check operation with inaccessible (lack of permissions) path
try {
@@ -757,39 +755,39 @@ public Object run() throws Exception {
assertExceptionContains("Permission denied:", e);
}
- assertNull("expected null for nonexistent path",
- userAdmin.getEncryptionZoneForPath(nonexistent));
+ assertNull(userAdmin.getEncryptionZoneForPath(nonexistent),
+ "expected null for nonexistent path");
- // Check operation with non-ez paths
- assertNull("expected null for non-ez path",
- userAdmin.getEncryptionZoneForPath(nonEZDir));
- assertNull("expected null for non-ez path",
- userAdmin.getEncryptionZoneForPath(nonEZFile));
+ // Check operation with non-ez paths
+ assertNull(userAdmin.getEncryptionZoneForPath(nonEZDir),
+ "expected null for non-ez path");
+ assertNull(userAdmin.getEncryptionZoneForPath(nonEZFile),
+ "expected null for non-ez path");
// Check operation with snapshots
String snapshottedAllPath = newSnap.toString() + allPath.toString();
- assertEquals("expected ez path", allPath.toString(),
- userAdmin.getEncryptionZoneForPath(
- new Path(snapshottedAllPath)).getPath().toString());
+ assertEquals(allPath.toString(),
+ userAdmin.getEncryptionZoneForPath(
+ new Path(snapshottedAllPath)).getPath().toString(),
+ "expected ez path");
/*
* Delete the file from the non-snapshot and test that it is still ok
* in the ez.
*/
fs.delete(allPathFile, false);
- assertEquals("expected ez path", allPath.toString(),
- userAdmin.getEncryptionZoneForPath(
- new Path(snapshottedAllPath)).getPath().toString());
+ assertEquals(allPath.toString(),
+ userAdmin.getEncryptionZoneForPath(
+ new Path(snapshottedAllPath)).getPath().toString(),
+ "expected ez path");
// Delete the ez and make sure ss's ez is still ok.
fs.delete(allPath, true);
- assertEquals("expected ez path", allPath.toString(),
- userAdmin.getEncryptionZoneForPath(
- new Path(snapshottedAllPath)).getPath().toString());
- assertNull("expected null for deleted file path",
- userAdmin.getEncryptionZoneForPath(allPathFile));
- assertNull("expected null for deleted directory path",
- userAdmin.getEncryptionZoneForPath(allPath));
+ assertEquals(allPath.toString(),
+ userAdmin.getEncryptionZoneForPath(
+ new Path(snapshottedAllPath)).getPath().toString(),
+ "expected ez path");
+ assertNull(userAdmin.getEncryptionZoneForPath(allPathFile),
+ "expected null for deleted file path");
+ assertNull(userAdmin.getEncryptionZoneForPath(allPath),
+ "expected null for deleted directory path");
return null;
}
});
@@ -821,16 +819,16 @@ private void doRenameEncryptionZone(FSTestWrapper wrapper) throws Exception {
// Verify that we can rename dir and files within an encryption zone.
assertTrue(fs.rename(pathFooBaz, pathFooBar));
- assertTrue("Rename of dir and file within ez failed",
- !wrapper.exists(pathFooBaz) && wrapper.exists(pathFooBar));
- assertEquals("Renamed file contents not the same",
- contents, DFSTestUtil.readFile(fs, pathFooBarFile));
+ assertTrue(!wrapper.exists(pathFooBaz) && wrapper.exists(pathFooBar),
+ "Rename of dir and file within ez failed");
+ assertEquals(contents, DFSTestUtil.readFile(fs, pathFooBarFile),
+ "Renamed file contents not the same");
// Verify that we can rename an EZ root
final Path newFoo = new Path(testRoot, "newfoo");
- assertTrue("Rename of EZ root", fs.rename(pathFoo, newFoo));
- assertTrue("Rename of EZ root failed",
- !wrapper.exists(pathFoo) && wrapper.exists(newFoo));
+ assertTrue(fs.rename(pathFoo, newFoo), "Rename of EZ root");
+ assertTrue(!wrapper.exists(pathFoo) && wrapper.exists(newFoo),
+ "Rename of EZ root failed");
// Verify that we can't rename an EZ root onto itself
try {
@@ -884,11 +882,11 @@ public void testReadWrite() throws Exception {
// FEInfos should be different
FileEncryptionInfo feInfo1 = getFileEncryptionInfo(encFile1);
FileEncryptionInfo feInfo2 = getFileEncryptionInfo(encFile2);
- assertFalse("EDEKs should be different", Arrays
- .equals(feInfo1.getEncryptedDataEncryptionKey(),
- feInfo2.getEncryptedDataEncryptionKey()));
- assertNotEquals("Key was rolled, versions should be different",
- feInfo1.getEzKeyVersionName(), feInfo2.getEzKeyVersionName());
+ assertFalse(Arrays
+ .equals(feInfo1.getEncryptedDataEncryptionKey(),
+ feInfo2.getEncryptedDataEncryptionKey()),
+ "EDEKs should be different");
+ assertNotEquals(feInfo1.getEzKeyVersionName(),
+ feInfo2.getEzKeyVersionName(),
+ "Key was rolled, versions should be different");
// Contents still equal
verifyFilesEqual(fs, encFile1, encFile2, len);
}
@@ -991,13 +989,13 @@ public void testVersionAndSuiteNegotiation() throws Exception {
CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH)),
conf);
List keys = provider.getKeys();
- assertEquals("Expected NN to have created one key per zone", 1,
- keys.size());
+ assertEquals(1,
+ keys.size(), "Expected NN to have created one key per zone");
List allVersions = Lists.newArrayList();
for (String key : keys) {
List versions = provider.getKeyVersions(key);
- assertEquals("Should only have one key version per key", 1,
- versions.size());
+ assertEquals(1,
+ versions.size(), "Should only have one key version per key");
allVersions.addAll(versions);
}
// Check that the specified CipherSuite was correctly saved on the NN
@@ -1123,8 +1121,8 @@ private void dTIEM(Path prefix) throws Exception {
final Path baseFile = new Path(prefix, "base");
fsWrapper.createFile(baseFile);
FileStatus stat = fsWrapper.getFileStatus(baseFile);
- assertFalse("Expected isEncrypted to return false for " + baseFile,
- stat.isEncrypted());
+ assertFalse(
+ stat.isEncrypted(), "Expected isEncrypted to return false for " + baseFile);
// Create an encrypted file to check isEncrypted returns true
final Path zone = new Path(prefix, "zone");
@@ -1133,57 +1131,57 @@ private void dTIEM(Path prefix) throws Exception {
final Path encFile = new Path(zone, "encfile");
fsWrapper.createFile(encFile);
stat = fsWrapper.getFileStatus(encFile);
- assertTrue("Expected isEncrypted to return true for enc file" + encFile,
- stat.isEncrypted());
+ assertTrue(
+ stat.isEncrypted(), "Expected isEncrypted to return true for enc file" + encFile);
// check that it returns true for an ez root
stat = fsWrapper.getFileStatus(zone);
- assertTrue("Expected isEncrypted to return true for ezroot",
- stat.isEncrypted());
+ assertTrue(
+ stat.isEncrypted(), "Expected isEncrypted to return true for ezroot");
// check that it returns true for a dir in the ez
final Path zoneSubdir = new Path(zone, "subdir");
fsWrapper.mkdir(zoneSubdir, FsPermission.getDirDefault(), true);
stat = fsWrapper.getFileStatus(zoneSubdir);
- assertTrue(
- "Expected isEncrypted to return true for ez subdir " + zoneSubdir,
- stat.isEncrypted());
+ assertTrue(
+ stat.isEncrypted(),
+ "Expected isEncrypted to return true for ez subdir " + zoneSubdir);
// check that it returns false for a non ez dir
final Path nonEzDirPath = new Path(prefix, "nonzone");
fsWrapper.mkdir(nonEzDirPath, FsPermission.getDirDefault(), true);
stat = fsWrapper.getFileStatus(nonEzDirPath);
- assertFalse(
- "Expected isEncrypted to return false for directory " + nonEzDirPath,
- stat.isEncrypted());
+ assertFalse(
+ stat.isEncrypted(),
+ "Expected isEncrypted to return false for directory " + nonEzDirPath);
// check that it returns true for listings within an ez
FileStatus[] statuses = fsWrapper.listStatus(zone);
for (FileStatus s : statuses) {
- assertTrue("Expected isEncrypted to return true for ez stat " + zone,
- s.isEncrypted());
+ assertTrue(
+ s.isEncrypted(), "Expected isEncrypted to return true for ez stat " + zone);
}
statuses = fsWrapper.listStatus(encFile);
for (FileStatus s : statuses) {
- assertTrue(
- "Expected isEncrypted to return true for ez file stat " + encFile,
- s.isEncrypted());
+ assertTrue(
+ s.isEncrypted(),
+ "Expected isEncrypted to return true for ez file stat " + encFile);
}
// check that it returns false for listings outside an ez
statuses = fsWrapper.listStatus(nonEzDirPath);
for (FileStatus s : statuses) {
- assertFalse(
- "Expected isEncrypted to return false for nonez stat " + nonEzDirPath,
- s.isEncrypted());
+ assertFalse(
+ s.isEncrypted(),
+ "Expected isEncrypted to return false for nonez stat " + nonEzDirPath);
}
statuses = fsWrapper.listStatus(baseFile);
for (FileStatus s : statuses) {
- assertFalse(
- "Expected isEncrypted to return false for non ez stat " + baseFile,
- s.isEncrypted());
+ assertFalse(
+ s.isEncrypted(),
+ "Expected isEncrypted to return false for non ez stat " + baseFile);
}
}
@@ -1314,8 +1312,8 @@ public void testStartFileRetry() throws Exception {
executor.submit(new InjectFaultTask() {
@Override
public void doCleanup() throws Exception {
- assertEquals("Expected no startFile key generation",
- -1, injector.generateCount);
+ assertEquals(
+ -1, injector.generateCount, "Expected no startFile key generation");
fsWrapper.delete(file, false);
}
}).get();
@@ -1331,7 +1329,7 @@ public void doFault() throws Exception {
}
@Override
public void doCleanup() throws Exception {
- assertEquals("Expected no startFile retries", 1, injector.generateCount);
+ assertEquals(1, injector.generateCount, "Expected no startFile retries");
fsWrapper.delete(file, false);
}
}).get();
@@ -1353,7 +1351,7 @@ public void doFault() throws Exception {
}
@Override
public void doCleanup() throws Exception {
- assertEquals("Expected a startFile retry", 2, injector.generateCount);
+ assertEquals(2, injector.generateCount, "Expected a startFile retry");
fsWrapper.delete(zone1, true);
}
}).get();
@@ -1422,9 +1420,9 @@ public void testDelegationToken() throws Exception {
Credentials creds = new Credentials();
final Token> tokens[] = dfs.addDelegationTokens("JobTracker", creds);
LOG.debug("Delegation tokens: " + Arrays.asList(tokens));
- Assert.assertEquals(2, tokens.length);
- Assert.assertEquals(tokens[1], testToken);
- Assert.assertEquals(2, creds.numberOfTokens());
+ Assertions.assertEquals(2, tokens.length);
+ Assertions.assertEquals(tokens[1], testToken);
+ Assertions.assertEquals(2, creds.numberOfTokens());
}
/**
@@ -1443,18 +1441,18 @@ public void testFsckOnEncryptionZones() throws Exception {
PrintStream out = new PrintStream(bStream, true);
int errCode = ToolRunner.run(new DFSck(conf, out),
new String[]{ "/" });
- assertEquals("Fsck ran with non-zero error code", 0, errCode);
+ assertEquals(0, errCode, "Fsck ran with non-zero error code");
String result = bStream.toString();
- assertTrue("Fsck did not return HEALTHY status",
- result.contains(NamenodeFsck.HEALTHY_STATUS));
+ assertTrue(result.contains(NamenodeFsck.HEALTHY_STATUS),
+ "Fsck did not return HEALTHY status");
// Run fsck directly on the encryption zone instead of root
errCode = ToolRunner.run(new DFSck(conf, out),
new String[]{ zoneParent.toString() });
- assertEquals("Fsck ran with non-zero error code", 0, errCode);
+ assertEquals(0, errCode, "Fsck ran with non-zero error code");
result = bStream.toString();
- assertTrue("Fsck did not return HEALTHY status",
- result.contains(NamenodeFsck.HEALTHY_STATUS));
+ assertTrue(result.contains(NamenodeFsck.HEALTHY_STATUS),
+ "Fsck did not return HEALTHY status");
}
/**
@@ -1477,8 +1475,8 @@ public void testSnapshotsOnEncryptionZones() throws Exception {
String contents = DFSTestUtil.readFile(fs, zoneFile);
final Path snap1 = fs.createSnapshot(zoneParent, "snap1");
final Path snap1Zone = new Path(snap1, zone.getName());
- assertEquals("Got unexpected ez path", zone.toString(),
- dfsAdmin.getEncryptionZoneForPath(snap1Zone).getPath().toString());
+ assertEquals(zone.toString(),
+ dfsAdmin.getEncryptionZoneForPath(snap1Zone).getPath().toString(),
+ "Got unexpected ez path");
// Now delete the encryption zone, recreate the dir, and take another
// snapshot
@@ -1486,34 +1484,34 @@ public void testSnapshotsOnEncryptionZones() throws Exception {
fsWrapper.mkdir(zone, FsPermission.getDirDefault(), true);
final Path snap2 = fs.createSnapshot(zoneParent, "snap2");
final Path snap2Zone = new Path(snap2, zone.getName());
- assertEquals("Got unexpected ez path", zone.toString(),
- dfsAdmin.getEncryptionZoneForPath(snap1Zone).getPath().toString());
- assertNull("Expected null ez path",
- dfsAdmin.getEncryptionZoneForPath(snap2Zone));
+ assertEquals(zone.toString(),
+ dfsAdmin.getEncryptionZoneForPath(snap1Zone).getPath().toString(),
+ "Got unexpected ez path");
+ assertNull(dfsAdmin.getEncryptionZoneForPath(snap2Zone),
+ "Expected null ez path");
// Create the encryption zone again, and that shouldn't affect old snapshot
dfsAdmin.createEncryptionZone(zone, TEST_KEY2, NO_TRASH);
EncryptionZone ezSnap1 = dfsAdmin.getEncryptionZoneForPath(snap1Zone);
- assertEquals("Got unexpected ez path", zone.toString(),
- ezSnap1.getPath().toString());
- assertEquals("Unexpected ez key", TEST_KEY, ezSnap1.getKeyName());
- assertNull("Expected null ez path",
- dfsAdmin.getEncryptionZoneForPath(snap2Zone));
+ assertEquals(zone.toString(),
+ ezSnap1.getPath().toString(), "Got unexpected ez path");
+ assertEquals(TEST_KEY, ezSnap1.getKeyName(), "Unexpected ez key");
+ assertNull(
+ dfsAdmin.getEncryptionZoneForPath(snap2Zone), "Expected null ez path");
final Path snap3 = fs.createSnapshot(zoneParent, "snap3");
final Path snap3Zone = new Path(snap3, zone.getName());
// Check that snap3's EZ has the correct settings
EncryptionZone ezSnap3 = dfsAdmin.getEncryptionZoneForPath(snap3Zone);
- assertEquals("Got unexpected ez path", zone.toString(),
- ezSnap3.getPath().toString());
- assertEquals("Unexpected ez key", TEST_KEY2, ezSnap3.getKeyName());
+ assertEquals(zone.toString(),
+ ezSnap3.getPath().toString(), "Got unexpected ez path");
+ assertEquals(TEST_KEY2, ezSnap3.getKeyName(), "Unexpected ez key");
// Check that older snapshots still have the old EZ settings
ezSnap1 = dfsAdmin.getEncryptionZoneForPath(snap1Zone);
- assertEquals("Got unexpected ez path", zone.toString(),
- ezSnap1.getPath().toString());
- assertEquals("Unexpected ez key", TEST_KEY, ezSnap1.getKeyName());
- assertNull("Expected null ez path",
- dfsAdmin.getEncryptionZoneForPath(snap2Zone));
+ assertEquals(zone.toString(),
+ ezSnap1.getPath().toString(), "Got unexpected ez path");
+ assertEquals(TEST_KEY, ezSnap1.getKeyName(), "Unexpected ez key");
+ assertNull(
+ dfsAdmin.getEncryptionZoneForPath(snap2Zone), "Expected null ez path");
// Check that listEZs only shows the current filesystem state
ArrayList listZones = Lists.newArrayList();
@@ -1524,29 +1522,29 @@ public void testSnapshotsOnEncryptionZones() throws Exception {
for (EncryptionZone z: listZones) {
System.out.println(z);
}
- assertEquals("Did not expect additional encryption zones!", 1,
- listZones.size());
+ assertEquals(1,
+ listZones.size(), "Did not expect additional encryption zones!");
EncryptionZone listZone = listZones.get(0);
- assertEquals("Got unexpected ez path", zone.toString(),
- listZone.getPath().toString());
- assertEquals("Unexpected ez key", TEST_KEY2, listZone.getKeyName());
+ assertEquals(zone.toString(),
+ listZone.getPath().toString(), "Got unexpected ez path");
+ assertEquals(TEST_KEY2, listZone.getKeyName(), "Unexpected ez key");
// Verify contents of the snapshotted file
final Path snapshottedZoneFile = new Path(
snap1.toString() + "/" + zone.getName() + "/" + zoneFile.getName());
- assertEquals("Contents of snapshotted file have changed unexpectedly",
- contents, DFSTestUtil.readFile(fs, snapshottedZoneFile));
+ assertEquals(contents, DFSTestUtil.readFile(fs, snapshottedZoneFile),
+ "Contents of snapshotted file have changed unexpectedly");
// Now delete the snapshots out of order and verify the zones are still
// correct
fs.deleteSnapshot(zoneParent, snap2.getName());
- assertEquals("Got unexpected ez path", zone.toString(),
- dfsAdmin.getEncryptionZoneForPath(snap1Zone).getPath().toString());
- assertEquals("Got unexpected ez path", zone.toString(),
- dfsAdmin.getEncryptionZoneForPath(snap3Zone).getPath().toString());
+ assertEquals(zone.toString(),
+ dfsAdmin.getEncryptionZoneForPath(snap1Zone).getPath().toString(),
+ "Got unexpected ez path");
+ assertEquals(zone.toString(),
+ dfsAdmin.getEncryptionZoneForPath(snap3Zone).getPath().toString(),
+ "Got unexpected ez path");
fs.deleteSnapshot(zoneParent, snap1.getName());
- assertEquals("Got unexpected ez path", zone.toString(),
- dfsAdmin.getEncryptionZoneForPath(snap3Zone).getPath().toString());
+ assertEquals(zone.toString(),
+ dfsAdmin.getEncryptionZoneForPath(snap3Zone).getPath().toString(), "Got unexpected ez path");
}
/**
@@ -1570,16 +1568,16 @@ public void testSnapshotWithFile() throws Exception {
// Now delete the file and create encryption zone
fsWrapper.delete(zoneFile, false);
dfsAdmin.createEncryptionZone(zone, TEST_KEY, NO_TRASH);
- assertEquals("Got unexpected ez path", zone.toString(),
- dfsAdmin.getEncryptionZoneForPath(zone).getPath());
+ assertEquals(zone.toString(),
+ dfsAdmin.getEncryptionZoneForPath(zone).getPath(), "Got unexpected ez path");
// The file in snapshot shouldn't have any encryption info
final Path snapshottedZoneFile = new Path(
snap1 + "/" + zone.getName() + "/" + zoneFile.getName());
FileEncryptionInfo feInfo = getFileEncryptionInfo(snapshottedZoneFile);
- assertNull("Expected null ez info", feInfo);
- assertEquals("Contents of snapshotted file have changed unexpectedly",
- contents, DFSTestUtil.readFile(fs, snapshottedZoneFile));
+ assertNull(feInfo, "Expected null ez info");
+ assertEquals(
+ contents, DFSTestUtil.readFile(fs, snapshottedZoneFile), "Contents of snapshotted file have changed unexpectedly");
}
/**
@@ -1641,8 +1639,8 @@ public void testEncryptionZonesWithSymlinks() throws Exception {
DFSTestUtil.createFile(fs, target, len, (short)1, 0xFEED);
String content = DFSTestUtil.readFile(fs, target);
fs.createSymlink(target, link, false);
- assertEquals("Contents read from link are not the same as target",
- content, DFSTestUtil.readFile(fs, link));
+ assertEquals(
+ content, DFSTestUtil.readFile(fs, link), "Contents read from link are not the same as target");
fs.delete(parent, true);
// Now let's test when the symlink and target are in different
@@ -1654,8 +1652,8 @@ public void testEncryptionZonesWithSymlinks() throws Exception {
DFSTestUtil.createFile(fs, target, len, (short)1, 0xFEED);
content = DFSTestUtil.readFile(fs, target);
fs.createSymlink(target, link, false);
- assertEquals("Contents read from link are not the same as target",
- content, DFSTestUtil.readFile(fs, link));
+ assertEquals(
+ content, DFSTestUtil.readFile(fs, link), "Contents read from link are not the same as target");
fs.delete(link, true);
fs.delete(target, true);
}
@@ -1725,13 +1723,13 @@ public void testEncryptionZonesOnRootPath() throws Exception {
dfsAdmin.createEncryptionZone(rootDir, TEST_KEY, NO_TRASH);
DFSTestUtil.createFile(fs, zoneFile, len, (short) 1, 0xFEED);
- assertEquals("File can be created on the root encryption zone " +
- "with correct length",
- len, fs.getFileStatus(zoneFile).getLen());
- assertEquals("Root dir is encrypted",
- true, fs.getFileStatus(rootDir).isEncrypted());
- assertEquals("File is encrypted",
- true, fs.getFileStatus(zoneFile).isEncrypted());
+ assertEquals(
+ len, fs.getFileStatus(zoneFile).getLen(), "File can be created on the root encryption zone " +
+ "with correct length");
+ assertEquals(
+ true, fs.getFileStatus(rootDir).isEncrypted(), "Root dir is encrypted");
+ assertEquals(
+ true, fs.getFileStatus(zoneFile).isEncrypted(), "File is encrypted");
DFSTestUtil.verifyFilesNotEqual(fs, zoneFile, rawFile, len);
}
@@ -1749,8 +1747,8 @@ public void testEncryptionZonesOnRelativePath() throws Exception {
assertNumZones(1);
assertZonePresent(TEST_KEY, "/somewhere/base/zone");
- assertEquals("Got unexpected ez path", "/somewhere/base/zone", dfsAdmin
- .getEncryptionZoneForPath(zoneDir).getPath().toString());
+ assertEquals("/somewhere/base/zone", dfsAdmin
+ .getEncryptionZoneForPath(zoneDir).getPath().toString(), "Got unexpected ez path");
}
@Test
@@ -1760,12 +1758,12 @@ public void testGetEncryptionZoneOnANonExistentPaths() throws Exception {
dfsAdmin.createEncryptionZone(ezPath, TEST_KEY, NO_TRASH);
Path zoneFile = new Path(ezPath, "file");
EncryptionZone ez = fs.getEZForPath(zoneFile);
- assertNotNull("Expected EZ for non-existent path in EZ", ez);
+ assertNotNull(ez, "Expected EZ for non-existent path in EZ");
ez = dfsAdmin.getEncryptionZoneForPath(zoneFile);
- assertNotNull("Expected EZ for non-existent path in EZ", ez);
+ assertNotNull(ez, "Expected EZ for non-existent path in EZ");
ez = dfsAdmin.getEncryptionZoneForPath(
new Path("/does/not/exist"));
- assertNull("Expected null for non-existent path not in EZ", ez);
+ assertNull(ez, "Expected null for non-existent path not in EZ");
}
@Test
@@ -1851,8 +1849,8 @@ public void testRootDirEZTrash() throws Exception {
// if root path is an encryption zone
Path encFileCurrentTrash = shell.getCurrentTrashDir(encFile);
Path rootDirCurrentTrash = shell.getCurrentTrashDir(rootDir);
- assertEquals("Root trash should be equal with ezFile trash",
- encFileCurrentTrash, rootDirCurrentTrash);
+ assertEquals(
+ encFileCurrentTrash, rootDirCurrentTrash, "Root trash should be equal with ezFile trash");
// Use webHDFS client to test trash root path
final WebHdfsFileSystem webFS = WebHdfsTestUtil.getWebHdfsFileSystem(
@@ -1882,7 +1880,7 @@ public void testGetTrashRoots() throws Exception {
fs.mkdirs(ezRoot3);
dfsAdmin.createEncryptionZone(ezRoot3, TEST_KEY, NO_TRASH);
Collection<FileStatus> trashRootsBegin = fs.getTrashRoots(true);
- assertEquals("Unexpected getTrashRoots result", 0, trashRootsBegin.size());
+ assertEquals(0, trashRootsBegin.size(), "Unexpected getTrashRoots result");
final Path encFile = new Path(ezRoot2, "encFile");
final int len = 8192;
@@ -1893,16 +1891,16 @@ public void testGetTrashRoots() throws Exception {
verifyShellDeleteWithTrash(shell, encFile);
Collection<FileStatus> trashRootsDelete1 = fs.getTrashRoots(true);
- assertEquals("Unexpected getTrashRoots result", 1,
- trashRootsDelete1.size());
+ assertEquals(1,
+ trashRootsDelete1.size(), "Unexpected getTrashRoots result");
final Path nonEncFile = new Path("/nonEncFile");
DFSTestUtil.createFile(fs, nonEncFile, len, (short) 1, 0xFEED);
verifyShellDeleteWithTrash(shell, nonEncFile);
Collection<FileStatus> trashRootsDelete2 = fs.getTrashRoots(true);
- assertEquals("Unexpected getTrashRoots result", 2,
- trashRootsDelete2.size());
+ assertEquals(2,
+ trashRootsDelete2.size(), "Unexpected getTrashRoots result");
}
private void verifyShellDeleteWithTrash(FsShell shell, Path path)
@@ -1914,14 +1912,14 @@ private void verifyShellDeleteWithTrash(FsShell shell, Path path)
while (!checkTrash.isRoot() && !checkTrash.getName().equals(".Trash")) {
checkTrash = checkTrash.getParent();
}
- assertEquals("No .Trash component found in trash dir " + trashDir,
- ".Trash", checkTrash.getName());
+ assertEquals(
+ ".Trash", checkTrash.getName(), "No .Trash component found in trash dir " + trashDir);
final Path trashFile =
new Path(shell.getCurrentTrashDir(path) + "/" + path);
String[] argv = new String[]{"-rm", "-r", path.toString()};
int res = ToolRunner.run(shell, argv);
- assertEquals("rm failed", 0, res);
- assertTrue("File not in trash : " + trashFile, fs.exists(trashFile));
+ assertEquals(0, res, "rm failed");
+ assertTrue(fs.exists(trashFile), "File not in trash : " + trashFile);
} catch (IOException ioe) {
fail(ioe.getMessage());
} finally {
@@ -1948,9 +1946,9 @@ public void testProviderUriInCredentials() throws Exception {
credentials.addSecretKey(lookUpKey,
DFSUtilClient.string2Bytes(dummyKeyProvider));
client.ugi.addCredentials(credentials);
- Assert.assertEquals("Client Key provider is different from provider in "
- + "credentials map", dummyKeyProvider,
- client.getKeyProviderUri().toString());
+ Assertions.assertEquals(dummyKeyProvider,
+ client.getKeyProviderUri().toString(), "Client Key provider is different from provider in "
+ + "credentials map");
}
@@ -1974,9 +1972,9 @@ public void testKeyProviderFallBackBehavior() throws IOException {
getTestServerDefaults(null);
Mockito.doReturn(serverDefaultsWithKeyProviderNull)
.when(mockClient).getServerDefaults();
- Assert.assertEquals(
- "Key provider uri from client doesn't match with uri from conf",
- dummyKeyProviderUri1, mockClient.getKeyProviderUri().toString());
+ Assertions.assertEquals(
+ dummyKeyProviderUri1, mockClient.getKeyProviderUri().toString(),
+ "Key provider uri from client doesn't match with uri from conf");
Mockito.verify(mockClient, Mockito.times(1)).getServerDefaults();
String dummyKeyProviderUri2 = "dummy://foo:bar@test_provider2";
@@ -1985,9 +1983,9 @@ public void testKeyProviderFallBackBehavior() throws IOException {
// Namenode returning dummyKeyProvider2 in serverDefaults.
Mockito.doReturn(serverDefaultsWithDummyKeyProvider)
.when(mockClient).getServerDefaults();
- Assert.assertEquals(
- "Key provider uri from client doesn't match with uri from namenode",
- dummyKeyProviderUri2, mockClient.getKeyProviderUri().toString());
+ Assertions.assertEquals(
+ dummyKeyProviderUri2, mockClient.getKeyProviderUri().toString(),
+ "Key provider uri from client doesn't match with uri from namenode");
Mockito.verify(mockClient, Mockito.times(2)).getServerDefaults();
}
@@ -2001,38 +1999,38 @@ public void testKeyProviderFallBackBehavior() throws IOException {
public void testDifferentKMSProviderOnUpgradedNamenode() throws Exception {
Configuration clusterConf = cluster.getConfiguration(0);
URI namenodeKeyProviderUri = URI.create(getKeyProviderURI());
- Assert.assertEquals("Key Provider for client and namenode are different",
- namenodeKeyProviderUri, cluster.getFileSystem().getClient()
- .getKeyProviderUri());
+ Assertions.assertEquals(
+ namenodeKeyProviderUri, cluster.getFileSystem().getClient()
+ .getKeyProviderUri(), "Key Provider for client and namenode are different");
// Unset the provider path in conf
clusterConf.unset(
CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH);
- // Even after unsetting the local conf, the client key provider should be
- // the same as namenode's provider.
- Assert.assertEquals("Key Provider for client and namenode are different",
- namenodeKeyProviderUri, cluster.getFileSystem().getClient()
- .getKeyProviderUri());
+ // Even after unsetting the local conf, the client key provider should be
+ // the same as namenode's provider.
+ Assertions.assertEquals(
+ namenodeKeyProviderUri, cluster.getFileSystem().getClient()
+ .getKeyProviderUri(), "Key Provider for client and namenode are different");
// Set the provider path to some dummy scheme.
clusterConf.set(
CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH,
"dummy://foo:bar@test_provider1");
- // Even after pointing the conf to some dummy provider, the client key
- // provider should be the same as namenode's provider.
- Assert.assertEquals("Key Provider for client and namenode are different",
- namenodeKeyProviderUri, cluster.getFileSystem().getClient()
- .getKeyProviderUri());
+ // Even after pointing the conf to some dummy provider, the client key
+ // provider should be the same as namenode's provider.
+ Assertions.assertEquals(
+ namenodeKeyProviderUri, cluster.getFileSystem().getClient()
+ .getKeyProviderUri(), "Key Provider for client and namenode are different");
// Ignore the key provider from NN.
clusterConf.setBoolean(
DFS_CLIENT_IGNORE_NAMENODE_DEFAULT_KMS_URI, true);
- Assert.assertEquals("Expecting Key Provider for client config",
- "dummy://foo:bar@test_provider1", cluster.getFileSystem().getClient()
- .getKeyProviderUri().toString());
- Assert.assertNotEquals("Key Provider for client and namenode is different",
- namenodeKeyProviderUri, cluster.getFileSystem().getClient()
- .getKeyProviderUri().toString());
+ Assertions.assertEquals(
+ "dummy://foo:bar@test_provider1", cluster.getFileSystem().getClient()
+ .getKeyProviderUri().toString(), "Expecting Key Provider for client config");
+ Assertions.assertNotEquals(
+ namenodeKeyProviderUri, cluster.getFileSystem().getClient()
+ .getKeyProviderUri().toString(), "Key Provider for client and namenode is different");
}
/**
@@ -2047,11 +2045,11 @@ public void testDifferentKMSProviderOnUnUpgradedNamenode()
URI namenodeKeyProviderUri = URI.create(getKeyProviderURI());
URI clientKeyProviderUri =
cluster.getFileSystem().getClient().getKeyProviderUri();
- Assert.assertNotNull(clientKeyProviderUri);
- // Since the client and the namenode share the same conf, they will have
- // identical key provider.
- Assert.assertEquals("Key Provider for client and namenode are different",
- namenodeKeyProviderUri, clientKeyProviderUri);
+ Assertions.assertNotNull(clientKeyProviderUri);
+ // Since the client and the namenode share the same conf, they will have
+ // identical key provider.
+ Assertions.assertEquals(
+ namenodeKeyProviderUri, clientKeyProviderUri, "Key Provider for client and namenode are different");
String dummyKeyProviderUri = "dummy://foo:bar@test_provider";
// Unset the provider path in conf.
@@ -2067,8 +2065,8 @@ public void testDifferentKMSProviderOnUnUpgradedNamenode()
// Since FsServerDefaults#keyProviderUri is null, the client
// will fallback to local conf which is null.
clientKeyProviderUri = spyClient.getKeyProviderUri();
- Assert.assertEquals("Client keyProvider should be " + dummyKeyProviderUri,
- dummyKeyProviderUri, clientKeyProviderUri.toString());
+ Assertions.assertEquals(
+ dummyKeyProviderUri, clientKeyProviderUri.toString(), "Client keyProvider should be " + dummyKeyProviderUri);
Mockito.verify(spyClient, Mockito.times(1)).getServerDefaults();
}
@@ -2157,17 +2155,17 @@ public void testListEncryptionZonesWithSnapshots() throws Exception {
boolean match = false;
while (it.hasNext()) {
EncryptionZone ez = it.next();
- assertNotEquals("EncryptionZone " + zoneSubChild.toString() +
- " should not be listed.",
- ez.getPath(), zoneSubChild.toString());
+ assertNotEquals(
+ ez.getPath(), zoneSubChild.toString(), "EncryptionZone " + zoneSubChild.toString() +
+ " should not be listed.");
}
//will "trash" the zone direct child of snapshottable directory
verifyShellDeleteWithTrash(shell, zoneDirectChild);
//permanently remove zone direct child of snapshottable directory
fsWrapper.delete(shell.getCurrentTrashDir(zoneDirectChild), true);
- assertFalse("listEncryptionZones should not return anything, " +
- "since both EZs were deleted.",
- dfsAdmin.listEncryptionZones().hasNext());
+ assertFalse(
+ dfsAdmin.listEncryptionZones().hasNext(), "listEncryptionZones should not return anything, " +
+ "since both EZs were deleted.");
}
/**
@@ -2200,9 +2198,9 @@ public void addMockKmsToken() throws Exception {
final Token<?>[] tokens =
webfs.addDelegationTokens("JobTracker", creds);
- Assert.assertEquals(2, tokens.length);
- Assert.assertEquals(tokens[1], testToken);
- Assert.assertEquals(2, creds.numberOfTokens());
+ Assertions.assertEquals(2, tokens.length);
+ Assertions.assertEquals(tokens[1], testToken);
+ Assertions.assertEquals(2, creds.numberOfTokens());
}
/**
@@ -2237,8 +2235,8 @@ public void testWebhdfsRead() throws Exception {
// raw encrypted bytes.
InputStream cryptoStream =
webhdfs.open(encryptedFilePath).getWrappedStream();
- Assert.assertTrue("cryptoStream should be an instance of "
- + "CryptoInputStream", (cryptoStream instanceof CryptoInputStream));
+ Assertions.assertTrue((cryptoStream instanceof CryptoInputStream), "cryptoStream should be an instance of "
+ + "CryptoInputStream");
InputStream encryptedStream =
((CryptoInputStream)cryptoStream).getWrappedStream();
// Verify that the data read from the raw input stream is different
@@ -2254,7 +2252,7 @@ private void verifyStreamsSame(String content, InputStream is)
IOUtils.copyBytes(is, os, 1024, true);
streamBytes = os.toByteArray();
}
- Assert.assertArrayEquals(content.getBytes(), streamBytes);
+ Assertions.assertArrayEquals(content.getBytes(), streamBytes);
}
private void verifyRaw(String content, InputStream is, InputStream rawIs)
@@ -2264,14 +2262,14 @@ private void verifyRaw(String content, InputStream is, InputStream rawIs)
IOUtils.copyBytes(is, os, 1024, true);
streamBytes = os.toByteArray();
}
- Assert.assertFalse(Arrays.equals(content.getBytes(), streamBytes));
+ Assertions.assertFalse(Arrays.equals(content.getBytes(), streamBytes));
// webhdfs raw bytes should match the raw bytes from dfs.
try (ByteArrayOutputStream os = new ByteArrayOutputStream()) {
IOUtils.copyBytes(rawIs, os, 1024, true);
rawBytes = os.toByteArray();
}
- Assert.assertArrayEquals(rawBytes, streamBytes);
+ Assertions.assertArrayEquals(rawBytes, streamBytes);
}
/* Tests that if client is old and namenode is new then the
@@ -2297,7 +2295,7 @@ public void testWebhdfsReadOldBehavior() throws Exception {
String location = namenodeConnection.getHeaderField("Location");
URL datanodeURL = new URL(location);
String path = datanodeURL.getPath();
- Assert.assertEquals(
+ Assertions.assertEquals(
WebHdfsFileSystem.PATH_PREFIX + encryptedFilePath.toString(), path);
HttpURLConnection datanodeConnection = returnConnection(datanodeURL,
"GET", false);
@@ -2330,11 +2328,11 @@ public void testWebhfsEZRedirectLocation()
// Return a connection with client not supporting EZ.
HttpURLConnection namenodeConnection =
returnConnection(url, "GET", false);
- Assert.assertNotNull(namenodeConnection.getHeaderField("Location"));
+ Assertions.assertNotNull(namenodeConnection.getHeaderField("Location"));
URL datanodeUrl = new URL(namenodeConnection.getHeaderField("Location"));
- Assert.assertNotNull(datanodeUrl);
+ Assertions.assertNotNull(datanodeUrl);
String path = datanodeUrl.getPath();
- Assert.assertEquals(
+ Assertions.assertEquals(
WebHdfsFileSystem.PATH_PREFIX + encryptedFilePath.toString(), path);
url = new URL("http", addr.getHostString(), addr.getPort(),
@@ -2342,11 +2340,11 @@ public void testWebhfsEZRedirectLocation()
+ "?op=OPEN");
// Return a connection with client supporting EZ.
namenodeConnection = returnConnection(url, "GET", true);
- Assert.assertNotNull(namenodeConnection.getHeaderField("Location"));
+ Assertions.assertNotNull(namenodeConnection.getHeaderField("Location"));
datanodeUrl = new URL(namenodeConnection.getHeaderField("Location"));
- Assert.assertNotNull(datanodeUrl);
+ Assertions.assertNotNull(datanodeUrl);
path = datanodeUrl.getPath();
- Assert.assertEquals(WebHdfsFileSystem.PATH_PREFIX
+ Assertions.assertEquals(WebHdfsFileSystem.PATH_PREFIX
+ "/.reserved/raw" + encryptedFilePath.toString(), path);
}
@@ -2382,7 +2380,7 @@ public void testPread() throws Exception {
FSDataInputStream in = webfs.open(encryptedFilePath);
for (int i = 0; i < 1024; i++) {
in.seek(i);
- Assert.assertEquals((data[i] & 0XFF), in.read());
+ Assertions.assertEquals((data[i] & 0XFF), in.read());
}
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZonesWithHA.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZonesWithHA.java
index bb8b79b723f9f..651e32025794d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZonesWithHA.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZonesWithHA.java
@@ -28,10 +28,10 @@
import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
import org.apache.hadoop.hdfs.client.HdfsAdmin;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeEach;
import java.io.File;
import java.io.IOException;
@@ -55,7 +55,7 @@ public class TestEncryptionZonesWithHA {
protected static final EnumSet< CreateEncryptionZoneFlag > NO_TRASH =
EnumSet.of(CreateEncryptionZoneFlag.NO_TRASH);
- @Before
+ @BeforeEach
public void setupCluster() throws Exception {
conf = new Configuration();
conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
@@ -87,7 +87,7 @@ public void setupCluster() throws Exception {
fs.getClient().setKeyProvider(nn0Provider);
}
- @After
+ @AfterEach
public void shutdownCluster() throws IOException {
if (cluster != null) {
cluster.shutdown();
@@ -115,12 +115,12 @@ public void testEncryptionZonesTrackedOnStandby() throws Exception {
cluster.shutdownNameNode(0);
cluster.transitionToActive(1);
- Assert.assertEquals("Got unexpected ez path", dir.toString(),
- dfsAdmin1.getEncryptionZoneForPath(dir).getPath().toString());
- Assert.assertEquals("Got unexpected ez path", dir.toString(),
- dfsAdmin1.getEncryptionZoneForPath(dirChild).getPath().toString());
- Assert.assertEquals("File contents after failover were changed",
- contents, DFSTestUtil.readFile(fs, dirFile));
+ Assertions.assertEquals(dir.toString(),
+ dfsAdmin1.getEncryptionZoneForPath(dir).getPath().toString(), "Got unexpected ez path");
+ Assertions.assertEquals(dir.toString(),
+ dfsAdmin1.getEncryptionZoneForPath(dirChild).getPath().toString(), "Got unexpected ez path");
+ Assertions.assertEquals(
+ contents, DFSTestUtil.readFile(fs, dirFile), "File contents after failover were changed");
}
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZonesWithKMS.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZonesWithKMS.java
index d29db2bffc3c4..05c1f2fe77027 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZonesWithKMS.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZonesWithKMS.java
@@ -17,9 +17,10 @@
*/
package org.apache.hadoop.hdfs;
-import static org.junit.Assert.assertTrue;
-
import java.util.function.Supplier;
+
+import static org.junit.jupiter.api.Assertions.assertTrue;
+
import org.apache.hadoop.crypto.key.kms.KMSClientProvider;
import org.apache.hadoop.crypto.key.kms.KMSDelegationToken;
import org.apache.hadoop.crypto.key.kms.LoadBalancingKMSClientProvider;
@@ -34,10 +35,10 @@
import org.apache.hadoop.hdfs.web.WebHdfsTestUtil;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.test.Whitebox;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeEach;
import java.io.File;
import java.util.Arrays;
@@ -53,18 +54,18 @@ protected String getKeyProviderURI() {
miniKMS.getKMSUrl().toExternalForm().replace("://", "@");
}
- @Before
+ @BeforeEach
public void setup() throws Exception {
File kmsDir = new File("target/test-classes/" +
UUID.randomUUID().toString());
- Assert.assertTrue(kmsDir.mkdirs());
+ Assertions.assertTrue(kmsDir.mkdirs());
MiniKMS.Builder miniKMSBuilder = new MiniKMS.Builder();
miniKMS = miniKMSBuilder.setKmsConfDir(kmsDir).build();
miniKMS.start();
super.setup();
}
- @After
+ @AfterEach
public void teardown() {
super.teardown();
miniKMS.stop();
@@ -100,13 +101,13 @@ public void testDelegationToken() throws Exception {
Credentials creds = new Credentials();
Token<?> tokens[] = fs.addDelegationTokens(renewer, creds);
LOG.debug("Delegation tokens: " + Arrays.asList(tokens));
- Assert.assertEquals(2, tokens.length);
- Assert.assertEquals(2, creds.numberOfTokens());
+ Assertions.assertEquals(2, tokens.length);
+ Assertions.assertEquals(2, creds.numberOfTokens());
// If the dt exists, will not get again
tokens = fs.addDelegationTokens(renewer, creds);
- Assert.assertEquals(0, tokens.length);
- Assert.assertEquals(2, creds.numberOfTokens());
+ Assertions.assertEquals(0, tokens.length);
+ Assertions.assertEquals(2, creds.numberOfTokens());
}
@Test(timeout = 120000)
@@ -122,8 +123,8 @@ public void testWarmupEDEKCacheOnStartup() throws Exception {
@SuppressWarnings("unchecked")
KMSClientProvider spy = getKMSClientProvider();
- assertTrue("key queue is empty after creating encryption zone",
- spy.getEncKeyQueueSize(TEST_KEY) > 0);
+ assertTrue(
+ spy.getEncKeyQueueSize(TEST_KEY) > 0, "key queue is empty after creating encryption zone");
conf.setInt(
DFSConfigKeys.DFS_NAMENODE_EDEKCACHELOADER_INITIAL_DELAY_MS_KEY, 0);
@@ -151,9 +152,9 @@ public void addDelegationTokenFromWebhdfsFileSystem() throws Exception {
Credentials creds = new Credentials();
final Token<?>[] tokens = webfs.addDelegationTokens("JobTracker", creds);
- Assert.assertEquals(2, tokens.length);
- Assert.assertEquals(KMSDelegationToken.TOKEN_KIND_STR,
+ Assertions.assertEquals(2, tokens.length);
+ Assertions.assertEquals(KMSDelegationToken.TOKEN_KIND_STR,
tokens[1].getKind().toString());
- Assert.assertEquals(2, creds.numberOfTokens());
+ Assertions.assertEquals(2, creds.numberOfTokens());
}
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodeBenchmarkThroughput.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodeBenchmarkThroughput.java
index da3407d2fc12a..860d31f811a57 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodeBenchmarkThroughput.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodeBenchmarkThroughput.java
@@ -23,10 +23,10 @@
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
import org.apache.hadoop.util.ToolRunner;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
import org.junit.Rule;
import org.junit.rules.Timeout;
@@ -43,7 +43,7 @@ public class TestErasureCodeBenchmarkThroughput {
@Rule
public Timeout globalTimeout = new Timeout(300000);
- @BeforeClass
+ @BeforeAll
public static void setup() throws IOException {
conf = new HdfsConfiguration();
int numDN = ErasureCodeBenchmarkThroughput.getEcPolicy().getNumDataUnits() +
@@ -55,7 +55,7 @@ public static void setup() throws IOException {
ErasureCodeBenchmarkThroughput.getEcPolicy().getName());
}
- @AfterClass
+ @AfterAll
public static void tearDown() {
if (cluster != null) {
cluster.shutdown(true);
@@ -63,9 +63,9 @@ public static void tearDown() {
}
private static void runBenchmark(String[] args) throws Exception {
- Assert.assertNotNull(conf);
- Assert.assertNotNull(fs);
- Assert.assertEquals(0, ToolRunner.run(conf,
+ Assertions.assertNotNull(conf);
+ Assertions.assertNotNull(fs);
+ Assertions.assertEquals(0, ToolRunner.run(conf,
new ErasureCodeBenchmarkThroughput(fs), args));
}
@@ -80,7 +80,7 @@ public boolean accept(Path path) {
ErasureCodeBenchmarkThroughput.getFilePath(dataSize, isEc));
}
});
- Assert.assertEquals(numFile, statuses.length);
+ Assertions.assertEquals(numFile, statuses.length);
}
@Test
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingAddConfig.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingAddConfig.java
index 24c88bd629129..181ae3c81aadf 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingAddConfig.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingAddConfig.java
@@ -17,10 +17,7 @@
*/
package org.apache.hadoop.hdfs;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.*;
import java.io.IOException;
@@ -28,7 +25,7 @@
import org.apache.hadoop.hdfs.protocol.AddErasureCodingPolicyResponse;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.apache.hadoop.io.erasurecode.ECSchema;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
/**
* Test that ensures addition of user defined EC policies is allowed only when
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingExerciseAPIs.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingExerciseAPIs.java
index e4b09a8b336c4..9b852b2bb0112 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingExerciseAPIs.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingExerciseAPIs.java
@@ -33,9 +33,9 @@
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.util.Lists;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -58,7 +58,7 @@
import static org.apache.hadoop.fs.permission.FsAction.NONE;
import static org.apache.hadoop.fs.permission.FsAction.READ_EXECUTE;
import static org.apache.hadoop.hdfs.server.namenode.AclTestHelpers.aclEntry;
-import static org.junit.Assert.*;
+import static org.junit.jupiter.api.Assertions.*;
/**
* Test after enable Erasure Coding on cluster, exercise Java API make sure they
@@ -82,7 +82,7 @@ private static ErasureCodingPolicy getEcPolicy() {
LoggerFactory.getLogger(TestErasureCodingExerciseAPIs.class);
- @Before
+ @BeforeEach
public void setupCluster() throws IOException {
ecPolicy = getEcPolicy();
conf = new HdfsConfiguration();
@@ -252,8 +252,8 @@ public void testACLAPI() throws IOException {
AclStatus as = fs.getAclStatus(p);
for (AclEntry entry : aclSpec) {
- assertTrue(String.format("as: %s, entry: %s", as, entry),
- as.getEntries().contains(entry));
+ assertTrue(
+ as.getEntries().contains(entry), String.format("as: %s, entry: %s", as, entry));
}
List<AclEntry> maclSpec = Lists.newArrayList(
aclEntry(ACCESS, USER, "bar", READ_EXECUTE),
@@ -262,8 +262,8 @@ public void testACLAPI() throws IOException {
as = fs.getAclStatus(p);
for (AclEntry entry : maclSpec) {
- assertTrue(String.format("as: %s, entry: %s", as, entry),
- as.getEntries().contains(entry));
+ assertTrue(
+ as.getEntries().contains(entry), String.format("as: %s, entry: %s", as, entry));
}
fs.removeAclEntries(p, maclSpec);
@@ -539,7 +539,7 @@ public void testTruncate() throws IOException {
}
}
- @After
+ @AfterEach
public void shutdownCluster() {
if (cluster != null) {
cluster.shutdown();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingMultipleRacks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingMultipleRacks.java
index e47cbf0c879e2..097ae4ace4850 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingMultipleRacks.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingMultipleRacks.java
@@ -27,10 +27,10 @@
import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyRackFaultTolerant;
import org.apache.hadoop.net.NetworkTopology;
import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.After;
-import org.junit.Before;
import org.junit.Rule;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
import org.junit.rules.Timeout;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -40,8 +40,8 @@
import java.util.Map;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD_KEY;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
/**
* Test erasure coding block placement with skewed # nodes per rack.
@@ -70,7 +70,7 @@ public ErasureCodingPolicy getPolicy() {
private Configuration conf;
private DistributedFileSystem dfs;
- @Before
+ @BeforeEach
public void setup() {
ecPolicy = getPolicy();
conf = new HdfsConfiguration();
@@ -98,7 +98,7 @@ public void setupCluster(final int numDatanodes, final int numRacks,
dfs.setErasureCodingPolicy(new Path("/"), ecPolicy.getName());
}
- @After
+ @AfterEach
public void teardown() throws Exception {
if (cluster != null) {
cluster.shutdown();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicies.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicies.java
index 835d18f3a08ae..57c1f6231fa61 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicies.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicies.java
@@ -41,11 +41,11 @@
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
import org.junit.Rule;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
import org.junit.rules.Timeout;
import java.io.FileNotFoundException;
@@ -58,7 +58,7 @@
import java.util.Map;
import static org.apache.hadoop.test.GenericTestUtils.assertExceptionContains;
-import static org.junit.Assert.*;
+import static org.junit.jupiter.api.Assertions.*;
public class TestErasureCodingPolicies {
private Configuration conf;
@@ -75,7 +75,7 @@ public ErasureCodingPolicy getEcPolicy() {
@Rule
public Timeout timeout = new Timeout(60 * 1000);
- @Before
+ @BeforeEach
public void setupCluster() throws IOException {
ecPolicy = getEcPolicy();
conf = new HdfsConfiguration();
@@ -89,7 +89,7 @@ public void setupCluster() throws IOException {
DFSTestUtil.enableAllECPolicies(fs);
}
- @After
+ @AfterEach
public void shutdownCluster() throws IOException {
if (cluster != null) {
cluster.shutdown();
@@ -227,13 +227,13 @@ public void testBasicSetECPolicy()
// Already set directory-level policies should still be in effect
Path disabledPolicy = new Path(dir1, "afterDisabled");
- Assert.assertEquals("Dir does not have policy set",
- ecPolicy,
- fs.getErasureCodingPolicy(dir1));
+ Assertions.assertEquals(
+ ecPolicy,
+ fs.getErasureCodingPolicy(dir1), "Dir does not have policy set");
fs.create(disabledPolicy).close();
- Assert.assertEquals("File did not inherit dir's policy",
- ecPolicy,
- fs.getErasureCodingPolicy(disabledPolicy));
+ Assertions.assertEquals(
+ ecPolicy,
+ fs.getErasureCodingPolicy(disabledPolicy), "File did not inherit dir's policy");
// Also check loading disabled EC policies from fsimage
fs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_ENTER);
@@ -241,12 +241,12 @@ public void testBasicSetECPolicy()
fs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_LEAVE);
cluster.restartNameNodes();
- Assert.assertEquals("Dir does not have policy set",
- ecPolicy,
- fs.getErasureCodingPolicy(dir1));
- Assert.assertEquals("File does not have policy set",
- ecPolicy,
- fs.getErasureCodingPolicy(disabledPolicy));
+ Assertions.assertEquals(
+ ecPolicy,
+ fs.getErasureCodingPolicy(dir1), "Dir does not have policy set");
+ Assertions.assertEquals(
+ ecPolicy,
+ fs.getErasureCodingPolicy(disabledPolicy), "File does not have policy set");
}
@Test
@@ -325,19 +325,19 @@ public void testErasureCodingPolicyOnReservedDir() throws IOException {
final Path reserveDir = new Path("/.reserved");
// verify the EC policy is null, not an exception
ErasureCodingPolicy policy = fs.getErasureCodingPolicy(reserveDir);
- assertNull("Got unexpected erasure coding policy", policy);
+ assertNull(policy, "Got unexpected erasure coding policy");
// root EC policy before being set is null, verify the reserved raw dir
// is treated as root
final Path root = new Path("/");
final Path rawRoot = new Path("/.reserved/raw");
final Path rawRootSlash = new Path("/.reserved/raw/");
- assertNull("Got unexpected erasure coding policy",
- fs.getErasureCodingPolicy(root));
- assertNull("Got unexpected erasure coding policy",
- fs.getErasureCodingPolicy(rawRoot));
- assertNull("Got unexpected erasure coding policy",
- fs.getErasureCodingPolicy(rawRootSlash));
+ assertNull(
+ fs.getErasureCodingPolicy(root), "Got unexpected erasure coding policy");
+ assertNull(
+ fs.getErasureCodingPolicy(rawRoot), "Got unexpected erasure coding policy");
+ assertNull(
+ fs.getErasureCodingPolicy(rawRootSlash), "Got unexpected erasure coding policy");
// verify the EC policy correctness under the reserved raw dir
final Path ecDir = new Path("/ec");
@@ -345,21 +345,21 @@ public void testErasureCodingPolicyOnReservedDir() throws IOException {
fs.setErasureCodingPolicy(ecDir, ecPolicy.getName());
ErasureCodingPolicy policyBase = fs.getErasureCodingPolicy(ecDir);
- assertEquals("Got unexpected erasure coding policy", ecPolicy,
- policyBase);
+ assertEquals(ecPolicy,
+ policyBase, "Got unexpected erasure coding policy");
final Path rawRootEc = new Path("/.reserved/raw/ec");
ErasureCodingPolicy policyMap = fs.getErasureCodingPolicy(rawRootEc);
- assertEquals("Got unexpected erasure coding policy", ecPolicy,
- policyMap);
+ assertEquals(ecPolicy,
+ policyMap, "Got unexpected erasure coding policy");
}
@Test
public void testGetErasureCodingPolicy() throws Exception {
List<ErasureCodingPolicy> sysECPolicies =
SystemErasureCodingPolicies.getPolicies();
- assertTrue("System ecPolicies should exist",
- sysECPolicies.size() > 0);
+ assertTrue(
+ sysECPolicies.size() > 0, "System ecPolicies should exist");
ErasureCodingPolicy usingECPolicy = sysECPolicies.get(0);
String src = "/ec2";
@@ -380,8 +380,8 @@ private void verifyErasureCodingInfo(
HdfsFileStatus hdfsFileStatus = fs.getClient().getFileInfo(src);
ErasureCodingPolicy actualPolicy = hdfsFileStatus.getErasureCodingPolicy();
assertNotNull(actualPolicy);
- assertEquals("Actually used ecPolicy should be equal with target ecPolicy",
- usingECPolicy, actualPolicy);
+ assertEquals(
+ usingECPolicy, actualPolicy, "Actually used ecPolicy should be equal with target ecPolicy");
}
@Test
@@ -434,7 +434,7 @@ public void testGetAllErasureCodingPolicies() throws Exception {
sysPolicies.remove(ecpi.getPolicy());
}
}
- assertTrue("All system policies should be enabled", sysPolicies.isEmpty());
+ assertTrue(sysPolicies.isEmpty(), "All system policies should be enabled");
// Query after add a new policy
ECSchema toAddSchema = new ECSchema("rs", 5, 2);
@@ -443,9 +443,9 @@ public void testGetAllErasureCodingPolicies() throws Exception {
ErasureCodingPolicy[] policyArray = new ErasureCodingPolicy[]{newPolicy};
fs.addErasureCodingPolicies(policyArray);
allECPolicies = fs.getAllErasureCodingPolicies();
- assertEquals("Should return new added policy",
- SystemErasureCodingPolicies.getPolicies().size() + 1,
- allECPolicies.size());
+ assertEquals(
+ SystemErasureCodingPolicies.getPolicies().size() + 1,
+ allECPolicies.size(), "Should return new added policy");
}
@@ -515,13 +515,13 @@ public HdfsAdmin run() throws Exception {
userfs.mkdirs(ecdir);
final String ecPolicyName = ecPolicy.getName();
useradmin.setErasureCodingPolicy(ecdir, ecPolicyName);
- assertEquals("Policy not present on dir",
- ecPolicyName,
- useradmin.getErasureCodingPolicy(ecdir).getName());
+ assertEquals(
+ ecPolicyName,
+ useradmin.getErasureCodingPolicy(ecdir).getName(), "Policy not present on dir");
userfs.create(ecfile).close();
- assertEquals("Policy not present on file",
- ecPolicyName,
- useradmin.getErasureCodingPolicy(ecfile).getName());
+ assertEquals(
+ ecPolicyName,
+ useradmin.getErasureCodingPolicy(ecfile).getName(), "Policy not present on file");
// Unset and re-set
useradmin.unsetErasureCodingPolicy(ecdir);
@@ -631,7 +631,7 @@ public void testFileLevelECPolicy() throws Exception {
final String illegalPolicyName = "RS-DEFAULT-1-2-64k";
try {
fs.createFile(filePath1).ecPolicyName(illegalPolicyName).build().close();
- Assert.fail("illegal erasure coding policy should not be found");
+ Assertions.fail("illegal erasure coding policy should not be found");
} catch (Exception e) {
GenericTestUtils.assertExceptionContains("Policy '" + illegalPolicyName
+ "' does not match any enabled erasure coding policies", e);
@@ -691,7 +691,7 @@ public void testEnforceAsReplicatedFile() throws Exception {
.ecPolicyName(ecPolicyName)
.replicate()
.build().close();
- Assert.fail("shouldReplicate and ecPolicyName are exclusive " +
+ Assertions.fail("shouldReplicate and ecPolicyName are exclusive " +
"parameters. Set both is not allowed.");
}catch (Exception e){
GenericTestUtils.assertExceptionContains("SHOULD_REPLICATE flag and " +
@@ -704,7 +704,7 @@ public void testEnforceAsReplicatedFile() throws Exception {
EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE,
CreateFlag.SHOULD_REPLICATE), false, (short) 1, 1024, null, 1024,
null, null, ecPolicyName);
- Assert.fail("SHOULD_REPLICATE flag and ecPolicyName are exclusive " +
+ Assertions.fail("SHOULD_REPLICATE flag and ecPolicyName are exclusive " +
"parameters. Set both is not allowed.");
}catch (Exception e){
GenericTestUtils.assertExceptionContains("SHOULD_REPLICATE flag and " +
@@ -723,8 +723,8 @@ public void testEnforceAsReplicatedFile() throws Exception {
public void testGetAllErasureCodingCodecs() throws Exception {
Map<String, String> allECCodecs = fs
.getAllErasureCodingCodecs();
- assertTrue("At least 3 system codecs should be enabled",
- allECCodecs.size() >= 3);
+ assertTrue(
+ allECCodecs.size() >= 3, "At least 3 system codecs should be enabled");
System.out.println("Erasure Coding Codecs: Codec [Coder List]");
for (String codec : allECCodecs.keySet()) {
String coders = allECCodecs.get(codec);
@@ -762,7 +762,7 @@ public void testAddErasureCodingPolicies() throws Exception {
for (int cellSize: cellSizes) {
try {
new ErasureCodingPolicy(toAddSchema, cellSize);
- Assert.fail("Invalid cell size should be detected.");
+ Assertions.fail("Invalid cell size should be detected.");
} catch (Exception e){
GenericTestUtils.assertExceptionContains("cellSize must be", e);
}
@@ -852,66 +852,66 @@ public void testReplicationPolicy() throws Exception {
fs.mkdirs(replicaDir);
fs.createFile(replicaFile).build().close();
HdfsFileStatus fileStatus = (HdfsFileStatus)fs.getFileStatus(replicaFile);
- assertEquals("File should inherit EC policy.", ecPolicy, fileStatus
- .getErasureCodingPolicy());
- assertEquals("File should be a EC file.", true, fileStatus
- .isErasureCoded());
- assertEquals("File should have the same EC policy as its ancestor.",
- ecPolicy, fs.getErasureCodingPolicy(replicaFile));
+ assertEquals(ecPolicy, fileStatus
+ .getErasureCodingPolicy(), "File should inherit EC policy.");
+ assertEquals(true, fileStatus
+ .isErasureCoded(), "File should be a EC file.");
+ assertEquals(
+ ecPolicy, fs.getErasureCodingPolicy(replicaFile), "File should have the same EC policy as its ancestor.");
fs.delete(replicaFile, false);
// 2. Set replication policy on child directory, then get back the policy
fs.setErasureCodingPolicy(replicaDir, replicaPolicy.getName());
ErasureCodingPolicy temp = fs.getErasureCodingPolicy(replicaDir);
- assertEquals("Directory should hide replication EC policy.",
- null, temp);
+ assertEquals(
+ null, temp, "Directory should hide replication EC policy.");
// 3. New file will be replication file. Please be noted that replication
// policy only set on directory, not on file
fs.createFile(replicaFile).build().close();
- assertEquals("Replication file should have default replication factor.",
- fs.getDefaultReplication(),
- fs.getFileStatus(replicaFile).getReplication());
+ assertEquals(
+ fs.getDefaultReplication(),
+ fs.getFileStatus(replicaFile).getReplication(), "Replication file should have default replication factor.");
fs.setReplication(replicaFile, (short) 2);
- assertEquals("File should have replication factor as expected.",
- 2, fs.getFileStatus(replicaFile).getReplication());
+ assertEquals(
+ 2, fs.getFileStatus(replicaFile).getReplication(), "File should have replication factor as expected.");
fileStatus = (HdfsFileStatus)fs.getFileStatus(replicaFile);
- assertEquals("File should not have EC policy.", null, fileStatus
- .getErasureCodingPolicy());
- assertEquals("File should not be a EC file.", false,
- fileStatus.isErasureCoded());
+ assertEquals(null, fileStatus
+ .getErasureCodingPolicy(), "File should not have EC policy.");
+ assertEquals(false,
+ fileStatus.isErasureCoded(), "File should not be a EC file.");
ErasureCodingPolicy ecPolicyOnFile = fs.getErasureCodingPolicy(replicaFile);
- assertEquals("File should not have EC policy.", null, ecPolicyOnFile);
+ assertEquals(null, ecPolicyOnFile, "File should not have EC policy.");
fs.delete(replicaFile, false);
// 4. New directory under replication directory, is also replication
// directory
fs.mkdirs(subReplicaDir);
- assertEquals("Directory should inherit hiding replication EC policy.",
- null, fs.getErasureCodingPolicy(subReplicaDir));
+ assertEquals(
+ null, fs.getErasureCodingPolicy(subReplicaDir), "Directory should inherit hiding replication EC policy.");
fs.createFile(subReplicaFile).build().close();
- assertEquals("File should have default replication factor.",
- fs.getDefaultReplication(),
- fs.getFileStatus(subReplicaFile).getReplication());
+ assertEquals(
+ fs.getDefaultReplication(),
+ fs.getFileStatus(subReplicaFile).getReplication(), "File should have default replication factor.");
fileStatus = (HdfsFileStatus)fs.getFileStatus(subReplicaFile);
- assertEquals("File should not have EC policy.", null,
- fileStatus.getErasureCodingPolicy());
- assertEquals("File should not be a EC file.", false,
- fileStatus.isErasureCoded());
- assertEquals("File should not have EC policy.", null,
- fs.getErasureCodingPolicy(subReplicaFile));
+ assertEquals(null,
+ fileStatus.getErasureCodingPolicy(), "File should not have EC policy.");
+ assertEquals(false,
+ fileStatus.isErasureCoded(), "File should not be a EC file.");
+ assertEquals(null,
+ fs.getErasureCodingPolicy(subReplicaFile), "File should not have EC policy.");
fs.delete(subReplicaFile, false);
// 5. Unset replication policy on directory, new file will be EC file
fs.unsetErasureCodingPolicy(replicaDir);
fs.createFile(subReplicaFile).build().close();
fileStatus = (HdfsFileStatus)fs.getFileStatus(subReplicaFile);
- assertEquals("File should inherit EC policy.", ecPolicy,
- fileStatus.getErasureCodingPolicy());
- assertEquals("File should be a EC file.", true,
- fileStatus.isErasureCoded());
- assertEquals("File should have the same EC policy as its ancestor",
- ecPolicy, fs.getErasureCodingPolicy(subReplicaFile));
+ assertEquals(ecPolicy,
+ fileStatus.getErasureCodingPolicy(), "File should inherit EC policy.");
+ assertEquals(true,
+ fileStatus.isErasureCoded(), "File should be a EC file.");
+ assertEquals(
+ ecPolicy, fs.getErasureCodingPolicy(subReplicaFile), "File should have the same EC policy as its ancestor");
fs.delete(subReplicaFile, false);
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicyWithSnapshot.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicyWithSnapshot.java
index b7e7bba183260..2b53b9164c8a7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicyWithSnapshot.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicyWithSnapshot.java
@@ -17,8 +17,8 @@
*/
package org.apache.hadoop.hdfs;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNull;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNull;
import java.io.IOException;
@@ -31,9 +31,9 @@
import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
import org.apache.hadoop.hdfs.protocol.SystemErasureCodingPolicies;
import org.apache.hadoop.util.ToolRunner;
-import org.junit.After;
-import org.junit.Before;
import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
import org.junit.Rule;
import org.junit.rules.Timeout;
@@ -53,7 +53,7 @@ public ErasureCodingPolicy getEcPolicy() {
@Rule
public Timeout globalTimeout = new Timeout(120000);
- @Before
+ @BeforeEach
public void setupCluster() throws IOException {
ecPolicy = getEcPolicy();
groupSize = (short) (ecPolicy.getNumDataUnits()
@@ -65,7 +65,7 @@ public void setupCluster() throws IOException {
fs.enableErasureCodingPolicy(ecPolicy.getName());
}
- @After
+ @AfterEach
public void shutdownCluster() throws IOException {
if (cluster != null) {
cluster.shutdown();
@@ -91,8 +91,8 @@ public void testSnapshotsOnErasureCodingDirsParentDir() throws Exception {
String contents = DFSTestUtil.readFile(fs, ecFile);
final Path snap1 = fs.createSnapshot(ecDirParent, "snap1");
final Path snap1ECDir = new Path(snap1, ecDir.getName());
- assertEquals("Got unexpected erasure coding policy", ecPolicy,
- fs.getErasureCodingPolicy(snap1ECDir));
+ assertEquals(ecPolicy,
+ fs.getErasureCodingPolicy(snap1ECDir), "Got unexpected erasure coding policy");
// Now delete the dir which has erasure coding policy. Re-create the dir again, and
// take another snapshot
@@ -100,8 +100,8 @@ public void testSnapshotsOnErasureCodingDirsParentDir() throws Exception {
fs.mkdir(ecDir, FsPermission.getDirDefault());
final Path snap2 = fs.createSnapshot(ecDirParent, "snap2");
final Path snap2ECDir = new Path(snap2, ecDir.getName());
- assertNull("Expected null erasure coding policy",
- fs.getErasureCodingPolicy(snap2ECDir));
+ assertNull(
+ fs.getErasureCodingPolicy(snap2ECDir), "Expected null erasure coding policy");
// Make dir again with system default ec policy
fs.setErasureCodingPolicy(ecDir, ecPolicy.getName());
@@ -109,31 +109,31 @@ public void testSnapshotsOnErasureCodingDirsParentDir() throws Exception {
final Path snap3ECDir = new Path(snap3, ecDir.getName());
// Check that snap3's ECPolicy has the correct settings
ErasureCodingPolicy ezSnap3 = fs.getErasureCodingPolicy(snap3ECDir);
- assertEquals("Got unexpected erasure coding policy", ecPolicy,
- ezSnap3);
+ assertEquals(ecPolicy,
+ ezSnap3, "Got unexpected erasure coding policy");
- // Check that older snapshots still have the old ECPolicy settings
- assertEquals("Got unexpected erasure coding policy", ecPolicy,
- fs.getErasureCodingPolicy(snap1ECDir));
- assertNull("Expected null erasure coding policy",
- fs.getErasureCodingPolicy(snap2ECDir));
+ // Check that older snapshots still have the old ECPolicy settings
+ assertEquals(ecPolicy,
+ fs.getErasureCodingPolicy(snap1ECDir), "Got unexpected erasure coding policy");
+ assertNull(
+ fs.getErasureCodingPolicy(snap2ECDir), "Expected null erasure coding policy");
// Verify contents of the snapshotted file
final Path snapshottedECFile = new Path(snap1.toString() + "/"
+ ecDir.getName() + "/" + ecFile.getName());
- assertEquals("Contents of snapshotted file have changed unexpectedly",
- contents, DFSTestUtil.readFile(fs, snapshottedECFile));
+ assertEquals(
+ contents, DFSTestUtil.readFile(fs, snapshottedECFile), "Contents of snapshotted file have changed unexpectedly");
// Now delete the snapshots out of order and verify the EC policy
// correctness
fs.deleteSnapshot(ecDirParent, snap2.getName());
- assertEquals("Got unexpected erasure coding policy", ecPolicy,
- fs.getErasureCodingPolicy(snap1ECDir));
- assertEquals("Got unexpected erasure coding policy", ecPolicy,
- fs.getErasureCodingPolicy(snap3ECDir));
+ assertEquals(ecPolicy,
+ fs.getErasureCodingPolicy(snap1ECDir), "Got unexpected erasure coding policy");
+ assertEquals(ecPolicy,
+ fs.getErasureCodingPolicy(snap3ECDir), "Got unexpected erasure coding policy");
fs.deleteSnapshot(ecDirParent, snap1.getName());
- assertEquals("Got unexpected erasure coding policy", ecPolicy,
- fs.getErasureCodingPolicy(snap3ECDir));
+ assertEquals(ecPolicy,
+ fs.getErasureCodingPolicy(snap3ECDir), "Got unexpected erasure coding policy");
}
/**
@@ -147,8 +147,8 @@ public void testSnapshotsOnErasureCodingDir() throws Exception {
fs.setErasureCodingPolicy(ecDir, ecPolicy.getName());
final Path snap1 = fs.createSnapshot(ecDir, "snap1");
- assertEquals("Got unexpected erasure coding policy", ecPolicy,
- fs.getErasureCodingPolicy(snap1));
+ assertEquals(ecPolicy,
+ fs.getErasureCodingPolicy(snap1), "Got unexpected erasure coding policy");
}
/**
@@ -164,8 +164,8 @@ public void testSnapshotsOnErasureCodingDirAfterNNRestart() throws Exception {
fs.setErasureCodingPolicy(ecDir, ecPolicy.getName());
final Path snap1 = fs.createSnapshot(ecDir, "snap1");
ErasureCodingPolicy ecSnap = fs.getErasureCodingPolicy(snap1);
- assertEquals("Got unexpected erasure coding policy", ecPolicy,
- ecSnap);
+ assertEquals(ecPolicy,
+ ecSnap, "Got unexpected erasure coding policy");
// save namespace, restart namenode, and check ec policy correctness.
fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
@@ -174,10 +174,9 @@ public void testSnapshotsOnErasureCodingDirAfterNNRestart() throws Exception {
cluster.restartNameNode(true);
ErasureCodingPolicy ecSnap1 = fs.getErasureCodingPolicy(snap1);
- assertEquals("Got unexpected erasure coding policy", ecPolicy,
- ecSnap1);
- assertEquals("Got unexpected ecSchema", ecSnap.getSchema(),
- ecSnap1.getSchema());
+ assertEquals(ecPolicy, ecSnap1, "Got unexpected erasure coding policy");
+ assertEquals(ecSnap.getSchema(), ecSnap1.getSchema(),
+ "Got unexpected ecSchema");
}
/**
@@ -202,12 +201,12 @@ public void testCopySnapshotWillNotPreserveErasureCodingPolicy()
String[] argv = new String[] { "-cp", "-px", snap1.toUri().toString(),
snap1Copy.toUri().toString() };
int ret = ToolRunner.run(new FsShell(conf), argv);
- assertEquals("cp -px is not working on a snapshot", SUCCESS, ret);
+ assertEquals(SUCCESS, ret, "cp -px is not working on a snapshot");
- assertNull("Got unexpected erasure coding policy",
- fs.getErasureCodingPolicy(snap1CopyECDir));
- assertEquals("Got unexpected erasure coding policy", ecPolicy,
- fs.getErasureCodingPolicy(snap1));
+ assertNull(
+ fs.getErasureCodingPolicy(snap1CopyECDir), "Got unexpected erasure coding policy");
+ assertEquals(ecPolicy,
+ fs.getErasureCodingPolicy(snap1), "Got unexpected erasure coding policy");
}
@Test (timeout = 300000)
@@ -247,13 +246,13 @@ public void testErasureCodingPolicyOnDotSnapshotDir() throws IOException {
// verify the EC policy correctness
ErasureCodingPolicy ecSnap = fs.getErasureCodingPolicy(snap);
- assertEquals("Got unexpected erasure coding policy", ecPolicy,
- ecSnap);
+ assertEquals(ecPolicy,
+ ecSnap, "Got unexpected erasure coding policy");
// verify the EC policy is null, not an exception
final Path ecDotSnapshotDir = new Path(ecDir, ".snapshot");
ErasureCodingPolicy ecSnap1 = fs.getErasureCodingPolicy(ecDotSnapshotDir);
- assertNull("Got unexpected erasure coding policy", ecSnap1);
+ assertNull(ecSnap1, "Got unexpected erasure coding policy");
}
/**
@@ -268,22 +267,22 @@ public void testSnapshotsOnErasureCodingDirAfterECPolicyChanges()
fs.allowSnapshot(ecDir);
final Path snap1 = fs.createSnapshot(ecDir, "snap1");
- assertNull("Expected null erasure coding policy",
- fs.getErasureCodingPolicy(snap1));
+ assertNull(
+ fs.getErasureCodingPolicy(snap1), "Expected null erasure coding policy");
// Set erasure coding policy
final ErasureCodingPolicy ec63Policy = SystemErasureCodingPolicies
.getByID(SystemErasureCodingPolicies.RS_6_3_POLICY_ID);
fs.setErasureCodingPolicy(ecDir, ec63Policy.getName());
final Path snap2 = fs.createSnapshot(ecDir, "snap2");
- assertEquals("Got unexpected erasure coding policy", ec63Policy,
- fs.getErasureCodingPolicy(snap2));
+ assertEquals(ec63Policy,
+ fs.getErasureCodingPolicy(snap2), "Got unexpected erasure coding policy");
// Verify the EC policy correctness after the unset operation
fs.unsetErasureCodingPolicy(ecDir);
final Path snap3 = fs.createSnapshot(ecDir, "snap3");
- assertNull("Expected null erasure coding policy",
- fs.getErasureCodingPolicy(snap3));
+ assertNull(
+ fs.getErasureCodingPolicy(snap3), "Expected null erasure coding policy");
// Change the erasure coding policy and take another snapshot
final ErasureCodingPolicy ec32Policy = SystemErasureCodingPolicies
@@ -291,15 +290,15 @@ public void testSnapshotsOnErasureCodingDirAfterECPolicyChanges()
fs.enableErasureCodingPolicy(ec32Policy.getName());
fs.setErasureCodingPolicy(ecDir, ec32Policy.getName());
final Path snap4 = fs.createSnapshot(ecDir, "snap4");
- assertEquals("Got unexpected erasure coding policy", ec32Policy,
- fs.getErasureCodingPolicy(snap4));
-
- // Check that older snapshot still have the old ECPolicy settings
- assertNull("Expected null erasure coding policy",
- fs.getErasureCodingPolicy(snap1));
- assertEquals("Got unexpected erasure coding policy", ec63Policy,
- fs.getErasureCodingPolicy(snap2));
- assertNull("Expected null erasure coding policy",
- fs.getErasureCodingPolicy(snap3));
+ assertEquals(ec32Policy,
+ fs.getErasureCodingPolicy(snap4), "Got unexpected erasure coding policy");
+
+ // Check that older snapshot still have the old ECPolicy settings
+ assertNull(
+ fs.getErasureCodingPolicy(snap1), "Expected null erasure coding policy");
+ assertEquals(ec63Policy,
+ fs.getErasureCodingPolicy(snap2), "Got unexpected erasure coding policy");
+ assertNull(
+ fs.getErasureCodingPolicy(snap3), "Expected null erasure coding policy");
}
}
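
The mechanical change throughout these hunks is the assertion message moving from the first parameter (org.junit.Assert) to the last parameter (org.junit.jupiter.api.Assertions); the expected/actual pair keeps its relative order. A minimal sketch of the before/after shape — the class, method, and values below are invented for illustration, not taken from the Hadoop tests:

import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNull;

import org.junit.jupiter.api.Test;

class MessageOrderSketchTest {

  @Test
  void messageIsNowTheLastArgument() {
    String expected = "RS-6-3-1024k";
    String actual = "RS-6-3-1024k";

    // JUnit 4: assertEquals("Got unexpected erasure coding policy", expected, actual);
    // JUnit 5: the message trails the expected/actual pair.
    assertEquals(expected, actual, "Got unexpected erasure coding policy");

    // Same rule for single-argument assertions such as assertNull.
    Object policy = null;
    assertNull(policy, "Expected null erasure coding policy");
  }
}
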
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestExtendedAcls.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestExtendedAcls.java
index b4baadfa041ca..0d19c109fe538 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestExtendedAcls.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestExtendedAcls.java
@@ -27,9 +27,9 @@
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.Lists;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
import java.io.IOException;
import java.security.PrivilegedExceptionAction;
@@ -49,9 +49,7 @@
import static org.apache.hadoop.fs.permission.FsAction.ALL;
import static org.apache.hadoop.fs.permission.AclEntryType.GROUP;
import static org.apache.hadoop.fs.permission.AclEntryType.OTHER;
-import static org.junit.Assert.assertArrayEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.*;
/**
* A class for testing the behavior of HDFS directory and file ACL.
@@ -65,7 +63,7 @@ public class TestExtendedAcls {
private static DistributedFileSystem hdfs;
- @BeforeClass
+ @BeforeAll
public static void setup() throws IOException {
conf = new Configuration();
conf.setBoolean(DFS_NAMENODE_ACLS_ENABLED_KEY, true);
@@ -76,7 +74,7 @@ public static void setup() throws IOException {
hdfs = cluster.getFileSystem();
}
- @AfterClass
+ @AfterAll
public static void shutdown() throws IOException {
if (cluster != null) {
cluster.shutdown();
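
The lifecycle annotations in TestExtendedAcls map one-to-one: @BeforeClass/@AfterClass become @BeforeAll/@AfterAll (still static under Jupiter's default per-method test-instance lifecycle), and @Before/@After become @BeforeEach/@AfterEach elsewhere in this patch. A small illustrative skeleton with invented names and no Hadoop dependencies:

import org.junit.jupiter.api.AfterAll;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;

class LifecycleSketchTest {

  @BeforeAll                 // was @BeforeClass
  static void startSharedFixture() {
    // start a class-wide fixture once, e.g. a mini cluster
  }

  @AfterAll                  // was @AfterClass
  static void stopSharedFixture() {
    // tear the class-wide fixture down
  }

  @BeforeEach                // was @Before
  void setUp() {
    // per-test setup
  }

  @AfterEach                 // was @After
  void tearDown() {
    // per-test cleanup
  }

  @Test
  void somethingToRun() {
    // test body
  }
}
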
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestExternalBlockReader.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestExternalBlockReader.java
index f153b2c9d1724..b3ad93166e0a5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestExternalBlockReader.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestExternalBlockReader.java
@@ -28,8 +28,8 @@
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.net.NetUtils;
-import org.junit.Assert;
-import org.junit.Test;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.Test;
import java.io.IOException;
import java.nio.BufferOverflowException;
@@ -63,7 +63,7 @@ public void testMisconfiguredExternalBlockReader() throws Exception {
IOUtils.readFully(stream, buf, 0, TEST_LENGTH);
byte expected[] = DFSTestUtil.
calculateFileContentsFromSeed(SEED, TEST_LENGTH);
- Assert.assertArrayEquals(expected, buf);
+ Assertions.assertArrayEquals(expected, buf);
stream.close();
} finally {
dfs.close();
@@ -293,36 +293,36 @@ public void testExternalBlockReader() throws Exception {
byte expected[] = DFSTestUtil.
calculateFileContentsFromSeed(SEED, TEST_LENGTH);
ReadStatistics stats = stream.getReadStatistics();
- Assert.assertEquals(1024, stats.getTotalShortCircuitBytesRead());
- Assert.assertEquals(2047, stats.getTotalLocalBytesRead());
- Assert.assertEquals(2047, stats.getTotalBytesRead());
- Assert.assertArrayEquals(expected, buf);
+ Assertions.assertEquals(1024, stats.getTotalShortCircuitBytesRead());
+ Assertions.assertEquals(2047, stats.getTotalLocalBytesRead());
+ Assertions.assertEquals(2047, stats.getTotalBytesRead());
+ Assertions.assertArrayEquals(expected, buf);
stream.close();
ExtendedBlock block = DFSTestUtil.getFirstBlock(dfs, new Path("/a"));
- Assert.assertNotNull(block);
+ Assertions.assertNotNull(block);
LinkedList accessorList = accessors.get(uuid);
- Assert.assertNotNull(accessorList);
- Assert.assertEquals(3, accessorList.size());
+ Assertions.assertNotNull(accessorList);
+ Assertions.assertEquals(3, accessorList.size());
SyntheticReplicaAccessor accessor = accessorList.get(0);
- Assert.assertTrue(accessor.builder.allowShortCircuit);
- Assert.assertEquals(block.getBlockPoolId(),
+ Assertions.assertTrue(accessor.builder.allowShortCircuit);
+ Assertions.assertEquals(block.getBlockPoolId(),
accessor.builder.blockPoolId);
- Assert.assertEquals(block.getBlockId(),
+ Assertions.assertEquals(block.getBlockId(),
accessor.builder.blockId);
- Assert.assertEquals(dfs.getClient().clientName,
+ Assertions.assertEquals(dfs.getClient().clientName,
accessor.builder.clientName);
- Assert.assertEquals("/a", accessor.builder.fileName);
- Assert.assertEquals(block.getGenerationStamp(),
+ Assertions.assertEquals("/a", accessor.builder.fileName);
+ Assertions.assertEquals(block.getGenerationStamp(),
accessor.getGenerationStamp());
- Assert.assertTrue(accessor.builder.verifyChecksum);
- Assert.assertEquals(1024L, accessor.builder.visibleLength);
- Assert.assertEquals(24L, accessor.totalRead);
- Assert.assertEquals("", accessor.getError());
- Assert.assertEquals(1, accessor.numCloses);
+ Assertions.assertTrue(accessor.builder.verifyChecksum);
+ Assertions.assertEquals(1024L, accessor.builder.visibleLength);
+ Assertions.assertEquals(24L, accessor.totalRead);
+ Assertions.assertEquals("", accessor.getError());
+ Assertions.assertEquals(1, accessor.numCloses);
byte[] tempBuf = new byte[5];
- Assert.assertEquals(-1, accessor.read(TEST_LENGTH,
+ Assertions.assertEquals(-1, accessor.read(TEST_LENGTH,
tempBuf, 0, 0));
- Assert.assertEquals(-1, accessor.read(TEST_LENGTH,
+ Assertions.assertEquals(-1, accessor.read(TEST_LENGTH,
tempBuf, 0, tempBuf.length));
accessors.remove(uuid);
} finally {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFSInputChecker.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFSInputChecker.java
index e7f3b9fc34238..e471e1bfb3ab9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFSInputChecker.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFSInputChecker.java
@@ -17,10 +17,7 @@
*/
package org.apache.hadoop.hdfs;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.*;
import java.io.EOFException;
import java.io.File;
@@ -40,7 +37,7 @@
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.test.PathUtils;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
/**
* This class tests if FSInputChecker works correctly.
@@ -71,9 +68,9 @@ private void writeFile(FileSystem fileSys, Path name) throws IOException {
private void checkAndEraseData(byte[] actual, int from, byte[] expected,
String message) throws Exception {
for (int idx = 0; idx < actual.length; idx++) {
- assertEquals(message+" byte "+(from+idx)+" differs. expected "+
- expected[from+idx]+" actual "+actual[idx],
- actual[idx], expected[from+idx]);
+ assertEquals(
+ actual[idx], expected[from + idx], message + " byte " + (from + idx) + " differs. expected " +
+ expected[from + idx] + " actual " + actual[idx]);
actual[idx] = 0;
}
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFSOutputSummer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFSOutputSummer.java
index 9dcd449661bbd..d63bb99b90d2f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFSOutputSummer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFSOutputSummer.java
@@ -18,8 +18,8 @@
package org.apache.hadoop.hdfs;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
import java.io.IOException;
import java.util.Random;
@@ -29,7 +29,7 @@
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
/**
* This class tests if FSOutputSummer works correctly.
@@ -90,9 +90,9 @@ private void writeFile3(Path name) throws Exception {
private void checkAndEraseData(byte[] actual, int from, byte[] expected,
String message) throws Exception {
for (int idx = 0; idx < actual.length; idx++) {
- assertEquals(message+" byte "+(from+idx)+" differs. expected "+
- expected[from+idx]+" actual "+actual[idx],
- actual[idx], expected[from+idx]);
+ assertEquals(
+ actual[idx], expected[from + idx], message + " byte " + (from + idx) + " differs. expected " +
+ expected[from + idx] + " actual " + actual[idx]);
actual[idx] = 0;
}
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFetchImage.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFetchImage.java
index 7e1e5938a13e4..c68d122dd4bf6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFetchImage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFetchImage.java
@@ -20,7 +20,7 @@
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SIZE_KEY;
-import static org.junit.Assert.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
import java.io.File;
import java.io.IOException;
@@ -41,10 +41,10 @@
import org.apache.hadoop.io.MD5Hash;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.test.PathUtils;
-import org.junit.AfterClass;
-import org.junit.Before;
-import org.junit.BeforeClass;
import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.BeforeEach;
public class TestFetchImage {
@@ -58,17 +58,17 @@ public class TestFetchImage {
private NameNode nn1 = null;
private Configuration conf = null;
- @BeforeClass
+ @BeforeAll
public static void setupImageDir() {
FETCHED_IMAGE_FILE.mkdirs();
}
- @AfterClass
+ @AfterAll
public static void cleanup() {
FileUtil.fullyDelete(FETCHED_IMAGE_FILE);
}
- @Before
+ @BeforeEach
public void setupCluster() throws IOException, URISyntaxException {
conf = new Configuration();
conf.setInt(DFS_HEARTBEAT_INTERVAL_KEY, 1);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java
index b65301f8c576b..485f5f7e34e96 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java
@@ -17,9 +17,8 @@
*/
package org.apache.hadoop.hdfs;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assertions.fail;
import java.io.File;
import java.io.FileNotFoundException;
@@ -31,6 +30,8 @@
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.TimeoutException;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.CreateFlag;
@@ -58,8 +59,8 @@
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.util.DataChecksum;
import org.apache.hadoop.util.Time;
-import org.junit.Assert;
import org.junit.Test;
+import org.junit.jupiter.api.Assertions;
/**
* This class tests the building blocks that are needed to
@@ -137,8 +138,8 @@ public void testBreakHardlinksIfNeeded() throws IOException {
// Get a handle to the datanode
DataNode[] dn = cluster.listDataNodes();
- assertTrue("There should be only one datanode but found " + dn.length,
- dn.length == 1);
+ assertTrue(
+ dn.length == 1, "There should be only one datanode but found " + dn.length);
LocatedBlocks locations = client.getNamenode().getBlockLocations(
file1.toString(), 0, Long.MAX_VALUE);
@@ -161,8 +162,8 @@ public void testBreakHardlinksIfNeeded() throws IOException {
for (int i = 0; i < blocks.size(); i++) {
ExtendedBlock b = blocks.get(i).getBlock();
System.out.println("breakHardlinksIfNeeded detaching block " + b);
- assertTrue("breakHardlinksIfNeeded(" + b + ") should have returned true",
- FsDatasetTestUtil.breakHardlinksIfNeeded(fsd, b));
+ assertTrue(
+ FsDatasetTestUtil.breakHardlinksIfNeeded(fsd, b), "breakHardlinksIfNeeded(" + b + ") should have returned true");
}
// Since the blocks were already detached earlier, these calls should
@@ -171,8 +172,8 @@ public void testBreakHardlinksIfNeeded() throws IOException {
ExtendedBlock b = blocks.get(i).getBlock();
System.out.println("breakHardlinksIfNeeded re-attempting to " +
"detach block " + b);
- assertTrue("breakHardlinksIfNeeded(" + b + ") should have returned false",
- FsDatasetTestUtil.breakHardlinksIfNeeded(fsd, b));
+ assertTrue(
+ FsDatasetTestUtil.breakHardlinksIfNeeded(fsd, b), "breakHardlinksIfNeeded(" + b + ") should have returned false");
}
} finally {
client.close();
@@ -335,10 +336,10 @@ public void testAppendTwice() throws Exception {
//2nd append should get AlreadyBeingCreatedException
fs1.append(p);
- Assert.fail();
+ Assertions.fail();
} catch(RemoteException re) {
AppendTestUtil.LOG.info("Got an exception:", re);
- Assert.assertEquals(AlreadyBeingCreatedException.class.getName(),
+ Assertions.assertEquals(AlreadyBeingCreatedException.class.getName(),
re.getClassName());
} finally {
fs2.close();
@@ -376,10 +377,10 @@ public void testAppend2Twice() throws Exception {
// 2nd append should get AlreadyBeingCreatedException
fs1.append(p);
- Assert.fail();
+ Assertions.fail();
} catch(RemoteException re) {
AppendTestUtil.LOG.info("Got an exception:", re);
- Assert.assertEquals(AlreadyBeingCreatedException.class.getName(),
+ Assertions.assertEquals(AlreadyBeingCreatedException.class.getName(),
re.getClassName());
} finally {
fs2.close();
@@ -428,13 +429,13 @@ public void testMultipleAppends() throws Exception {
fileLen += appendLen;
}
- Assert.assertEquals(fileLen, fs.getFileStatus(p).getLen());
+ Assertions.assertEquals(fileLen, fs.getFileStatus(p).getLen());
final byte[] actual = new byte[fileLen];
final FSDataInputStream in = fs.open(p);
in.readFully(actual);
in.close();
for(int i = 0; i < fileLen; i++) {
- Assert.assertEquals(data[i], actual[i]);
+ Assertions.assertEquals(data[i], actual[i]);
}
} finally {
fs.close();
@@ -675,7 +676,7 @@ public void testAppendCorruptedBlock() throws Exception {
Path fileName = new Path("/appendCorruptBlock");
DFSTestUtil.createFile(fs, fileName, 512, (short) 1, 0);
DFSTestUtil.waitReplication(fs, fileName, (short) 1);
- Assert.assertTrue("File not created", fs.exists(fileName));
+ Assertions.assertTrue(fs.exists(fileName), "File not created");
ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, fileName);
cluster.corruptBlockOnDataNodes(block);
DFSTestUtil.appendFile(fs, fileName, "appendCorruptBlock");
@@ -707,7 +708,7 @@ public void testConcurrentAppendRead()
Path fileName = new Path("/appendCorruptBlock");
DFSTestUtil.createFile(fs, fileName, initialFileLength, (short) 1, 0);
DFSTestUtil.waitReplication(fs, fileName, (short) 1);
- Assert.assertTrue("File not created", fs.exists(fileName));
+ Assertions.assertTrue(fs.exists(fileName), "File not created");
// Call FsDatasetImpl#append to append the block file,
// which converts it to a rbw replica.
@@ -738,7 +739,7 @@ public void testConcurrentAppendRead()
// checksum, rather than on-disk checksum. Otherwise it will see a
// checksum mismatch error.
final byte[] readBlock = DFSTestUtil.readFileBuffer(fs, fileName);
- assertEquals("should have read only one byte!", 1, readBlock.length);
+ assertEquals(1, readBlock.length, "should have read only one byte!");
} finally {
cluster.shutdown();
}
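
TestFileAppend above keeps the JUnit 4 org.junit.Test annotation while switching its assertions to org.junit.jupiter.api.Assertions. That combination works because Assertions is a plain utility class whose failures throw AssertionError, which the JUnit 4 runner reports normally — assuming the JUnit vintage engine remains on the test classpath. A minimal sketch of that mixed shape (names are invented):

import static org.junit.jupiter.api.Assertions.assertArrayEquals;

import org.junit.Test; // still the JUnit 4 annotation, executed by the vintage engine

public class MixedAssertionSketchTest {

  @Test
  public void jupiterAssertionsUnderTheVintageEngine() {
    byte[] expected = {1, 2, 3};
    byte[] actual = {1, 2, 3};
    // Assertions methods throw AssertionError on failure, so the JUnit 4
    // runner reports them exactly like org.junit.Assert failures.
    assertArrayEquals(expected, actual);
  }
}
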
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend2.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend2.java
index 9929cb24b23f5..2f5cff0b8f4dd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend2.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend2.java
@@ -17,10 +17,7 @@
*/
package org.apache.hadoop.hdfs;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.*;
import java.io.IOException;
import java.util.ArrayList;
@@ -42,7 +39,7 @@
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
import org.slf4j.event.Level;
/**
@@ -445,10 +442,10 @@ public void run() {
} catch (InterruptedException e) {}
}
- assertTrue("File " + testfile + " size is " +
- fs.getFileStatus(testfile).getLen() +
- " but expected " + (len + sizeToAppend),
- fs.getFileStatus(testfile).getLen() == (len + sizeToAppend));
+ assertTrue(
+ fs.getFileStatus(testfile).getLen() == (len + sizeToAppend), "File " + testfile + " size is " +
+ fs.getFileStatus(testfile).getLen() +
+ " but expected " + (len + sizeToAppend));
AppendTestUtil.checkFullFile(fs, testfile, (int) (len + sizeToAppend),
fileContents, "Read 2");
@@ -460,9 +457,9 @@ public void run() {
" " + e);
e.printStackTrace();
}
- assertTrue("Workload exception " + id + " testfile " + testfile +
- " expected size " + (len + sizeToAppend),
- false);
+      fail(
+          "Workload exception " + id + " testfile " + testfile +
+          " expected size " + (len + sizeToAppend));
}
// Add testfile back to the pool of files.
@@ -527,10 +524,10 @@ private void testComplexAppend(boolean appendToNewBlock) throws IOException {
cluster.shutdown();
}
- // If any of the worker thread failed in their job, indicate that
- // this test failed.
- //
- assertTrue("testComplexAppend Worker encountered exceptions.", globalStatus);
+ // If any of the worker thread failed in their job, indicate that
+ // this test failed.
+ //
+ assertTrue(globalStatus, "testComplexAppend Worker encountered exceptions.");
}
@Test
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend3.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend3.java
index 3e9adcac7e7e0..8d428795e17c0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend3.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend3.java
@@ -17,9 +17,7 @@
*/
package org.apache.hadoop.hdfs;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.*;
import java.io.IOException;
import java.util.EnumSet;
@@ -46,10 +44,10 @@
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
import org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
import org.slf4j.event.Level;
/** This class implements some of tests posted in HADOOP-2658. */
@@ -70,7 +68,7 @@ public class TestFileAppend3 {
private static MiniDFSCluster cluster;
private static DistributedFileSystem fs;
- @BeforeClass
+ @BeforeAll
public static void setUp() throws java.lang.Exception {
AppendTestUtil.LOG.info("setUp()");
conf = new HdfsConfiguration();
@@ -80,7 +78,7 @@ public static void setUp() throws java.lang.Exception {
fs = cluster.getFileSystem();
}
- @AfterClass
+ @AfterAll
public static void tearDown() throws Exception {
AppendTestUtil.LOG.info("tearDown()");
if(fs != null) fs.close();
@@ -201,10 +199,10 @@ public void testTC2ForAppend2() throws Exception {
AppendTestUtil.check(fs, p, len1 + len2);
List blocks = fs.getClient().getLocatedBlocks(
p.toString(), 0L).getLocatedBlocks();
- Assert.assertEquals(3, blocks.size());
- Assert.assertEquals(BLOCK_SIZE, blocks.get(0).getBlockSize());
- Assert.assertEquals(BLOCK_SIZE / 2, blocks.get(1).getBlockSize());
- Assert.assertEquals(BLOCK_SIZE / 4, blocks.get(2).getBlockSize());
+ Assertions.assertEquals(3, blocks.size());
+ Assertions.assertEquals(BLOCK_SIZE, blocks.get(0).getBlockSize());
+ Assertions.assertEquals(BLOCK_SIZE / 2, blocks.get(1).getBlockSize());
+ Assertions.assertEquals(BLOCK_SIZE / 4, blocks.get(2).getBlockSize());
}
/**
@@ -429,9 +427,9 @@ private void testTC12(boolean appendToNewBlock) throws Exception {
AppendTestUtil.check(fs, p, len1 + len2);
if (appendToNewBlock) {
LocatedBlocks blks = fs.dfs.getLocatedBlocks(p.toString(), 0);
- Assert.assertEquals(2, blks.getLocatedBlocks().size());
- Assert.assertEquals(len1, blks.getLocatedBlocks().get(0).getBlockSize());
- Assert.assertEquals(len2, blks.getLocatedBlocks().get(1).getBlockSize());
+ Assertions.assertEquals(2, blks.getLocatedBlocks().size());
+ Assertions.assertEquals(len1, blks.getLocatedBlocks().get(0).getBlockSize());
+ Assertions.assertEquals(len2, blks.getLocatedBlocks().get(1).getBlockSize());
AppendTestUtil.check(fs, p, 0, len1);
AppendTestUtil.check(fs, p, len1, len2);
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend4.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend4.java
index 8c672b585df9b..8d8a2004e78b8 100755
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend4.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend4.java
@@ -18,9 +18,7 @@
package org.apache.hadoop.hdfs;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.*;
import static org.mockito.ArgumentMatchers.anyLong;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.ArgumentMatchers.anyString;
@@ -47,8 +45,8 @@
import org.apache.hadoop.hdfs.server.namenode.LeaseExpiredException;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.Before;
import org.junit.Test;
+import org.junit.jupiter.api.BeforeEach;
import org.slf4j.event.Level;
/* File Append tests for HDFS-200 & HDFS-142, specifically focused on:
@@ -72,7 +70,7 @@ public class TestFileAppend4 {
GenericTestUtils.setLogLevel(DFSClient.LOG, Level.TRACE);
}
- @Before
+ @BeforeEach
public void setUp() throws Exception {
this.conf = new Configuration();
@@ -379,7 +377,7 @@ public void testAppendInsufficientLocations() throws Exception {
FSDirectory dir = cluster.getNamesystem().getFSDirectory();
final INodeFile inode = INodeFile.
valueOf(dir.getINode("/testAppend"), "/testAppend");
- assertTrue("File should remain closed", !inode.isUnderConstruction());
+ assertTrue(!inode.isUnderConstruction(), "File should remain closed");
} finally {
if (null != fileSystem) {
fileSystem.close();
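
A detail worth noting for classes such as TestFileAppend4, which keeps the JUnit 4 @Test but now imports Jupiter's @BeforeEach: each engine only honors its own annotations, so the vintage engine would run the @Test methods without invoking a Jupiter @BeforeEach, while the Jupiter engine would not discover the JUnit 4 @Test at all. Lifecycle annotations and @Test therefore need to come from the same framework; a sketch of the fully-Jupiter form, with invented names:

import static org.junit.jupiter.api.Assertions.assertEquals;

import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;

class ConsistentAnnotationsSketchTest {

  private StringBuilder fixture;

  @BeforeEach
  void setUp() {
    // Runs before the @Test below because both annotations are Jupiter's.
    fixture = new StringBuilder("ready");
  }

  @Test
  void setUpReallyRan() {
    assertEquals("ready", fixture.toString());
  }
}
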
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppendRestart.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppendRestart.java
index a2b344cb65b94..ca72e91e7d57e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppendRestart.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppendRestart.java
@@ -18,7 +18,7 @@
package org.apache.hadoop.hdfs;
-import static org.junit.Assert.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
import java.io.File;
import java.io.IOException;
@@ -38,7 +38,7 @@
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.test.PathUtils;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
/**
* Unit test to make sure that Append properly logs the right
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileChecksum.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileChecksum.java
index c19d8c3e4a5e2..d583e2158ec05 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileChecksum.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileChecksum.java
@@ -30,11 +30,11 @@
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.DataNodeFaultInjector;
import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeEach;
import org.junit.rules.ExpectedException;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
@@ -100,7 +100,7 @@ public static Object[] getParameters() {
@Rule
public ExpectedException exception = ExpectedException.none();
- @Before
+ @BeforeEach
public void setup() throws IOException {
int numDNs = dataBlocks + parityBlocks + 2;
conf = new Configuration();
@@ -124,7 +124,7 @@ public void setup() throws IOException {
GenericTestUtils.setLogLevel(FileChecksumHelper.LOG, Level.DEBUG);
}
- @After
+ @AfterEach
public void tearDown() {
if (cluster != null) {
cluster.shutdown();
@@ -194,9 +194,9 @@ private void testStripedFileChecksum(int range1, int range2)
LOG.info("stripedFileChecksum2:" + stripedFileChecksum2);
LOG.info("stripedFileChecksum3:" + stripedFileChecksum3);
- Assert.assertTrue(stripedFileChecksum1.equals(stripedFileChecksum2));
+ Assertions.assertTrue(stripedFileChecksum1.equals(stripedFileChecksum2));
if (range1 >=0 && range1 != range2) {
- Assert.assertFalse(stripedFileChecksum1.equals(stripedFileChecksum3));
+ Assertions.assertFalse(stripedFileChecksum1.equals(stripedFileChecksum3));
}
}
@@ -209,9 +209,9 @@ public void testStripedAndReplicatedFileChecksum() throws Exception {
10, false);
if (checksumCombineMode.equals(ChecksumCombineMode.COMPOSITE_CRC.name())) {
- Assert.assertEquals(stripedFileChecksum1, replicatedFileChecksum);
+ Assertions.assertEquals(stripedFileChecksum1, replicatedFileChecksum);
} else {
- Assert.assertNotEquals(stripedFileChecksum1, replicatedFileChecksum);
+ Assertions.assertNotEquals(stripedFileChecksum1, replicatedFileChecksum);
}
}
@@ -228,9 +228,9 @@ public void testDifferentBlockSizeReplicatedFileChecksum() throws Exception {
FileChecksum checksum2 = getFileChecksum(replicatedFile2, -1, false);
if (checksumCombineMode.equals(ChecksumCombineMode.COMPOSITE_CRC.name())) {
- Assert.assertEquals(checksum1, checksum2);
+ Assertions.assertEquals(checksum1, checksum2);
} else {
- Assert.assertNotEquals(checksum1, checksum2);
+ Assertions.assertNotEquals(checksum1, checksum2);
}
}
@@ -245,8 +245,8 @@ public void testStripedFileChecksumWithMissedDataBlocks1() throws Exception {
LOG.info("stripedFileChecksum1:" + stripedFileChecksum1);
LOG.info("stripedFileChecksumRecon:" + stripedFileChecksumRecon);
- Assert.assertTrue("Checksum mismatches!",
- stripedFileChecksum1.equals(stripedFileChecksumRecon));
+ Assertions.assertTrue(
+ stripedFileChecksum1.equals(stripedFileChecksumRecon), "Checksum mismatches!");
}
@Test(timeout = 90000)
@@ -263,12 +263,12 @@ public void testStripedFileChecksumWithMissedDataBlocks2() throws Exception {
LOG.info("stripedFileChecksum2:" + stripedFileChecksum1);
LOG.info("stripedFileChecksum2Recon:" + stripedFileChecksum2Recon);
- Assert.assertTrue("Checksum mismatches!",
- stripedFileChecksum1.equals(stripedFileChecksum2));
- Assert.assertTrue("Checksum mismatches!",
- stripedFileChecksum1.equals(stripedFileChecksum2Recon));
- Assert.assertTrue("Checksum mismatches!",
- stripedFileChecksum2.equals(stripedFileChecksum2Recon));
+ Assertions.assertTrue(
+ stripedFileChecksum1.equals(stripedFileChecksum2), "Checksum mismatches!");
+ Assertions.assertTrue(
+ stripedFileChecksum1.equals(stripedFileChecksum2Recon), "Checksum mismatches!");
+ Assertions.assertTrue(
+ stripedFileChecksum2.equals(stripedFileChecksum2Recon), "Checksum mismatches!");
}
private void testStripedFileChecksumWithMissedDataBlocksRangeQuery(
@@ -284,8 +284,8 @@ private void testStripedFileChecksumWithMissedDataBlocksRangeQuery(
LOG.info("stripedFileChecksum1:" + stripedFileChecksum1);
LOG.info("stripedFileChecksumRecon:" + stripedFileChecksumRecon);
- Assert.assertTrue("Checksum mismatches!",
- stripedFileChecksum1.equals(stripedFileChecksumRecon));
+ Assertions.assertTrue(
+ stripedFileChecksum1.equals(stripedFileChecksumRecon), "Checksum mismatches!");
}
/**
@@ -544,8 +544,8 @@ public void testStripedFileChecksumWithReconstructFail()
// getting result.
FileChecksum fileChecksum1 = getFileChecksum(stripedFile4, -1, true);
- Assert.assertEquals("checksum should be same", fileChecksum,
- fileChecksum1);
+ Assertions.assertEquals(fileChecksum,
+ fileChecksum1, "checksum should be same");
} finally {
DataNodeFaultInjector.set(oldInjector);
}
@@ -578,7 +578,7 @@ public void testMixedBytesPerChecksum() throws Exception {
DFSTestUtil.writeFile(fs, new Path(replicatedFile2), fileData);
FileChecksum checksum1 = getFileChecksum(replicatedFile1, -1, false);
FileChecksum checksum2 = getFileChecksum(replicatedFile2, -1, false);
- Assert.assertEquals(checksum1, checksum2);
+ Assertions.assertEquals(checksum1, checksum2);
} else {
exception.expect(IOException.class);
FileChecksum checksum = getFileChecksum(replicatedFile1, -1, false);
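
TestFileChecksum still relies on the JUnit 4 ExpectedException rule and the @Test(timeout = ...) attribute, so it stays on the vintage engine for now. For reference, the Jupiter-native equivalents — not used in this patch — are assertThrows and @Timeout, roughly as in this invented sketch:

import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertThrows;

import java.io.IOException;
import java.util.concurrent.TimeUnit;

import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.Timeout;

class JupiterEquivalentsSketchTest {

  @Test
  @Timeout(value = 90, unit = TimeUnit.SECONDS)   // Jupiter's stand-in for @Test(timeout = 90000)
  void expectedExceptionWithoutTheRule() {
    // assertThrows replaces the ExpectedException rule: the lambda is the code
    // under test, and the thrown exception is returned for further checks.
    IOException e = assertThrows(IOException.class, () -> {
      throw new IOException("checksum mismatch");
    });
    assertEquals("checksum mismatch", e.getMessage());
  }
}
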
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileConcurrentReader.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileConcurrentReader.java
index 0c7a3fcaae22d..e4cad8bc7fbfa 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileConcurrentReader.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileConcurrentReader.java
@@ -16,10 +16,7 @@
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.*;
import java.io.IOException;
import java.util.Arrays;
@@ -38,10 +35,10 @@
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.util.StringUtils;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Ignore;
import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Disabled;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.slf4j.event.Level;
@@ -77,13 +74,13 @@ private enum SyncType {
private FileSystem fileSystem;
- @Before
+ @BeforeEach
public void setUp() throws IOException {
conf = new Configuration();
init(conf);
}
- @After
+ @AfterEach
public void tearDown() throws Exception {
if (cluster != null) {
cluster.shutdown();
@@ -124,10 +121,10 @@ private void assertBytesAvailable(
IOUtils.readFully(inputStream, buffer, 0, numBytes);
inputStream.close();
- assertTrue(
- "unable to validate bytes",
- validateSequentialBytes(buffer, 0, numBytes)
- );
+    assertTrue(
+        validateSequentialBytes(buffer, 0, numBytes),
+        "unable to validate bytes"
+    );
}
private void waitForBlocks(FileSystem fileSys, Path name)
@@ -273,8 +270,8 @@ public void run() {
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
}
-
- assertNull(errorMessage.get(), errorMessage.get());
+
+ assertNull(errorMessage.get(), errorMessage.get());
}
// for some reason, using tranferTo evokes the race condition more often
@@ -291,7 +288,7 @@ public void testUnfinishedBlockCRCErrorTransferToVerySmallWrite()
}
// fails due to issue w/append, disable
- @Ignore
+ @Disabled
public void _testUnfinishedBlockCRCErrorTransferToAppend()
throws IOException {
runTestUnfinishedBlockCRCError(true, SyncType.APPEND, DEFAULT_WRITE_SIZE);
@@ -309,7 +306,7 @@ public void testUnfinishedBlockCRCErrorNormalTransferVerySmallWrite()
}
// fails due to issue w/append, disable
- @Ignore
+ @Disabled
public void _testUnfinishedBlockCRCErrorNormalTransferAppend()
throws IOException {
runTestUnfinishedBlockCRCError(false, SyncType.APPEND, DEFAULT_WRITE_SIZE);
@@ -407,9 +404,9 @@ public void run() {
writer.join();
tailer.join();
- assertFalse(
- "error occurred, see log above", error.get()
- );
+      assertFalse(error.get(),
+          "error occurred, see log above"
+      );
} catch (InterruptedException e) {
LOG.info("interrupted waiting for writer or tailer to complete");
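
In TestFileConcurrentReader, @Ignore becomes @Disabled; both skip the test rather than delete it, and @Disabled additionally accepts an optional reason string, which is useful when the skip is tied to a known issue (here, the append race noted in the comments). A tiny sketch with an invented test and reason:

import org.junit.jupiter.api.Disabled;
import org.junit.jupiter.api.Test;

class DisabledSketchTest {

  @Disabled("append race; re-enable once the underlying issue is fixed")
  @Test
  void skippedForNow() {
    // never runs while @Disabled is present; reported as skipped
  }
}
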
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java
index 381cf1694f5d4..ea181b5180d72 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java
@@ -22,9 +22,7 @@
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.*;
import java.io.DataInputStream;
import java.io.DataOutputStream;
@@ -55,7 +53,7 @@
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.test.PathUtils;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
import org.slf4j.Logger;
import org.slf4j.event.Level;
@@ -86,7 +84,7 @@ public void testFileCorruption() throws Exception {
DataNode dn = cluster.getDataNodes().get(2);
Map blockReports =
dn.getFSDataset().getBlockReports(bpid);
- assertTrue("Blocks do not exist on data-dir", !blockReports.isEmpty());
+ assertTrue(!blockReports.isEmpty(), "Blocks do not exist on data-dir");
for (BlockListAsLongs report : blockReports.values()) {
for (BlockReportReplica brr : report) {
LOG.info("Deliberately removing block {}", brr.getBlockName());
@@ -94,8 +92,8 @@ public void testFileCorruption() throws Exception {
new ExtendedBlock(bpid, brr)).deleteData();
}
}
- assertTrue("Corrupted replicas not handled properly.",
- util.checkFiles(fs, "/srcdat"));
+ assertTrue(
+ util.checkFiles(fs, "/srcdat"), "Corrupted replicas not handled properly.");
util.cleanup(fs, "/srcdat");
} finally {
if (cluster != null) { cluster.shutdown(); }
@@ -146,8 +144,8 @@ public void testArrayOutOfBoundsException() throws Exception {
// get the block
final String bpid = cluster.getNamesystem().getBlockPoolId();
ExtendedBlock blk = getFirstBlock(cluster.getDataNodes().get(0), bpid);
- assertFalse("Data directory does not contain any blocks or there was an "
- + "IO error", blk==null);
+ assertFalse(blk == null, "Data directory does not contain any blocks or there was an "
+ + "IO error");
// start a third datanode
cluster.startDataNodes(conf, 1, true, null, null);
@@ -197,14 +195,14 @@ public void testCorruptionWithDiskFailure() throws Exception {
final String bpid = cluster.getNamesystem().getBlockPoolId();
File storageDir = cluster.getInstanceStorageDir(0, 0);
File dataDir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
- assertTrue("Data directory does not exist", dataDir.exists());
+ assertTrue(dataDir.exists(), "Data directory does not exist");
ExtendedBlock blk = getFirstBlock(cluster.getDataNodes().get(0), bpid);
if (blk == null) {
blk = getFirstBlock(cluster.getDataNodes().get(0), bpid);
}
- assertFalse("Data directory does not contain any blocks or there was an" +
- " " +
- "IO error", blk == null);
+ assertFalse(blk == null, "Data directory does not contain any blocks or there was an" +
+ " " +
+ "IO error");
ArrayList datanodes = cluster.getDataNodes();
assertEquals(datanodes.size(), 3);
FSNamesystem ns = cluster.getNamesystem();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java
index a7cf68b10168f..b4fc1febc353b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java
@@ -34,11 +34,8 @@
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_KEY;
import static org.apache.hadoop.test.MetricsAsserts.assertCounter;
import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-import static org.junit.Assume.assumeTrue;
+import static org.junit.jupiter.api.Assertions.*;
+import static org.junit.jupiter.api.Assumptions.assumeTrue;
import static org.mockito.Mockito.doReturn;
import java.io.BufferedReader;
@@ -89,8 +86,8 @@
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.util.Time;
-import org.junit.Assert;
import org.junit.Test;
+import org.junit.jupiter.api.Assertions;
import org.slf4j.event.Level;
/**
@@ -349,9 +346,9 @@ public void checkFileCreation(String netIf, boolean useDnHostname)
//
Path path = new Path("/");
System.out.println("Path : \"" + path.toString() + "\"");
- System.out.println(fs.getFileStatus(path).isDirectory());
- assertTrue("/ should be a directory",
- fs.getFileStatus(path).isDirectory());
+ System.out.println(fs.getFileStatus(path).isDirectory());
+ assertTrue(
+ fs.getFileStatus(path).isDirectory(), "/ should be a directory");
//
// Create a directory inside /, then try to overwrite it
@@ -363,7 +360,7 @@ public void checkFileCreation(String netIf, boolean useDnHostname)
try {
fs.create(dir1, true); // Create path, overwrite=true
fs.close();
- assertTrue("Did not prevent directory from being overwritten.", false);
+      fail("Did not prevent directory from being overwritten.");
} catch (FileAlreadyExistsException e) {
// expected
}
@@ -378,9 +375,9 @@ public void checkFileCreation(String netIf, boolean useDnHostname)
dfs.setQuota(file1.getParent(), 100L, blockSize*5);
FSDataOutputStream stm = createFile(fs, file1, 1);
- // verify that file exists in FS namespace
- assertTrue(file1 + " should be a file",
- fs.getFileStatus(file1).isFile());
+ // verify that file exists in FS namespace
+ assertTrue(
+ fs.getFileStatus(file1).isFile(), file1 + " should be a file");
System.out.println("Path : \"" + file1 + "\"");
// write to file
@@ -390,14 +387,14 @@ public void checkFileCreation(String netIf, boolean useDnHostname)
// verify that file size has changed to the full size
long len = fs.getFileStatus(file1).getLen();
- assertTrue(file1 + " should be of size " + fileSize +
- " but found to be of size " + len,
- len == fileSize);
+ assertTrue(
+ len == fileSize, file1 + " should be of size " + fileSize +
+ " but found to be of size " + len);
// verify the disk space the file occupied
long diskSpace = dfs.getContentSummary(file1.getParent()).getLength();
- assertEquals(file1 + " should take " + fileSize + " bytes disk space " +
- "but found to take " + diskSpace + " bytes", fileSize, diskSpace);
+ assertEquals(fileSize, diskSpace, file1 + " should take " + fileSize + " bytes disk space " +
+ "but found to take " + diskSpace + " bytes");
// Check storage usage
// can't check capacities for real storage since the OS file system may be changing under us.
@@ -461,12 +458,12 @@ public void testDeleteOnExit() throws IOException {
fs = cluster.getFileSystem();
localfs = FileSystem.getLocal(conf);
- assertTrue(file1 + " still exists inspite of deletOnExit set.",
- !fs.exists(file1));
- assertTrue(file2 + " still exists inspite of deletOnExit set.",
- !fs.exists(file2));
- assertTrue(file3 + " still exists inspite of deletOnExit set.",
- !localfs.exists(file3));
+ assertTrue(
+ !fs.exists(file1), file1 + " still exists inspite of deletOnExit set.");
+ assertTrue(
+ !fs.exists(file2), file2 + " still exists inspite of deletOnExit set.");
+ assertTrue(
+ !localfs.exists(file3), file3 + " still exists inspite of deletOnExit set.");
System.out.println("DeleteOnExit successful.");
} finally {
@@ -560,9 +557,9 @@ public void testFileCreationError1() throws IOException {
Path file1 = new Path("/filestatus.dat");
FSDataOutputStream stm = createFile(fs, file1, 1);
- // verify that file exists in FS namespace
- assertTrue(file1 + " should be a file",
- fs.getFileStatus(file1).isFile());
+ // verify that file exists in FS namespace
+ assertTrue(
+ fs.getFileStatus(file1).isFile(), file1 + " should be a file");
System.out.println("Path : \"" + file1 + "\"");
// kill the datanode
@@ -598,8 +595,8 @@ public void testFileCreationError1() throws IOException {
LocatedBlocks locations = client.getNamenode().getBlockLocations(
file1.toString(), 0, Long.MAX_VALUE);
System.out.println("locations = " + locations.locatedBlockCount());
- assertTrue("Error blocks were not cleaned up",
- locations.locatedBlockCount() == 0);
+ assertTrue(
+ locations.locatedBlockCount() == 0, "Error blocks were not cleaned up");
} finally {
cluster.shutdown();
client.close();
@@ -734,14 +731,14 @@ public void testFileCreationNamenodeRestart()
HdfsDataOutputStream stm = create(fs, file1, 1);
System.out.println("testFileCreationNamenodeRestart: "
+ "Created file " + file1);
- assertEquals(file1 + " should be replicated to 1 datanode.", 1,
- stm.getCurrentBlockReplication());
+ assertEquals(1,
+ stm.getCurrentBlockReplication(), file1 + " should be replicated to 1 datanode.");
// write two full blocks.
writeFile(stm, numBlocks * blockSize);
stm.hflush();
- assertEquals(file1 + " should still be replicated to 1 datanode.", 1,
- stm.getCurrentBlockReplication());
+ assertEquals(1,
+ stm.getCurrentBlockReplication(), file1 + " should still be replicated to 1 datanode.");
// rename file wile keeping it open.
Path fileRenamed = new Path("/filestatusRenamed.dat");
@@ -838,15 +835,15 @@ public void testFileCreationNamenodeRestart()
LocatedBlocks locations = client.getNamenode().getBlockLocations(
file1.toString(), 0, Long.MAX_VALUE);
System.out.println("locations = " + locations.locatedBlockCount());
- assertTrue("Error blocks were not cleaned up for file " + file1,
- locations.locatedBlockCount() == 3);
+ assertTrue(
+ locations.locatedBlockCount() == 3, "Error blocks were not cleaned up for file " + file1);
// verify filestatus2.dat
locations = client.getNamenode().getBlockLocations(
file2.toString(), 0, Long.MAX_VALUE);
System.out.println("locations = " + locations.locatedBlockCount());
- assertTrue("Error blocks were not cleaned up for file " + file2,
- locations.locatedBlockCount() == 1);
+ assertTrue(
+ locations.locatedBlockCount() == 1, "Error blocks were not cleaned up for file " + file2);
} finally {
IOUtils.closeStream(fs);
cluster.shutdown();
@@ -882,9 +879,9 @@ public void testDFSClientDeath() throws IOException, InterruptedException {
// This should close all existing file.
dfsclient.close();
- // reopen file system and verify that file exists.
- assertTrue(file1 + " does not exist.",
- AppendTestUtil.createHdfsWithDifferentUsername(conf).exists(file1));
+ // reopen file system and verify that file exists.
+ assertTrue(
+ AppendTestUtil.createHdfsWithDifferentUsername(conf).exists(file1), file1 + " does not exist.");
} finally {
cluster.shutdown();
}
@@ -926,19 +923,19 @@ public static void testFileCreationNonRecursive(FileSystem fs) throws IOExceptio
// Create a file when parent dir exists as file, should fail
expectedException = createNonRecursive(fs, new Path(path, "Create"), 1, createFlag);
- assertTrue("Create a file when parent directory exists as a file"
- + " should throw ParentNotDirectoryException ",
- expectedException != null
- && expectedException instanceof ParentNotDirectoryException);
+ assertTrue(
+ expectedException != null
+ && expectedException instanceof ParentNotDirectoryException, "Create a file when parent directory exists as a file"
+ + " should throw ParentNotDirectoryException ");
fs.delete(path, true);
// Create a file in a non-exist directory, should fail
final Path path2 = new Path(nonExistDir + "/testCreateNonRecursive");
expectedException = createNonRecursive(fs, path2, 1, createFlag);
- assertTrue("Create a file in a non-exist dir using"
- + " createNonRecursive() should throw FileNotFoundException ",
- expectedException != null
- && expectedException instanceof FileNotFoundException);
+ assertTrue(
+ expectedException != null
+ && expectedException instanceof FileNotFoundException, "Create a file in a non-exist dir using"
+ + " createNonRecursive() should throw FileNotFoundException ");
EnumSet overwriteFlag =
EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE);
@@ -948,20 +945,20 @@ public static void testFileCreationNonRecursive(FileSystem fs) throws IOExceptio
// Overwrite a file when parent dir exists as file, should fail
expectedException = createNonRecursive(fs, new Path(path, "Overwrite"), 1, overwriteFlag);
- assertTrue("Overwrite a file when parent directory exists as a file"
- + " should throw ParentNotDirectoryException ",
- expectedException != null
- && expectedException instanceof ParentNotDirectoryException);
+ assertTrue(
+ expectedException != null
+ && expectedException instanceof ParentNotDirectoryException, "Overwrite a file when parent directory exists as a file"
+ + " should throw ParentNotDirectoryException ");
fs.delete(path, true);
// Overwrite a file in a non-exist directory, should fail
final Path path3 = new Path(nonExistDir + "/testOverwriteNonRecursive");
expectedException = createNonRecursive(fs, path3, 1, overwriteFlag);
- assertTrue("Overwrite a file in a non-exist dir using"
- + " createNonRecursive() should throw FileNotFoundException ",
- expectedException != null
- && expectedException instanceof FileNotFoundException);
+ assertTrue(
+ expectedException != null
+ && expectedException instanceof FileNotFoundException, "Overwrite a file in a non-exist dir using"
+ + " createNonRecursive() should throw FileNotFoundException ");
}
// Attempts to create and close a file using FileSystem.createNonRecursive(),
@@ -1090,8 +1087,8 @@ public void testLeaseExpireHardLimit() throws Exception {
out.write("something".getBytes());
out.hflush();
int actualRepl = out.getCurrentBlockReplication();
- assertTrue(f + " should be replicated to " + DATANODE_NUM + " datanodes.",
- actualRepl == DATANODE_NUM);
+ assertTrue(
+ actualRepl == DATANODE_NUM, f + " should be replicated to " + DATANODE_NUM + " datanodes.");
// set the soft and hard limit to be 1 second so that the
// namenode triggers lease recovery
@@ -1190,7 +1187,7 @@ public void testFsCloseAfterClusterShutdown() throws IOException {
} catch (IOException e) {
hasException = true;
}
- assertTrue("Failed to close file after cluster shutdown", hasException);
+ assertTrue(hasException, "Failed to close file after cluster shutdown");
} finally {
System.out.println("testFsCloseAfterClusterShutdown successful");
if (cluster != null) {
@@ -1376,7 +1373,7 @@ public void testFileCreationWithOverwrite() throws Exception {
} finally {
in.close();
}
- Assert.assertArrayEquals(newData, result);
+ Assertions.assertArrayEquals(newData, result);
// Case 2: Restart NN, check the file
cluster.restartNameNode();
@@ -1387,7 +1384,7 @@ public void testFileCreationWithOverwrite() throws Exception {
} finally {
in.close();
}
- Assert.assertArrayEquals(newData, result);
+ Assertions.assertArrayEquals(newData, result);
// Case 3: Save new checkpoint and restart NN, check the file
NameNodeAdapter.enterSafeMode(nn, false);
@@ -1401,7 +1398,7 @@ public void testFileCreationWithOverwrite() throws Exception {
} finally {
in.close();
}
- Assert.assertArrayEquals(newData, result);
+ Assertions.assertArrayEquals(newData, result);
} finally {
if (dfs != null) {
dfs.close();
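
TestFileCreation also swaps org.junit.Assume for org.junit.jupiter.api.Assumptions. Assumptions follow the same rule as assertions — any message moves to the last parameter — and a failed assumption aborts (skips) the test rather than failing it. A short sketch with an invented condition:

import static org.junit.jupiter.api.Assumptions.assumeTrue;

import org.junit.jupiter.api.Test;

class AssumptionSketchTest {

  @Test
  void onlyRunsWhereTheAssumptionHolds() {
    boolean notWindows = !System.getProperty("os.name").startsWith("Windows");

    // Aborts (reports the test as skipped) instead of failing when false;
    // the message now comes last, as with assertions.
    assumeTrue(notWindows, "test assumes a non-Windows local file system");

    // ... body that relies on the assumption ...
  }
}
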
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreationClient.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreationClient.java
index 986bb560ad366..41b5b340c8805 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreationClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreationClient.java
@@ -17,7 +17,7 @@
*/
package org.apache.hadoop.hdfs;
-import static org.junit.Assert.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
@@ -30,7 +30,7 @@
import org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
import org.slf4j.event.Level;
/**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreationDelete.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreationDelete.java
index 728fa7557e3a0..77ff225b5e4bc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreationDelete.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreationDelete.java
@@ -16,7 +16,7 @@
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertTrue;
import java.io.IOException;
@@ -24,7 +24,7 @@
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
import org.slf4j.event.Level;
public class TestFileCreationDelete {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreationEmpty.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreationEmpty.java
index fbada206bdb71..2373ea5b3d97a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreationEmpty.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreationEmpty.java
@@ -16,14 +16,14 @@
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
-import static org.junit.Assert.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertFalse;
import java.util.ConcurrentModificationException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.server.namenode.LeaseManager;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
/**
* Test empty file creation.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileLengthOnClusterRestart.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileLengthOnClusterRestart.java
index c8420ca82a819..5bd80e36041a6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileLengthOnClusterRestart.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileLengthOnClusterRestart.java
@@ -23,8 +23,8 @@
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
-import org.junit.Assert;
import org.junit.Test;
+import org.junit.jupiter.api.Assertions;
/** Test the fileLength on cluster restarts */
public class TestFileLengthOnClusterRestart {
@@ -55,7 +55,7 @@ public void testFileLengthWithHSyncAndClusterRestartWithOutDNsRegister()
in = (HdfsDataInputStream) dfs.open(path, 1024);
// Verify the length when we just restart NN. DNs will register
// immediately.
- Assert.assertEquals(fileLength, in.getVisibleLength());
+ Assertions.assertEquals(fileLength, in.getVisibleLength());
cluster.shutdownDataNodes();
cluster.restartNameNode(false);
// This is just for ensuring NN started.
@@ -63,9 +63,9 @@ public void testFileLengthWithHSyncAndClusterRestartWithOutDNsRegister()
try {
in = (HdfsDataInputStream) dfs.open(path);
- Assert.fail("Expected IOException");
+ Assertions.fail("Expected IOException");
} catch (IOException e) {
- Assert.assertTrue(e.getLocalizedMessage().indexOf(
+ Assertions.assertTrue(e.getLocalizedMessage().indexOf(
"Name node is in safe mode") >= 0);
}
} finally {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatus.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatus.java
index a5f8911b97658..cfa83d35527ef 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatus.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatus.java
@@ -17,10 +17,7 @@
*/
package org.apache.hadoop.hdfs;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.*;
import java.io.FileNotFoundException;
import java.io.IOException;
@@ -38,16 +35,16 @@
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
import org.slf4j.event.Level;
/**
* This class tests the FileStatus API.
*/
public class TestFileStatus {
- {
+ static {
GenericTestUtils.setLogLevel(FSNamesystem.LOG, Level.TRACE);
GenericTestUtils.setLogLevel(FileSystem.LOG, Level.TRACE);
}
@@ -63,7 +60,7 @@ public class TestFileStatus {
private static DFSClient dfsClient;
private static Path file1;
- @BeforeClass
+ @BeforeAll
public static void testSetUp() throws Exception {
conf = new HdfsConfiguration();
conf.setInt(DFSConfigKeys.DFS_LIST_LIMIT, 2);
@@ -76,7 +73,7 @@ public static void testSetUp() throws Exception {
seed);
}
- @AfterClass
+ @AfterAll
public static void testTearDown() throws Exception {
if (fs != null) {
fs.close();
@@ -96,13 +93,13 @@ private void checkFile(FileSystem fileSys, Path name, int repl)
public void testGetFileInfo() throws IOException {
// Check that / exists
Path path = new Path("/");
- assertTrue("/ should be a directory",
- fs.getFileStatus(path).isDirectory());
+ assertTrue(
+ fs.getFileStatus(path).isDirectory(), "/ should be a directory");
ContractTestUtils.assertNotErasureCoded(fs, path);
// Make sure getFileInfo returns null for files which do not exist
HdfsFileStatus fileInfo = dfsClient.getFileInfo("/noSuchFile");
- assertEquals("Non-existant file should result in null", null, fileInfo);
+ assertNull(fileInfo, "Non-existant file should result in null");
Path path1 = new Path("/name1");
Path path2 = new Path("/name1/name2");
@@ -119,8 +116,8 @@ public void testGetFileInfo() throws IOException {
dfsClient.getFileInfo("non-absolute");
fail("getFileInfo for a non-absolute path did not throw IOException");
} catch (RemoteException re) {
- assertTrue("Wrong exception for invalid file name: "+re,
- re.toString().contains("Absolute path required"));
+ assertTrue(
+ re.toString().contains("Absolute path required"), "Wrong exception for invalid file name: " + re);
}
}
@@ -131,7 +128,7 @@ public void testGetFileStatusOnFile() throws Exception {
checkFile(fs, file1, 1);
// test getFileStatus on a file
FileStatus status = fs.getFileStatus(file1);
- assertFalse(file1 + " should be a file", status.isDirectory());
+ assertFalse(status.isDirectory(), file1 + " should be a file");
assertEquals(blockSize, status.getBlockSize());
assertEquals(1, status.getReplication());
assertEquals(fileSize, status.getLen());
@@ -139,9 +136,9 @@ public void testGetFileStatusOnFile() throws Exception {
assertEquals(file1.makeQualified(fs.getUri(),
fs.getWorkingDirectory()).toString(),
status.getPath().toString());
- assertTrue(file1 + " should have erasure coding unset in " +
- "FileStatus#toString(): " + status,
- status.toString().contains("isErasureCoded=false"));
+ assertTrue(
+ status.toString().contains("isErasureCoded=false"), file1 + " should have erasure coding unset in " +
+ "FileStatus#toString(): " + status);
}
/** Test the FileStatus obtained calling listStatus on a file */
@@ -150,7 +147,7 @@ public void testListStatusOnFile() throws IOException {
FileStatus[] stats = fs.listStatus(file1);
assertEquals(1, stats.length);
FileStatus status = stats[0];
- assertFalse(file1 + " should be a file", status.isDirectory());
+ assertFalse(status.isDirectory(), file1 + " should be a file");
assertEquals(blockSize, status.getBlockSize());
assertEquals(1, status.getReplication());
assertEquals(fileSize, status.getLen());
@@ -162,7 +159,7 @@ public void testListStatusOnFile() throws IOException {
RemoteIterator<FileStatus> itor = fc.listStatus(file1);
status = itor.next();
assertEquals(stats[0], status);
- assertFalse(file1 + " should be a file", status.isDirectory());
+ assertFalse(status.isDirectory(), file1 + " should be a file");
}
/** Test getting a FileStatus object using a non-existant path */
@@ -186,8 +183,8 @@ public void testGetFileStatusOnNonExistantFileDir() throws IOException {
fs.getFileStatus(dir);
fail("getFileStatus of non-existent path should fail");
} catch (FileNotFoundException fe) {
- assertTrue("Exception doesn't indicate non-existant path",
- fe.getMessage().startsWith("File does not exist"));
+ assertTrue(
fe.getMessage().startsWith("File does not exist"), "Exception doesn't indicate non-existent path");
}
}
@@ -196,13 +193,13 @@ public void testGetFileStatusOnNonExistantFileDir() throws IOException {
public void testGetFileStatusOnDir() throws Exception {
// Create the directory
Path dir = new Path("/test/mkdirs");
- assertTrue("mkdir failed", fs.mkdirs(dir));
- assertTrue("mkdir failed", fs.exists(dir));
+ assertTrue(fs.mkdirs(dir), "mkdir failed");
+ assertTrue(fs.exists(dir), "mkdir failed");
// test getFileStatus on an empty directory
FileStatus status = fs.getFileStatus(dir);
- assertTrue(dir + " should be a directory", status.isDirectory());
- assertTrue(dir + " should be zero size ", status.getLen() == 0);
+ assertTrue(status.isDirectory(), dir + " should be a directory");
+ assertEquals(0, status.getLen(), dir + " should be zero size ");
ContractTestUtils.assertNotErasureCoded(fs, dir);
assertEquals(dir.makeQualified(fs.getUri(),
fs.getWorkingDirectory()).toString(),
@@ -210,15 +207,15 @@ public void testGetFileStatusOnDir() throws Exception {
// test listStatus on an empty directory
FileStatus[] stats = fs.listStatus(dir);
- assertEquals(dir + " should be empty", 0, stats.length);
- assertEquals(dir + " should be zero size ",
- 0, fs.getContentSummary(dir).getLength());
+ assertEquals(0, stats.length, dir + " should be empty");
+ assertEquals(
+ 0, fs.getContentSummary(dir).getLength(), dir + " should be zero size ");
RemoteIterator<FileStatus> itor = fc.listStatus(dir);
- assertFalse(dir + " should be empty", itor.hasNext());
+ assertFalse(itor.hasNext(), dir + " should be empty");
itor = fs.listStatusIterator(dir);
- assertFalse(dir + " should be empty", itor.hasNext());
+ assertFalse(itor.hasNext(), dir + " should be empty");
// create another file that is smaller than a block.
Path file2 = new Path(dir, "filestatus2.dat");
@@ -242,25 +239,25 @@ public void testGetFileStatusOnDir() throws Exception {
// Verify that the size of the directory increased by the size
// of the two files
- final int expected = blockSize/2;
- assertEquals(dir + " size should be " + expected,
- expected, fs.getContentSummary(dir).getLength());
+ final int expected = blockSize/2;
+ assertEquals(
+ expected, fs.getContentSummary(dir).getLength(), dir + " size should be " + expected);
// Test listStatus on a non-empty directory
stats = fs.listStatus(dir);
- assertEquals(dir + " should have two entries", 2, stats.length);
+ assertEquals(2, stats.length, dir + " should have two entries");
assertEquals(file2.toString(), stats[0].getPath().toString());
assertEquals(file3.toString(), stats[1].getPath().toString());
itor = fc.listStatus(dir);
assertEquals(file2.toString(), itor.next().getPath().toString());
assertEquals(file3.toString(), itor.next().getPath().toString());
- assertFalse("Unexpected addtional file", itor.hasNext());
+ assertFalse(itor.hasNext(), "Unexpected addtional file");
itor = fs.listStatusIterator(dir);
assertEquals(file2.toString(), itor.next().getPath().toString());
assertEquals(file3.toString(), itor.next().getPath().toString());
- assertFalse("Unexpected addtional file", itor.hasNext());
+ assertFalse(itor.hasNext(), "Unexpected addtional file");
// Test iterative listing. Now dir has 2 entries, create one more.
@@ -268,7 +265,7 @@ public void testGetFileStatusOnDir() throws Exception {
fs.mkdirs(dir3);
dir3 = fs.makeQualified(dir3);
stats = fs.listStatus(dir);
- assertEquals(dir + " should have three entries", 3, stats.length);
+ assertEquals(3, stats.length, dir + " should have three entries");
assertEquals(dir3.toString(), stats[0].getPath().toString());
assertEquals(file2.toString(), stats[1].getPath().toString());
assertEquals(file3.toString(), stats[2].getPath().toString());
@@ -277,13 +274,13 @@ public void testGetFileStatusOnDir() throws Exception {
assertEquals(dir3.toString(), itor.next().getPath().toString());
assertEquals(file2.toString(), itor.next().getPath().toString());
assertEquals(file3.toString(), itor.next().getPath().toString());
- assertFalse("Unexpected addtional file", itor.hasNext());
+ assertFalse(itor.hasNext(), "Unexpected addtional file");
itor = fs.listStatusIterator(dir);
assertEquals(dir3.toString(), itor.next().getPath().toString());
assertEquals(file2.toString(), itor.next().getPath().toString());
assertEquals(file3.toString(), itor.next().getPath().toString());
- assertFalse("Unexpected addtional file", itor.hasNext());
+ assertFalse(itor.hasNext(), "Unexpected addtional file");
// Now dir has 3 entries, create two more
Path dir4 = fs.makeQualified(new Path(dir, "dir4"));
@@ -293,7 +290,7 @@ public void testGetFileStatusOnDir() throws Exception {
fs.mkdirs(dir5);
dir5 = fs.makeQualified(dir5);
stats = fs.listStatus(dir);
- assertEquals(dir + " should have five entries", 5, stats.length);
+ assertEquals(5, stats.length, dir + " should have five entries");
assertEquals(dir3.toString(), stats[0].getPath().toString());
assertEquals(dir4.toString(), stats[1].getPath().toString());
assertEquals(dir5.toString(), stats[2].getPath().toString());
@@ -325,7 +322,7 @@ public void testGetFileStatusOnDir() throws Exception {
try {
itor.hasNext();
fail("FileNotFoundException expected");
- } catch (FileNotFoundException fnfe) {
+ } catch (FileNotFoundException ignored) {
}
fs.mkdirs(file2);
@@ -340,7 +337,7 @@ public void testGetFileStatusOnDir() throws Exception {
count++;
}
fail("FileNotFoundException expected");
- } catch (FileNotFoundException fnfe) {
+ } catch (FileNotFoundException ignored) {
}
assertEquals(2, count);
}
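Review note on TestFileStatus above: nearly every change in this file follows one mechanical rule of the migration — org.junit.Assert takes the optional failure message as the first parameter, while org.junit.jupiter.api.Assertions takes it as the last. A tiny sketch of the swap (the local variable is illustrative, standing in for the fs.getFileStatus(path).isDirectory() call in the hunk):

    import static org.junit.jupiter.api.Assertions.assertTrue;

    import org.junit.jupiter.api.Test;

    class MessageOrderSketch {
      @Test
      void failureMessageIsNowTheLastArgument() {
        boolean isDirectory = true;  // stand-in for fs.getFileStatus(path).isDirectory()
        // JUnit 4: assertTrue("/ should be a directory", isDirectory);
        // JUnit 5: condition first, message last.
        assertTrue(isDirectory, "/ should be a directory");
      }
    }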
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatusSerialization.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatusSerialization.java
index 444c0ec156a4d..9077eb8793a44 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatusSerialization.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatusSerialization.java
@@ -35,9 +35,9 @@
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.thirdparty.protobuf.ByteString;
+import org.junit.jupiter.api.Test;
-import org.junit.Test;
-import static org.junit.Assert.*;
+import static org.junit.jupiter.api.Assertions.assertEquals;
/**
* Verify compatible FileStatus/HdfsFileStatus serialization.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatusWithDefaultECPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatusWithDefaultECPolicy.java
index a57777a0a1a0d..3066d39668378 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatusWithDefaultECPolicy.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatusWithDefaultECPolicy.java
@@ -17,23 +17,24 @@
*/
package org.apache.hadoop.hdfs;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
-
-import java.io.IOException;
-
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.contract.ContractTestUtils;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
-import org.junit.After;
-import org.junit.Before;
import org.junit.Rule;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
import org.junit.rules.Timeout;
+import java.io.IOException;
+
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertNull;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+
/**
* This test ensures the statuses of EC files with the default policy.
*/
@@ -45,7 +46,7 @@ public class TestFileStatusWithDefaultECPolicy {
@Rule
public Timeout globalTimeout = new Timeout(300000);
- @Before
+ @BeforeEach
public void before() throws IOException {
HdfsConfiguration conf = new HdfsConfiguration();
cluster =
@@ -56,7 +57,7 @@ public void before() throws IOException {
fs.enableErasureCodingPolicy(getEcPolicy().getName());
}
- @After
+ @AfterEach
public void after() {
if (cluster != null) {
cluster.shutdown();
@@ -89,7 +90,7 @@ public void testFileStatusWithECPolicy() throws Exception {
final ErasureCodingPolicy ecPolicy2 =
client.getFileInfo(dir.toUri().getPath()).getErasureCodingPolicy();
assertNotNull(ecPolicy2);
- assertTrue(ecPolicy1.equals(ecPolicy2));
+ assertEquals(ecPolicy1, ecPolicy2);
// test file with EC policy
fs.create(file).close();
@@ -97,11 +98,11 @@ public void testFileStatusWithECPolicy() throws Exception {
fs.getClient().getFileInfo(file.toUri().getPath())
.getErasureCodingPolicy();
assertNotNull(ecPolicy3);
- assertTrue(ecPolicy1.equals(ecPolicy3));
+ assertEquals(ecPolicy1, ecPolicy3);
ContractTestUtils.assertErasureCoded(fs, file);
FileStatus status = fs.getFileStatus(file);
- assertTrue(file + " should have erasure coding set in " +
- "FileStatus#toString(): " + status,
- status.toString().contains("isErasureCoded=true"));
+ assertTrue(status.toString().contains("isErasureCoded=true"),
+ file + " should have erasure coding set in "
+ + "FileStatus#toString(): " + status);
}
}
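Review note on TestFileStatusWithDefaultECPolicy above: the class now mixes Jupiter lifecycle annotations with the JUnit 4 @Rule/Timeout pair, which the Jupiter engine ignores unless junit-jupiter-migrationsupport (or the vintage engine) is in play. Assuming the goal is a pure Jupiter test, the class-level @Timeout annotation is the usual replacement; a sketch:

    import java.util.concurrent.TimeUnit;

    import org.junit.jupiter.api.Test;
    import org.junit.jupiter.api.Timeout;

    // Class-level stand-in for: @Rule public Timeout globalTimeout = new Timeout(300000);
    @Timeout(value = 300, unit = TimeUnit.SECONDS)
    class TimeoutSketch {
      @Test
      void everyTestIsBoundedByTheClassLevelTimeout() {
        // Body elided; each @Test method in the class now has a 300 second limit.
      }
    }

The same annotation at method level would also cover the @Test(timeout = ...) form that a few later files in this patch still carry.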
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFsShellPermission.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFsShellPermission.java
index 7aa9f2362d068..0811f0f40f15e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFsShellPermission.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFsShellPermission.java
@@ -18,7 +18,7 @@
package org.apache.hadoop.hdfs;
-import static org.junit.Assert.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
@@ -37,7 +37,7 @@
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.security.UserGroupInformation;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
/**
* This test covers privilege related aspects of FsShell
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetBlocks.java
index 1ee166e6cd3f1..84e7be4baea83 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetBlocks.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetBlocks.java
@@ -17,7 +17,7 @@
*/
package org.apache.hadoop.hdfs;
-import static org.junit.Assert.*;
+import static org.junit.jupiter.api.Assertions.*;
import java.net.InetSocketAddress;
import java.util.Collection;
@@ -56,8 +56,7 @@
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.test.LambdaTestUtils;
-
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -118,8 +117,8 @@ public void testReadSelectNonStaleDatanode() throws Exception {
List<DatanodeDescriptor> nodeInfoList = cluster.getNameNode()
.getNamesystem().getBlockManager().getDatanodeManager()
.getDatanodeListForReport(DatanodeReportType.LIVE);
- assertEquals("Unexpected number of datanodes", NUM_DATA_NODES,
- nodeInfoList.size());
+ assertEquals(NUM_DATA_NODES,
+ nodeInfoList.size(), "Unexpected number of datanodes");
FileSystem fileSys = cluster.getFileSystem();
FSDataOutputStream stm = null;
try {
@@ -323,18 +322,18 @@ void testBlockIterator(MiniDFSCluster cluster) {
String dId = cluster.getDataNodes().get(0).getDatanodeUuid();
DatanodeDescriptor dnd = BlockManagerTestUtil.getDatanode(ns, dId);
DatanodeStorageInfo[] storages = dnd.getStorageInfos();
- assertEquals("DataNode should have 4 storages", 4, storages.length);
+ assertEquals(4, storages.length, "DataNode should have 4 storages");
Iterator<BlockInfo> dnBlockIt = null;
// check illegal start block number
try {
dnBlockIt = BlockManagerTestUtil.getBlockIterator(
cluster.getNamesystem(), dId, -1);
- assertTrue("Should throw IllegalArgumentException", false);
+ assertTrue(false, "Should throw IllegalArgumentException");
} catch(IllegalArgumentException ei) {
// as expected
}
- assertNull("Iterator should be null", dnBlockIt);
+ assertNull(dnBlockIt, "Iterator should be null");
// form an array of all DataNode blocks
int numBlocks = dnd.numBlocks();
@@ -347,8 +346,8 @@ void testBlockIterator(MiniDFSCluster cluster) {
allBlocks[idx++] = storageBlockIt.next();
try {
storageBlockIt.remove();
- assertTrue(
- "BlockInfo iterator should have been unmodifiable", false);
+ fail(
+ "BlockInfo iterator should have been unmodifiable");
} catch (UnsupportedOperationException e) {
//expected exception
}
@@ -359,17 +358,17 @@ void testBlockIterator(MiniDFSCluster cluster) {
for(int i = 0; i < allBlocks.length; i++) {
// create iterator starting from i
dnBlockIt = BlockManagerTestUtil.getBlockIterator(ns, dId, i);
- assertTrue("Block iterator should have next block", dnBlockIt.hasNext());
+ assertTrue(dnBlockIt.hasNext(), "Block iterator should have next block");
// check iterator lists blocks in the desired order
for(int j = i; j < allBlocks.length; j++) {
- assertEquals("Wrong block order", allBlocks[j], dnBlockIt.next());
+ assertEquals(allBlocks[j], dnBlockIt.next(), "Wrong block order");
}
}
// check start block number larger than numBlocks in the DataNode
dnBlockIt = BlockManagerTestUtil.getBlockIterator(
ns, dId, allBlocks.length + 1);
- assertFalse("Iterator should not have next block", dnBlockIt.hasNext());
+ assertFalse(dnBlockIt.hasNext(), "Iterator should not have next block");
}
@Test
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetFileChecksum.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetFileChecksum.java
index cf61e8451bfe0..ae569b6fcb4c8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetFileChecksum.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetFileChecksum.java
@@ -17,7 +17,7 @@
*/
package org.apache.hadoop.hdfs;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.fail;
import java.io.IOException;
@@ -25,10 +25,10 @@
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileChecksum;
import org.apache.hadoop.fs.Path;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
public class TestGetFileChecksum {
private static final int BLOCKSIZE = 1024;
@@ -38,7 +38,7 @@ public class TestGetFileChecksum {
private MiniDFSCluster cluster;
private DistributedFileSystem dfs;
- @Before
+ @BeforeEach
public void setUp() throws Exception {
conf = new Configuration();
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCKSIZE);
@@ -48,7 +48,7 @@ public void setUp() throws Exception {
dfs = cluster.getFileSystem();
}
- @After
+ @AfterEach
public void tearDown() throws Exception {
if (cluster != null) {
cluster.shutdown();
@@ -69,7 +69,7 @@ public void testGetFileChecksum(final Path foo, final int appendLength)
for (int i = 0; i < appendRounds + 1; i++) {
FileChecksum checksum = dfs.getFileChecksum(foo, appendLength * (i+1));
- Assert.assertTrue(checksum.equals(fc[i]));
+ Assertions.assertTrue(checksum.equals(fc[i]));
}
}
@@ -82,7 +82,7 @@ public void testGetFileChecksumForBlocksUnderConstruction() {
fail("getFileChecksum should fail for files "
+ "with blocks under construction");
} catch (IOException ie) {
- Assert.assertTrue(ie.getMessage().contains(
+ Assertions.assertTrue(ie.getMessage().contains(
"Fail to get checksum, since file /testFile "
+ "is under construction."));
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHAAuxiliaryPort.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHAAuxiliaryPort.java
index 45ccefaae67cc..2e888a0e2da2d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHAAuxiliaryPort.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHAAuxiliaryPort.java
@@ -23,13 +23,13 @@
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX;
import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_NAMENODE_RPC_ADDRESS_AUXILIARY_KEY;
import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_NAMESERVICES;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
/**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSFileSystemContract.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSFileSystemContract.java
index 6da46dec378d2..04699ea8af16e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSFileSystemContract.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSFileSystemContract.java
@@ -27,16 +27,16 @@
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
public class TestHDFSFileSystemContract extends FileSystemContractBaseTest {
private MiniDFSCluster cluster;
private String defaultWorkingDirectory;
- @Before
+ @BeforeEach
public void setUp() throws Exception {
Configuration conf = new HdfsConfiguration();
conf.set(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY,
@@ -49,7 +49,7 @@ public void setUp() throws Exception {
UserGroupInformation.getCurrentUser().getShortUserName();
}
- @After
+ @AfterEach
public void tearDown() throws Exception {
super.tearDown();
if (cluster != null) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSPolicyProvider.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSPolicyProvider.java
index 0a74a2d025659..394036e7543b5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSPolicyProvider.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSPolicyProvider.java
@@ -17,7 +17,8 @@
*/
package org.apache.hadoop.hdfs;
-import static org.junit.Assert.*;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertTrue;
import java.util.ArrayList;
import java.util.Arrays;
@@ -34,9 +35,9 @@
import org.apache.hadoop.security.authorize.Service;
import org.apache.hadoop.util.Sets;
-import org.junit.BeforeClass;
import org.junit.Rule;
-import org.junit.Test;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
import org.junit.rules.TestName;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
@@ -74,7 +75,7 @@ public int compare(Class<?> lhs, Class<?> rhs) {
private final Class<?> rpcServerClass;
- @BeforeClass
+ @BeforeAll
public static void initialize() {
Service[] services = new HDFSPolicyProvider().getServices();
policyProviderProtocols = new HashSet<>(services.length);
@@ -109,15 +110,15 @@ public void testPolicyProviderForServer() {
LOG.info("Running test {} for RPC server {}. Found server protocols {} "
+ "and policy provider protocols {}.", testName.getMethodName(),
rpcServerClass.getName(), serverProtocols, policyProviderProtocols);
- assertFalse("Expected to find at least one protocol in server.",
- serverProtocols.isEmpty());
+ assertFalse(
+ serverProtocols.isEmpty(), "Expected to find at least one protocol in server.");
final Set<Class<?>> differenceSet =
Sets.difference(serverProtocols, policyProviderProtocols);
- assertTrue(
- String.format("Following protocols for server %s are not defined in "
- + "%s: %s",
- rpcServerClass.getName(), HDFSPolicyProvider.class.getName(),
- Arrays.toString(differenceSet.toArray())),
- differenceSet.isEmpty());
+ assertTrue(
+ differenceSet.isEmpty(),
+ String.format("Following protocols for server %s are not defined in "
+ + "%s: %s",
+ rpcServerClass.getName(), HDFSPolicyProvider.class.getName(),
+ Arrays.toString(differenceSet.toArray())));
}
}
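Review note on TestHDFSPolicyProvider above: the file keeps @RunWith(Parameterized.class) and the TestName rule while its lifecycle and @Test annotations move to Jupiter. Run by the vintage engine, the JUnit 4 runner is in charge and the Jupiter annotations are silently skipped; run by the Jupiter engine, @RunWith is ignored and the constructor parameter supplied by Parameterized would not be resolvable. If the intent is to run on Jupiter, the parameterization would need to become @ParameterizedTest (which requires the junit-jupiter-params artifact). A hypothetical sketch — the classes listed are placeholders, not the real RPC server set:

    import static org.junit.jupiter.api.Assertions.assertNotNull;

    import java.util.stream.Stream;

    import org.junit.jupiter.params.ParameterizedTest;
    import org.junit.jupiter.params.provider.MethodSource;

    class PolicyProviderParamSketch {

      // Placeholder argument source; the real test would list its RPC server classes here.
      static Stream<Class<?>> rpcServerClasses() {
        return Stream.of(String.class, Integer.class);
      }

      @ParameterizedTest
      @MethodSource("rpcServerClasses")
      void testPolicyProviderForServer(Class<?> rpcServerClass) {
        assertNotNull(rpcServerClass);
      }
    }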
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSServerPorts.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSServerPorts.java
index c26c648fd9d9b..8d3ea38039a3d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSServerPorts.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSServerPorts.java
@@ -36,8 +36,8 @@
import java.net.UnknownHostException;
import static org.apache.hadoop.hdfs.server.common.Util.fileAsURI;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertTrue;
/**
* This test checks correctness of port usage by hdfs components:
@@ -280,7 +280,7 @@ public void runTestNameNodePorts(boolean withService) throws Exception {
started = canStartNameNode(conf2);
if (withService) {
- assertFalse("Should've failed on service port", started);
+ assertFalse(started, "Should've failed on service port");
// reset conf2 since NameNode modifies it
FileSystem.setDefaultUri(conf2, "hdfs://" + THIS_HOST);
@@ -379,8 +379,8 @@ public void testBackupNodePorts() throws Exception {
LOG.info("= Starting 1 on: " + backup_config.get(
DFSConfigKeys.DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY));
- assertFalse("Backup started on same port as Namenode",
- canStartBackupNode(backup_config)); // should fail
+ assertFalse(
+ canStartBackupNode(backup_config), "Backup started on same port as Namenode"); // should fail
// reset namenode backup address because Windows does not release
// port used previously properly.
@@ -394,7 +394,7 @@ public void testBackupNodePorts() throws Exception {
DFSConfigKeys.DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY));
boolean started = canStartBackupNode(backup_config);
- assertTrue("Backup Namenode should've started", started); // should start now
+ assertTrue(started, "Backup Namenode should've started"); // should start now
} finally {
stopNameNode(nn);
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSTrash.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSTrash.java
index 5dbb124882d43..3b922bf8424b2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSTrash.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSTrash.java
@@ -17,9 +17,7 @@
*/
package org.apache.hadoop.hdfs;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.*;
import java.io.IOException;
import java.util.UUID;
@@ -36,9 +34,9 @@
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.UserGroupInformation;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
import org.mockito.Mockito;
/**
@@ -65,7 +63,7 @@ public class TestHDFSTrash {
private static UserGroupInformation user1;
private static UserGroupInformation user2;
- @BeforeClass
+ @BeforeAll
public static void setUp() throws Exception {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
fs = FileSystem.get(conf);
@@ -92,7 +90,7 @@ public static void setUp() throws Exception {
null, FsAction.ALL, FsAction.ALL, FsAction.ALL);
}
- @AfterClass
+ @AfterAll
public static void tearDown() {
if (cluster != null) { cluster.shutdown(); }
}
@@ -141,13 +139,13 @@ public void testDeleteTrash() throws Exception {
fs.mkdirs(user1Tmp);
Trash u1Trash = getPerUserTrash(user1, fs, testConf);
Path u1t = u1Trash.getCurrentTrashDir(user1Tmp);
- assertTrue(String.format("Failed to move %s to trash", user1Tmp),
- u1Trash.moveToTrash(user1Tmp));
- assertTrue(
- String.format(
- "%s should be allowed to remove its own trash directory %s",
- user1.getUserName(), u1t),
- fs.delete(u1t, true));
+ assertTrue(
+ u1Trash.moveToTrash(user1Tmp), String.format("Failed to move %s to trash", user1Tmp));
+ assertTrue(
+ fs.delete(u1t, true),
+ String.format(
+ "%s should be allowed to remove its own trash directory %s",
+ user1.getUserName(), u1t));
assertFalse(fs.exists(u1t));
// login as user2, move something to trash
@@ -165,8 +163,8 @@ public void testDeleteTrash() throws Exception {
USER1_NAME, USER2_NAME));
} catch (AccessControlException e) {
assertTrue(e instanceof AccessControlException);
- assertTrue("Permission denied messages must carry the username",
- e.getMessage().contains(USER1_NAME));
+ assertTrue(
+ e.getMessage().contains(USER1_NAME), "Permission denied messages must carry the username");
}
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHFlush.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHFlush.java
index 711291c4051f1..8844c4f92626f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHFlush.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHFlush.java
@@ -17,9 +17,7 @@
*/
package org.apache.hadoop.hdfs;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.*;
import java.io.IOException;
import java.io.InterruptedIOException;
@@ -35,7 +33,7 @@
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
import org.slf4j.event.Level;
/** Class contains a set of tests to verify the correctness of
@@ -337,9 +335,9 @@ public static void doTheJob(Configuration conf, final String fileName,
// Check file length if updatelength is required
if (isSync && syncFlags.contains(SyncFlag.UPDATE_LENGTH)) {
long currentFileLength = fileSystem.getFileStatus(path).getLen();
- assertEquals(
- "File size doesn't match for hsync/hflush with updating the length",
- tenth * (i + 1), currentFileLength);
+ assertEquals(
+ tenth * (i + 1), currentFileLength,
+ "File size doesn't match for hsync/hflush with updating the length");
} else if (isSync && syncFlags.contains(SyncFlag.END_BLOCK)) {
LocatedBlocks blocks = fileSystem.dfs.getLocatedBlocks(pathName, 0);
assertEquals(i + 1, blocks.getLocatedBlocks().size());
@@ -353,7 +351,7 @@ public static void doTheJob(Configuration conf, final String fileName,
is.seek(tenth * i);
int readBytes = is.read(toRead, 0, tenth);
System.out.println("Has read " + readBytes);
- assertTrue("Should've get more bytes", (readBytes > 0) && (readBytes <= tenth));
+ assertTrue((readBytes > 0) && (readBytes <= tenth), "Should've read more bytes");
is.close();
checkData(toRead, 0, readBytes, expected, "Partial verification");
}
@@ -361,7 +359,7 @@ public static void doTheJob(Configuration conf, final String fileName,
stm.write(fileContent, tenth * SECTIONS, rounding);
stm.close();
- assertEquals("File size doesn't match ", AppendTestUtil.FILE_SIZE, fileSystem.getFileStatus(path).getLen());
+ assertEquals(AppendTestUtil.FILE_SIZE, fileSystem.getFileStatus(path).getLen(), "File size doesn't match ");
AppendTestUtil.checkFullFile(fileSystem, path, fileContent.length, fileContent, "hflush()");
} finally {
fileSystem.close();
@@ -371,9 +369,9 @@ public static void doTheJob(Configuration conf, final String fileName,
static void checkData(final byte[] actual, int from, int len,
final byte[] expected, String message) {
for (int idx = 0; idx < len; idx++) {
- assertEquals(message+" byte "+(from+idx)+" differs. expected "+
- expected[from+idx]+" actual "+actual[idx],
- expected[from+idx], actual[idx]);
+ assertEquals(
+ expected[from + idx], actual[idx], message + " byte " + (from + idx) + " differs. expected " +
+ expected[from + idx] + " actual " + actual[idx]);
actual[idx] = 0;
}
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHdfsAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHdfsAdmin.java
index a7afa66f22ae0..51ab312447f87 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHdfsAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHdfsAdmin.java
@@ -17,9 +17,7 @@
*/
package org.apache.hadoop.hdfs;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.*;
import java.io.File;
import java.io.IOException;
@@ -46,10 +44,10 @@
import org.apache.hadoop.hdfs.protocol.OpenFilesIterator.OpenFilesType;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
import org.apache.hadoop.util.Sets;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeEach;
public class TestHdfsAdmin {
@@ -60,7 +58,7 @@ public class TestHdfsAdmin {
private final Configuration conf = new Configuration();
private MiniDFSCluster cluster;
- @Before
+ @BeforeEach
public void setUpCluster() throws IOException {
conf.setLong(
DFSConfigKeys.DFS_NAMENODE_LIST_OPENFILES_NUM_RESPONSES,
@@ -69,7 +67,7 @@ public void setUpCluster() throws IOException {
cluster.waitActive();
}
- @After
+ @AfterEach
public void shutDownCluster() {
if (cluster != null) {
cluster.shutdown();
@@ -182,9 +180,9 @@ public void testHdfsAdminStoragePolicies() throws Exception {
policyNamesSet2.add(policy.getName());
}
// Ensure that we got the same set of policies in both cases.
- Assert.assertTrue(
+ Assertions.assertTrue(
Sets.difference(policyNamesSet1, policyNamesSet2).isEmpty());
- Assert.assertTrue(
+ Assertions.assertTrue(
Sets.difference(policyNamesSet2, policyNamesSet1).isEmpty());
}
@@ -200,8 +198,8 @@ private static String getKeyProviderURI() {
@Test
public void testGetKeyProvider() throws IOException {
HdfsAdmin hdfsAdmin = new HdfsAdmin(FileSystem.getDefaultUri(conf), conf);
- Assert.assertNull("should return null for an non-encrypted cluster",
- hdfsAdmin.getKeyProvider());
+ Assertions.assertNull(
+ hdfsAdmin.getKeyProvider(), "should return null for an non-encrypted cluster");
shutDownCluster();
@@ -213,8 +211,8 @@ public void testGetKeyProvider() throws IOException {
cluster.waitActive();
hdfsAdmin = new HdfsAdmin(FileSystem.getDefaultUri(conf), conf);
- Assert.assertNotNull("should not return null for an encrypted cluster",
- hdfsAdmin.getKeyProvider());
+ Assertions.assertNotNull(
+ hdfsAdmin.getKeyProvider(), "should not return null for an encrypted cluster");
}
@Test(timeout = 120000L)
@@ -260,10 +258,10 @@ private void verifyOpenFilesHelper(
HashSet<Path> openFiles) throws IOException {
while (openFilesRemoteItr.hasNext()) {
String filePath = openFilesRemoteItr.next().getFilePath();
- assertFalse(filePath + " should not be listed under open files!",
- closedFiles.contains(new Path(filePath)));
- assertTrue(filePath + " is not listed under open files!",
- openFiles.remove(new Path(filePath)));
+ assertFalse(
+ closedFiles.contains(new Path(filePath)), filePath + " should not be listed under open files!");
+ assertTrue(
+ openFiles.remove(new Path(filePath)), filePath + " is not listed under open files!");
}
}
@@ -275,7 +273,7 @@ private void verifyOpenFiles(HashSet<Path> closedFiles,
hdfsAdmin.listOpenFiles(EnumSet.of(OpenFilesType.ALL_OPEN_FILES),
OpenFilesIterator.FILTER_PATH_DEFAULT);
verifyOpenFilesHelper(openFilesRemoteItr, closedFiles, openFiles);
- assertTrue("Not all open files are listed!", openFiles.isEmpty());
+ assertTrue(openFiles.isEmpty(), "Not all open files are listed!");
}
/**
@@ -289,6 +287,6 @@ private void verifyOpenFilesOld(HashSet<Path> closedFiles,
RemoteIterator<OpenFileEntry> openFilesRemoteItr =
hdfsAdmin.listOpenFiles(EnumSet.of(OpenFilesType.ALL_OPEN_FILES));
verifyOpenFilesHelper(openFilesRemoteItr, closedFiles, openFiles);
- assertTrue("Not all open files are listed!", openFiles.isEmpty());
+ assertTrue(openFiles.isEmpty(), "Not all open files are listed!");
}
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestInjectionForSimulatedStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestInjectionForSimulatedStorage.java
index a8affa262735e..d8893db41dd24 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestInjectionForSimulatedStorage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestInjectionForSimulatedStorage.java
@@ -17,7 +17,7 @@
*/
package org.apache.hadoop.hdfs;
-import static org.junit.Assert.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
import java.io.IOException;
import java.net.InetSocketAddress;
@@ -38,7 +38,7 @@
import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
import org.apache.hadoop.util.Time;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
/**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestIsMethodSupported.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestIsMethodSupported.java
index 2e4a08bf0ba28..554326ba10515 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestIsMethodSupported.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestIsMethodSupported.java
@@ -17,8 +17,8 @@
*/
package org.apache.hadoop.hdfs;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertTrue;
import java.io.IOException;
import java.net.InetSocketAddress;
@@ -45,9 +45,9 @@
import org.apache.hadoop.ipc.RefreshCallQueueProtocol;
import org.apache.hadoop.tools.GetUserMappingsProtocol;
import org.apache.hadoop.tools.protocolPB.GetUserMappingsProtocolClientSideTranslatorPB;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
/**
* Test cases to verify that client side translators correctly implement the
@@ -59,7 +59,7 @@ public class TestIsMethodSupported {
private static InetSocketAddress nnAddress = null;
private static InetSocketAddress dnAddress = null;
- @BeforeClass
+ @BeforeAll
public static void setUp() throws Exception {
cluster = (new MiniDFSCluster.Builder(conf))
.numDataNodes(1).build();
@@ -69,7 +69,7 @@ public static void setUp() throws Exception {
dn.getIpcPort());
}
- @AfterClass
+ @AfterAll
public static void tearDown() throws Exception {
if (cluster != null) {
cluster.shutdown();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestKeyProviderCache.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestKeyProviderCache.java
index 9fc6b3894940c..cf3d9bd6196ce 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestKeyProviderCache.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestKeyProviderCache.java
@@ -25,8 +25,8 @@
import org.apache.hadoop.crypto.key.KeyProvider;
import org.apache.hadoop.crypto.key.KeyProviderFactory;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
-import org.junit.Assert;
-import org.junit.Test;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.Test;
public class TestKeyProviderCache {
@@ -98,31 +98,31 @@ public void testCache() throws Exception {
"dummy://foo:bar@test_provider1");
KeyProvider keyProvider1 = kpCache.get(conf,
getKeyProviderUriFromConf(conf));
- Assert.assertNotNull("Returned Key Provider is null !!", keyProvider1);
+ Assertions.assertNotNull(keyProvider1, "Returned Key Provider is null !!");
conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH,
"dummy://foo:bar@test_provider1");
KeyProvider keyProvider2 = kpCache.get(conf,
getKeyProviderUriFromConf(conf));
- Assert.assertTrue("Different KeyProviders returned !!",
- keyProvider1 == keyProvider2);
+ Assertions.assertTrue(
+ keyProvider1 == keyProvider2, "Different KeyProviders returned !!");
conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH,
"dummy://test_provider3");
KeyProvider keyProvider3 = kpCache.get(conf,
getKeyProviderUriFromConf(conf));
- Assert.assertFalse("Same KeyProviders returned !!",
- keyProvider1 == keyProvider3);
+ Assertions.assertFalse(
+ keyProvider1 == keyProvider3, "Same KeyProviders returned !!");
conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH,
"dummy://hello:there@test_provider1");
KeyProvider keyProvider4 = kpCache.get(conf,
getKeyProviderUriFromConf(conf));
- Assert.assertFalse("Same KeyProviders returned !!",
- keyProvider1 == keyProvider4);
+ Assertions.assertFalse(
+ keyProvider1 == keyProvider4, "Same KeyProviders returned !!");
}
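Review note on TestKeyProviderCache above: several of the converted assertions still express reference equality as == inside assertTrue/assertFalse. Jupiter's assertSame/assertNotSame say the same thing with a clearer failure message; a small sketch, with plain Objects standing in for the KeyProvider instances from the hunk:

    import static org.junit.jupiter.api.Assertions.assertNotSame;
    import static org.junit.jupiter.api.Assertions.assertSame;

    import org.junit.jupiter.api.Test;

    class IdentityAssertionSketch {
      @Test
      void sameAndNotSameReplaceReferenceComparisons() {
        Object keyProvider1 = new Object();
        Object keyProvider2 = keyProvider1;   // same cached instance
        Object keyProvider3 = new Object();   // a different provider instance

        // instead of assertTrue(keyProvider1 == keyProvider2, "Different KeyProviders returned !!")
        assertSame(keyProvider1, keyProvider2, "Different KeyProviders returned !!");
        // instead of assertFalse(keyProvider1 == keyProvider3, "Same KeyProviders returned !!")
        assertNotSame(keyProvider1, keyProvider3, "Same KeyProviders returned !!");
      }
    }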
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLargeBlock.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLargeBlock.java
index 137571cac4337..38d160afdaf3f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLargeBlock.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLargeBlock.java
@@ -17,7 +17,7 @@
*/
package org.apache.hadoop.hdfs;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertTrue;
import java.io.IOException;
import java.util.Arrays;
@@ -135,13 +135,13 @@ static void checkFullFile(FileSystem fs, Path name, final long fileSize)
if (verifyData) {
// verify data read
if (thisread == readSize) {
- assertTrue("file is corrupted at or after byte " +
- (fileSize - bytesToRead), Arrays.equals(b, compb));
+ assertTrue(Arrays.equals(b, compb), "file is corrupted at or after byte " +
+ (fileSize - bytesToRead));
} else {
// b was only partially filled by last read
for (int k = 0; k < thisread; k++) {
- assertTrue("file is corrupted at or after byte " +
- (fileSize - bytesToRead), b[k] == compb[k]);
+ assertTrue(b[k] == compb[k], "file is corrupted at or after byte " +
+ (fileSize - bytesToRead));
}
}
}
@@ -189,9 +189,9 @@ public void runTest(final long blockSize) throws IOException {
fileSize +
" blocksize " + blockSize);
- // verify that file exists in FS namespace
- assertTrue(file1 + " should be a file",
- fs.getFileStatus(file1).isFile());
+ // verify that file exists in FS namespace
+ assertTrue(
+ fs.getFileStatus(file1).isFile(), file1 + " should be a file");
// write to file
writeFile(stm, fileSize);
@@ -206,9 +206,9 @@ public void runTest(final long blockSize) throws IOException {
// verify that file size has changed
long len = fs.getFileStatus(file1).getLen();
- assertTrue(file1 + " should be of size " + fileSize +
- " but found to be of size " + len,
- len == fileSize);
+ assertTrue(
+ len == fileSize, file1 + " should be of size " + fileSize +
+ " but found to be of size " + len);
} finally {
cluster.shutdown();
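Review note on TestLargeBlock above: the converted checks still compare byte arrays via assertTrue(Arrays.equals(...)) and a per-element loop. Jupiter's assertArrayEquals handles both cases and reports the first differing index on failure; a minimal sketch with illustrative data:

    import static org.junit.jupiter.api.Assertions.assertArrayEquals;

    import org.junit.jupiter.api.Test;

    class ArrayAssertionSketch {
      @Test
      void arrayContentsAreComparedElementByElement() {
        byte[] compb = {1, 2, 3, 4};  // expected block contents
        byte[] b = {1, 2, 3, 4};      // bytes actually read back

        // instead of assertTrue(Arrays.equals(b, compb), "file is corrupted at or after byte ...")
        assertArrayEquals(compb, b, "file is corrupted at or after the read offset");
      }
    }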
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLease.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLease.java
index 5d7b62a42846a..28bae9afd85f9 100755
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLease.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLease.java
@@ -50,8 +50,8 @@
import org.apache.hadoop.security.token.SecretManager.InvalidToken;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.util.Time;
-import org.junit.Assert;
-import org.junit.Test;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.Test;
import org.mockito.Mockito;
public class TestLease {
@@ -108,7 +108,7 @@ public void testLeaseAbort() throws Exception {
d_out.write(buf, 0, 1024);
LOG.info("Write worked beyond the soft limit as expected.");
} catch (IOException e) {
- Assert.fail("Write failed.");
+ Assertions.fail("Write failed.");
}
long hardlimit = conf.getLong(DFSConfigKeys.DFS_LEASE_HARDLIMIT_KEY,
@@ -121,14 +121,14 @@ public void testLeaseAbort() throws Exception {
try {
d_out.write(buf, 0, 1024);
d_out.close();
- Assert.fail("Write did not fail even after the fatal lease renewal failure");
+ Assertions.fail("Write did not fail even after the fatal lease renewal failure");
} catch (IOException e) {
LOG.info("Write failed as expected. ", e);
}
// If aborted, the renewer should be empty. (no reference to clients)
Thread.sleep(1000);
- Assert.assertTrue(originalRenewer.isEmpty());
+ Assertions.assertTrue(originalRenewer.isEmpty());
// unstub
doNothing().when(spyNN).renewLease(anyString());
@@ -137,12 +137,12 @@ public void testLeaseAbort() throws Exception {
try {
int num = c_in.read(buf, 0, 1);
if (num != 1) {
- Assert.fail("Failed to read 1 byte");
+ Assertions.fail("Failed to read 1 byte");
}
c_in.close();
} catch (IOException e) {
LOG.error("Read failed with ", e);
- Assert.fail("Read after lease renewal failure failed");
+ Assertions.fail("Read after lease renewal failure failed");
}
// new file writes should work.
@@ -152,7 +152,7 @@ public void testLeaseAbort() throws Exception {
c_out.close();
} catch (IOException e) {
LOG.error("Write failed with ", e);
- Assert.fail("Write failed");
+ Assertions.fail("Write failed");
}
} finally {
cluster.shutdown();
@@ -172,8 +172,8 @@ public void testLeaseAfterRename() throws Exception {
FSDataOutputStream out = fs.create(p);
out.writeBytes("something");
//out.hsync();
- Assert.assertTrue(hasLease(cluster, p));
- Assert.assertEquals(1, leaseCount(cluster));
+ Assertions.assertTrue(hasLease(cluster, p));
+ Assertions.assertEquals(1, leaseCount(cluster));
// just to ensure first fs doesn't have any logic to twiddle leases
DistributedFileSystem fs2 = (DistributedFileSystem) FileSystem.newInstance(fs.getUri(), fs.getConf());
@@ -183,24 +183,24 @@ public void testLeaseAfterRename() throws Exception {
Path pRenamed = new Path(d, p.getName());
fs2.mkdirs(d);
fs2.rename(p, pRenamed);
- Assert.assertFalse(p+" exists", fs2.exists(p));
- Assert.assertTrue(pRenamed+" not found", fs2.exists(pRenamed));
- Assert.assertFalse("has lease for "+p, hasLease(cluster, p));
- Assert.assertTrue("no lease for "+pRenamed, hasLease(cluster, pRenamed));
- Assert.assertEquals(1, leaseCount(cluster));
+ Assertions.assertFalse(fs2.exists(p), p + " exists");
+ Assertions.assertTrue(fs2.exists(pRenamed), pRenamed + " not found");
+ Assertions.assertFalse(hasLease(cluster, p), "has lease for " + p);
+ Assertions.assertTrue(hasLease(cluster, pRenamed), "no lease for " + pRenamed);
+ Assertions.assertEquals(1, leaseCount(cluster));
// rename the parent dir to a new non-existent dir
LOG.info("DMS: rename parent dir");
Path pRenamedAgain = new Path(d2, pRenamed.getName());
fs2.rename(d, d2);
- // src gone
- Assert.assertFalse(d+" exists", fs2.exists(d));
- Assert.assertFalse("has lease for "+pRenamed, hasLease(cluster, pRenamed));
- // dst checks
- Assert.assertTrue(d2+" not found", fs2.exists(d2));
- Assert.assertTrue(pRenamedAgain+" not found", fs2.exists(pRenamedAgain));
- Assert.assertTrue("no lease for "+pRenamedAgain, hasLease(cluster, pRenamedAgain));
- Assert.assertEquals(1, leaseCount(cluster));
+ // src gone
+ Assertions.assertFalse(fs2.exists(d), d + " exists");
+ Assertions.assertFalse(hasLease(cluster, pRenamed), "has lease for " + pRenamed);
+ // dst checks
+ Assertions.assertTrue(fs2.exists(d2), d2 + " not found");
+ Assertions.assertTrue(fs2.exists(pRenamedAgain), pRenamedAgain + " not found");
+ Assertions.assertTrue(hasLease(cluster, pRenamedAgain), "no lease for " + pRenamedAgain);
+ Assertions.assertEquals(1, leaseCount(cluster));
// rename the parent dir to existing dir
// NOTE: rename w/o options moves paths into existing dir
@@ -209,41 +209,41 @@ public void testLeaseAfterRename() throws Exception {
pRenamedAgain = new Path(new Path(d, d2.getName()), p.getName());
fs2.mkdirs(d);
fs2.rename(d2, d);
- // src gone
- Assert.assertFalse(d2+" exists", fs2.exists(d2));
- Assert.assertFalse("no lease for "+pRenamed, hasLease(cluster, pRenamed));
- // dst checks
- Assert.assertTrue(d+" not found", fs2.exists(d));
- Assert.assertTrue(pRenamedAgain +" not found", fs2.exists(pRenamedAgain));
- Assert.assertTrue("no lease for "+pRenamedAgain, hasLease(cluster, pRenamedAgain));
- Assert.assertEquals(1, leaseCount(cluster));
+ // src gone
+ Assertions.assertFalse(fs2.exists(d2), d2 + " exists");
+ Assertions.assertFalse(hasLease(cluster, pRenamed), "no lease for " + pRenamed);
+ // dst checks
+ Assertions.assertTrue(fs2.exists(d), d + " not found");
+ Assertions.assertTrue(fs2.exists(pRenamedAgain), pRenamedAgain + " not found");
+ Assertions.assertTrue(hasLease(cluster, pRenamedAgain), "no lease for " + pRenamedAgain);
+ Assertions.assertEquals(1, leaseCount(cluster));
// rename with opts to non-existent dir
pRenamed = pRenamedAgain;
pRenamedAgain = new Path(d2, p.getName());
fs2.rename(pRenamed.getParent(), d2, Options.Rename.OVERWRITE);
- // src gone
- Assert.assertFalse(pRenamed.getParent() +" not found", fs2.exists(pRenamed.getParent()));
- Assert.assertFalse("has lease for "+pRenamed, hasLease(cluster, pRenamed));
- // dst checks
- Assert.assertTrue(d2+" not found", fs2.exists(d2));
- Assert.assertTrue(pRenamedAgain+" not found", fs2.exists(pRenamedAgain));
- Assert.assertTrue("no lease for "+pRenamedAgain, hasLease(cluster, pRenamedAgain));
- Assert.assertEquals(1, leaseCount(cluster));
+ // src gone
+ Assertions.assertFalse(fs2.exists(pRenamed.getParent()), pRenamed.getParent() + " should not exist");
+ Assertions.assertFalse(hasLease(cluster, pRenamed), "has lease for " + pRenamed);
+ // dst checks
+ Assertions.assertTrue(fs2.exists(d2), d2 + " not found");
+ Assertions.assertTrue(fs2.exists(pRenamedAgain), pRenamedAgain + " not found");
+ Assertions.assertTrue(hasLease(cluster, pRenamedAgain), "no lease for " + pRenamedAgain);
+ Assertions.assertEquals(1, leaseCount(cluster));
// rename with opts to existing dir
// NOTE: rename with options will not move paths into the existing dir
pRenamed = pRenamedAgain;
pRenamedAgain = new Path(d, p.getName());
fs2.rename(pRenamed.getParent(), d, Options.Rename.OVERWRITE);
- // src gone
- Assert.assertFalse(pRenamed.getParent() +" not found", fs2.exists(pRenamed.getParent()));
- Assert.assertFalse("has lease for "+pRenamed, hasLease(cluster, pRenamed));
- // dst checks
- Assert.assertTrue(d+" not found", fs2.exists(d));
- Assert.assertTrue(pRenamedAgain+" not found", fs2.exists(pRenamedAgain));
- Assert.assertTrue("no lease for "+pRenamedAgain, hasLease(cluster, pRenamedAgain));
- Assert.assertEquals(1, leaseCount(cluster));
+ // src gone
+ Assertions.assertFalse(fs2.exists(pRenamed.getParent()), pRenamed.getParent() + " should not exist");
+ Assertions.assertFalse(hasLease(cluster, pRenamed), "has lease for " + pRenamed);
+ // dst checks
+ Assertions.assertTrue(fs2.exists(d), d + " not found");
+ Assertions.assertTrue(fs2.exists(pRenamedAgain), pRenamedAgain + " not found");
+ Assertions.assertTrue(hasLease(cluster, pRenamedAgain), "no lease for " + pRenamedAgain);
+ Assertions.assertEquals(1, leaseCount(cluster));
out.close();
} finally {
cluster.shutdown();
@@ -269,8 +269,8 @@ public void testLeaseAfterRenameAndRecreate() throws Exception {
FileSystem fs = cluster.getFileSystem();
FSDataOutputStream out1 = fs.create(path1);
out1.writeBytes(contents1);
- Assert.assertTrue(hasLease(cluster, path1));
- Assert.assertEquals(1, leaseCount(cluster));
+ Assertions.assertTrue(hasLease(cluster, path1));
+ Assertions.assertEquals(1, leaseCount(cluster));
DistributedFileSystem fs2 = (DistributedFileSystem)
FileSystem.newInstance(fs.getUri(), fs.getConf());
@@ -281,14 +281,14 @@ public void testLeaseAfterRenameAndRecreate() throws Exception {
out2.close();
// The first file should still be open and valid
- Assert.assertTrue(hasLease(cluster, path2));
+ Assertions.assertTrue(hasLease(cluster, path2));
out1.close();
// Contents should be as expected
DistributedFileSystem fs3 = (DistributedFileSystem)
FileSystem.newInstance(fs.getUri(), fs.getConf());
- Assert.assertEquals(contents1, DFSTestUtil.readFile(fs3, path2));
- Assert.assertEquals(contents2, DFSTestUtil.readFile(fs3, path1));
+ Assertions.assertEquals(contents1, DFSTestUtil.readFile(fs3, path2));
+ Assertions.assertEquals(contents2, DFSTestUtil.readFile(fs3, path1));
} finally {
cluster.shutdown();
}
@@ -299,7 +299,7 @@ public void testLease() throws Exception {
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
try {
FileSystem fs = cluster.getFileSystem();
- Assert.assertTrue(fs.mkdirs(dir));
+ Assertions.assertTrue(fs.mkdirs(dir));
Path a = new Path(dir, "a");
Path b = new Path(dir, "b");
@@ -307,30 +307,30 @@ public void testLease() throws Exception {
DataOutputStream a_out = fs.create(a);
a_out.writeBytes("something");
- Assert.assertTrue(hasLease(cluster, a));
- Assert.assertTrue(!hasLease(cluster, b));
+ Assertions.assertTrue(hasLease(cluster, a));
+ Assertions.assertTrue(!hasLease(cluster, b));
DataOutputStream b_out = fs.create(b);
b_out.writeBytes("something");
- Assert.assertTrue(hasLease(cluster, a));
- Assert.assertTrue(hasLease(cluster, b));
+ Assertions.assertTrue(hasLease(cluster, a));
+ Assertions.assertTrue(hasLease(cluster, b));
a_out.close();
b_out.close();
- Assert.assertTrue(!hasLease(cluster, a));
- Assert.assertTrue(!hasLease(cluster, b));
+ Assertions.assertTrue(!hasLease(cluster, a));
+ Assertions.assertTrue(!hasLease(cluster, b));
Path fileA = new Path(dir, "fileA");
FSDataOutputStream fileA_out = fs.create(fileA);
fileA_out.writeBytes("something");
- Assert.assertTrue("Failed to get the lease!", hasLease(cluster, fileA));
+ Assertions.assertTrue(hasLease(cluster, fileA), "Failed to get the lease!");
fs.delete(dir, true);
try {
fileA_out.hflush();
- Assert.fail("Should validate file existence!");
+ Assertions.fail("Should validate file existence!");
} catch (FileNotFoundException e) {
// expected
GenericTestUtils.assertExceptionContains("File does not exist", e);
@@ -380,17 +380,17 @@ public void testFactory() throws Exception {
FSDataOutputStream out1 = createFsOut(c1, "/out1");
final DFSClient c2 = createDFSClientAs(ugi[0], conf);
FSDataOutputStream out2 = createFsOut(c2, "/out2");
- Assert.assertEquals(c1.getLeaseRenewer(), c2.getLeaseRenewer());
+ Assertions.assertEquals(c1.getLeaseRenewer(), c2.getLeaseRenewer());
final DFSClient c3 = createDFSClientAs(ugi[1], conf);
FSDataOutputStream out3 = createFsOut(c3, "/out3");
- Assert.assertTrue(c1.getLeaseRenewer() != c3.getLeaseRenewer());
+ Assertions.assertTrue(c1.getLeaseRenewer() != c3.getLeaseRenewer());
final DFSClient c4 = createDFSClientAs(ugi[1], conf);
FSDataOutputStream out4 = createFsOut(c4, "/out4");
- Assert.assertEquals(c3.getLeaseRenewer(), c4.getLeaseRenewer());
+ Assertions.assertEquals(c3.getLeaseRenewer(), c4.getLeaseRenewer());
final DFSClient c5 = createDFSClientAs(ugi[2], conf);
FSDataOutputStream out5 = createFsOut(c5, "/out5");
- Assert.assertTrue(c1.getLeaseRenewer() != c5.getLeaseRenewer());
- Assert.assertTrue(c3.getLeaseRenewer() != c5.getLeaseRenewer());
+ Assertions.assertTrue(c1.getLeaseRenewer() != c5.getLeaseRenewer());
+ Assertions.assertTrue(c3.getLeaseRenewer() != c5.getLeaseRenewer());
}
private FSDataOutputStream createFsOut(DFSClient dfs, String path)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery.java
index e2c956ecd277f..b7d61fafbad3a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery.java
@@ -16,11 +16,7 @@
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.*;
import java.io.IOException;
import java.util.EnumSet;
@@ -57,8 +53,8 @@
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.util.DataChecksum;
-import org.junit.After;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Test;
public class TestLeaseRecovery {
static final int BLOCK_SIZE = 1024;
@@ -67,7 +63,7 @@ public class TestLeaseRecovery {
private MiniDFSCluster cluster;
- @After
+ @AfterEach
public void shutdown() throws IOException {
if (cluster != null) {
cluster.shutdown();
@@ -172,7 +168,7 @@ public void testBlockSynchronization() throws Exception {
waitLeaseRecovery(cluster);
// verify that we still cannot recover the lease
LeaseManager lm = NameNodeAdapter.getLeaseManager(cluster.getNamesystem());
- assertTrue("Found " + lm.countLease() + " lease, expected 1", lm.countLease() == 1);
+ assertTrue(lm.countLease() == 1, "Found " + lm.countLease() + " lease, expected 1");
cluster.getNameNodeRpc().setSafeMode(
HdfsConstants.SafeModeAction.SAFEMODE_LEAVE, false);
}
@@ -232,7 +228,7 @@ public void testBlockRecoveryWithLessMetafile() throws Exception {
while (++count < 10 && !newdfs.recoverLease(file)) {
Thread.sleep(1000);
}
- assertTrue("File should be closed", newdfs.recoverLease(file));
+ assertTrue(newdfs.recoverLease(file), "File should be closed");
// Verify file length after lease recovery. The new file length should not
// include the bytes with corrupted checksum.
@@ -281,8 +277,8 @@ public void testBlockRecoveryRetryAfterFailedRecovery() throws Exception {
while (count++ < 15 && !newDfs.recoverLease(file)) {
Thread.sleep(1000);
}
- // The lease should have been recovered.
- assertTrue("File should be closed", newDfs.recoverLease(file));
+ // The lease should have been recovered.
+ assertTrue(newDfs.recoverLease(file), "File should be closed");
}
/**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery2.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery2.java
index bfa3deaa6b1b4..8c073144bdcdf 100755
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery2.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery2.java
@@ -17,10 +17,7 @@
*/
package org.apache.hadoop.hdfs;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.*;
import static org.mockito.Mockito.doNothing;
import static org.mockito.Mockito.spy;
@@ -53,10 +50,10 @@
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeEach;
import org.mockito.Mockito;
import org.slf4j.event.Level;
@@ -92,7 +89,7 @@ public class TestLeaseRecovery2 {
*
* @throws IOException
*/
- @Before
+ @BeforeEach
public void startUp() throws IOException {
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
@@ -109,7 +106,7 @@ public void startUp() throws IOException {
* stop the cluster
* @throws IOException
*/
- @After
+ @AfterEach
public void tearDown() throws IOException {
if (cluster != null) {
IOUtils.closeStream(dfs);
@@ -320,11 +317,11 @@ private void verifyFile(FileSystem dfs, Path filepath, byte[] actual,
AppendTestUtil.LOG.info("Lease for file " + filepath + " is recovered. "
+ "Validating its contents now...");
- // verify that file-size matches
- assertTrue("File should be " + size + " bytes, but is actually " +
- " found to be " + dfs.getFileStatus(filepath).getLen() +
- " bytes",
- dfs.getFileStatus(filepath).getLen() == size);
+ // verify that file-size matches
+ assertTrue(dfs.getFileStatus(filepath).getLen() == size,
+ "File should be " + size + " bytes, but is actually " +
+ " found to be " + dfs.getFileStatus(filepath).getLen() +
+ " bytes");
// verify that there is enough data to read.
System.out.println("File size is good. Now validating sizes from datanodes...");
@@ -471,8 +468,8 @@ public void testSoftLeaseRecovery() throws Exception {
// verify that file-size matches
long fileSize = dfs.getFileStatus(filepath).getLen();
- assertTrue("File should be " + size + " bytes, but is actually " +
- " found to be " + fileSize + " bytes", fileSize == size);
+ assertTrue(fileSize == size, "File should be " + size + " bytes, but is actually " +
+ " found to be " + fileSize + " bytes");
// verify data
AppendTestUtil.LOG.info("File size is good. " +
@@ -529,10 +526,10 @@ public void hardLeaseRecoveryRestartHelper(boolean doRename, int size)
String originalLeaseHolder = NameNodeAdapter.getLeaseHolderForPath(
cluster.getNameNode(), fileStr);
-
- assertFalse("original lease holder should not be the NN",
- originalLeaseHolder.startsWith(
- HdfsServerConstants.NAMENODE_LEASE_HOLDER));
+
+ assertFalse(originalLeaseHolder.startsWith(
+ HdfsServerConstants.NAMENODE_LEASE_HOLDER),
+ "original lease holder should not be the NN");
// hflush file
AppendTestUtil.LOG.info("hflush");
@@ -540,7 +537,7 @@ public void hardLeaseRecoveryRestartHelper(boolean doRename, int size)
// check visible length
final HdfsDataInputStream in = (HdfsDataInputStream)dfs.open(filePath);
- Assert.assertEquals(size, in.getVisibleLength());
+ Assertions.assertEquals(size, in.getVisibleLength());
in.close();
if (doRename) {
@@ -630,10 +627,10 @@ static void checkLease(String f, int size) {
final String holder = NameNodeAdapter.getLeaseHolderForPath(
cluster.getNameNode(), f);
if (size == 0) {
- assertEquals("lease holder should null, file is closed", null, holder);
+ assertEquals(null, holder, "lease holder should be null, file is closed");
} else {
- assertTrue("lease holder should now be the NN",
- holder.startsWith(HdfsServerConstants.NAMENODE_LEASE_HOLDER));
+ assertTrue(holder.startsWith(HdfsServerConstants.NAMENODE_LEASE_HOLDER),
+ "lease holder should now be the NN");
}
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecoveryStriped.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecoveryStriped.java
index 5aa1f6383704f..31e2ac7cbedd7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecoveryStriped.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecoveryStriped.java
@@ -35,10 +35,10 @@
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.test.Whitebox;
import org.apache.hadoop.util.StringUtils;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
import org.slf4j.event.Level;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -87,7 +87,7 @@ public class TestLeaseRecoveryStriped {
final Path p = new Path(dir, "testfile");
private final int testFileLength = (stripesPerBlock - 1) * stripeSize;
- @Before
+ @BeforeEach
public void setup() throws IOException {
conf = new HdfsConfiguration();
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
@@ -103,7 +103,7 @@ public void setup() throws IOException {
dfs.setErasureCodingPolicy(dir, ecPolicy.getName());
}
- @After
+ @AfterEach
public void tearDown() {
if (cluster != null) {
cluster.shutdown();
@@ -183,7 +183,7 @@ public void testLeaseRecovery() throws Exception {
String msg = "failed testCase at i=" + i + ", blockLengths="
+ blockLengths + "\n"
+ StringUtils.stringifyException(e);
- Assert.fail(msg);
+ Assertions.fail(msg);
}
}
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestListFilesInDFS.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestListFilesInDFS.java
index 7bb624e166537..c3a38774bfc58 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestListFilesInDFS.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestListFilesInDFS.java
@@ -19,8 +19,8 @@
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.TestListFiles;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
/**
* This class tests the FileStatus API.
@@ -29,7 +29,7 @@ public class TestListFilesInDFS extends TestListFiles {
private static MiniDFSCluster cluster;
- @BeforeClass
+ @BeforeAll
public static void testSetUp() throws Exception {
setTestPaths(new Path("/tmp/TestListFilesInDFS"));
cluster = new MiniDFSCluster.Builder(conf).build();
@@ -37,7 +37,7 @@ public static void testSetUp() throws Exception {
fs.delete(TEST_DIR, true);
}
- @AfterClass
+ @AfterAll
public static void testShutdown() throws Exception {
if (cluster != null) {
fs.close();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestListFilesInFileContext.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestListFilesInFileContext.java
index 2f73a39f7fb55..68d1d5ad8afe6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestListFilesInFileContext.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestListFilesInFileContext.java
@@ -17,9 +17,7 @@
*/
package org.apache.hadoop.hdfs;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.*;
import java.io.IOException;
import java.util.EnumSet;
@@ -34,10 +32,10 @@
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.fs.permission.FsPermission;
-import org.junit.After;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
/**
* This class tests the FileStatus API.
@@ -56,7 +54,7 @@ public class TestListFilesInFileContext {
final private static Path FILE2 = new Path(DIR1, "file2");
final private static Path FILE3 = new Path(DIR1, "file3");
- @BeforeClass
+ @BeforeAll
public static void testSetUp() throws Exception {
cluster = new MiniDFSCluster.Builder(conf).build();
fc = FileContext.getFileContext(cluster.getConfiguration(0));
@@ -75,7 +73,7 @@ private static void writeFile(FileContext fc, Path name, int fileSize)
stm.close();
}
- @AfterClass
+ @AfterAll
public static void testShutdown() throws Exception {
if (cluster != null) {
cluster.shutdown();
@@ -106,7 +104,7 @@ public void testFile() throws IOException {
assertEquals(1, stat.getBlockLocations().length);
}
- @After
+ @AfterEach
public void cleanDir() throws IOException {
fc.delete(TEST_DIR, true);
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLocalDFS.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLocalDFS.java
index b353de1ac9152..def4b47f6e394 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLocalDFS.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLocalDFS.java
@@ -17,8 +17,8 @@
*/
package org.apache.hadoop.hdfs;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
import java.io.DataInputStream;
import java.io.DataOutputStream;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMaintenanceState.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMaintenanceState.java
index 5d2365a349c9a..a7eba1e914860 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMaintenanceState.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMaintenanceState.java
@@ -21,10 +21,8 @@
import static org.hamcrest.CoreMatchers.containsString;
import static org.hamcrest.CoreMatchers.is;
import static org.hamcrest.CoreMatchers.not;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertThat;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.junit.jupiter.api.Assertions.*;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
@@ -56,8 +54,8 @@
import org.apache.hadoop.util.Lists;
import org.apache.hadoop.util.Time;
import org.apache.hadoop.util.ToolRunner;
-import org.junit.Assert;
import org.junit.Test;
+import org.junit.jupiter.api.Assertions;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -344,16 +342,16 @@ public void testNodeDeadWhenInEnteringMaintenance() throws Exception {
DFSTestUtil.waitForDatanodeState(
getCluster(), nodeOutofService.getDatanodeUuid(), false, 20000);
DFSClient client = getDfsClient(0);
- assertEquals("maintenance node shouldn't be live", numDatanodes - 1,
- client.datanodeReport(DatanodeReportType.LIVE).length);
+ assertEquals(numDatanodes - 1, client.datanodeReport(DatanodeReportType.LIVE).length,
+ "maintenance node shouldn't be live");
assertEquals(1, ns.getNumEnteringMaintenanceDataNodes());
getCluster().restartDataNode(dnProp, true);
getCluster().waitActive();
waitNodeState(nodeOutofService, AdminStates.ENTERING_MAINTENANCE);
assertEquals(1, ns.getNumEnteringMaintenanceDataNodes());
- assertEquals("maintenance node should be live", numDatanodes,
- client.datanodeReport(DatanodeReportType.LIVE).length);
+ assertEquals(numDatanodes, client.datanodeReport(DatanodeReportType.LIVE).length,
+ "maintenance node should be live");
cleanupFile(fileSys, file);
}
@@ -479,7 +477,7 @@ public void testFileBlockReplicationAffectingMaintenance()
int fileBlockReplication = maintenanceMinRepl + 1;
int numAddedDataNodes = 1;
int numInitialDataNodes = (maintenanceMinRepl * 2 - numAddedDataNodes);
- Assert.assertTrue(maintenanceMinRepl <= defaultReplication);
+ Assertions.assertTrue(maintenanceMinRepl <= defaultReplication);
testFileBlockReplicationImpl(maintenanceMinRepl,
numInitialDataNodes, numAddedDataNodes, fileBlockReplication);
@@ -557,8 +555,8 @@ public void testTransitionToDecommission() throws IOException {
AdminStates.IN_MAINTENANCE);
DFSClient client = getDfsClient(0);
- assertEquals("All datanodes must be alive", numDatanodes,
- client.datanodeReport(DatanodeReportType.LIVE).length);
+ assertEquals(numDatanodes, client.datanodeReport(DatanodeReportType.LIVE).length,
+ "All datanodes must be alive");
// test 1, verify the replica in IN_MAINTENANCE state isn't in LocatedBlock
checkWithRetry(ns, fileSys, file, replicas - 1,
@@ -784,14 +782,14 @@ public void testTakeDeadNodeOutOfMaintenance() throws Exception {
nodeOutofService);
final DFSClient client = getDfsClient(0);
- assertEquals("All datanodes must be alive", numDatanodes,
- client.datanodeReport(DatanodeReportType.LIVE).length);
+ assertEquals(numDatanodes, client.datanodeReport(DatanodeReportType.LIVE).length,
+ "All datanodes must be alive");
getCluster().stopDataNode(nodeOutofService.getXferAddr());
DFSTestUtil.waitForDatanodeState(
getCluster(), nodeOutofService.getDatanodeUuid(), false, 20000);
- assertEquals("maintenance node shouldn't be alive", numDatanodes - 1,
- client.datanodeReport(DatanodeReportType.LIVE).length);
+ assertEquals(numDatanodes - 1, client.datanodeReport(DatanodeReportType.LIVE).length,
+ "maintenance node shouldn't be alive");
// Dead maintenance node's blocks should remain in block map.
checkWithRetry(ns, fileSys, file, replicas - 1,
@@ -840,15 +838,15 @@ public void testWithNNAndDNRestart() throws Exception {
nodeOutofService);
DFSClient client = getDfsClient(0);
- assertEquals("All datanodes must be alive", numDatanodes,
- client.datanodeReport(DatanodeReportType.LIVE).length);
+ assertEquals(numDatanodes, client.datanodeReport(DatanodeReportType.LIVE).length,
+ "All datanodes must be alive");
MiniDFSCluster.DataNodeProperties dnProp =
getCluster().stopDataNode(nodeOutofService.getXferAddr());
DFSTestUtil.waitForDatanodeState(
getCluster(), nodeOutofService.getDatanodeUuid(), false, 20000);
- assertEquals("maintenance node shouldn't be alive", numDatanodes - 1,
- client.datanodeReport(DatanodeReportType.LIVE).length);
+ assertEquals(numDatanodes - 1, client.datanodeReport(DatanodeReportType.LIVE).length,
+ "maintenance node shouldn't be alive");
// Dead maintenance node's blocks should remain in block map.
checkWithRetry(ns, fileSys, file, replicas - 1,
@@ -1025,9 +1023,9 @@ static String getFirstBlockFirstReplicaUuid(FileSystem fileSys,
static String checkFile(FSNamesystem ns, FileSystem fileSys,
Path name, int repl, DatanodeInfo expectedExcludedNode,
DatanodeInfo expectedMaintenanceNode) throws IOException {
- // need a raw stream
- assertTrue("Not HDFS:"+fileSys.getUri(),
- fileSys instanceof DistributedFileSystem);
+ // need a raw stream
+ assertTrue(fileSys instanceof DistributedFileSystem,
+ "Not HDFS:" + fileSys.getUri());
HdfsDataInputStream dis = (HdfsDataInputStream)fileSys.open(name);
BlockManager bm = ns.getBlockManager();
Collection dinfo = dis.getAllBlocks();
@@ -1126,9 +1124,9 @@ public Boolean get() {
static private DatanodeInfo[] getFirstBlockReplicasDatanodeInfos(
FileSystem fileSys, Path name) throws IOException {
- // need a raw stream
- assertTrue("Not HDFS:"+fileSys.getUri(),
- fileSys instanceof DistributedFileSystem);
+ // need a raw stream
+ assertTrue(fileSys instanceof DistributedFileSystem,
+ "Not HDFS:" + fileSys.getUri());
HdfsDataInputStream dis = (HdfsDataInputStream)fileSys.open(name);
Collection dinfo = dis.getAllBlocks();
if (dinfo.iterator().hasNext()) { // for the first block
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMiniDFSCluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMiniDFSCluster.java
index 74a8e44bf7b0b..6ae09f0a6db88 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMiniDFSCluster.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMiniDFSCluster.java
@@ -19,8 +19,8 @@
package org.apache.hadoop.hdfs;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assume.assumeTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assumptions.assumeTrue;
import java.io.File;
import java.io.IOException;
@@ -39,9 +39,8 @@
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsVolumeImpl;
import org.apache.hadoop.test.PathUtils;
-import org.junit.Before;
import org.junit.Test;
-
+import org.junit.jupiter.api.BeforeEach;
import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
/**
@@ -58,7 +57,7 @@ public class TestMiniDFSCluster {
private static final String CLUSTER_4 = "cluster4";
private static final String CLUSTER_5 = "cluster5";
protected File testDataPath;
- @Before
+ @BeforeEach
public void setUp() {
testDataPath = new File(PathUtils.getTestDir(getClass()), "miniclusters");
}
@@ -232,8 +231,8 @@ public void testClusterSetDatanodeHostname() throws Throwable {
.numDataNodes(1)
.checkDataNodeHostConfig(true)
.build()) {
- assertEquals("DataNode hostname config not respected", "MYHOST",
- cluster5.getDataNodes().get(0).getDatanodeId().getHostName());
+ assertEquals("MYHOST",
+ cluster5.getDataNodes().get(0).getDatanodeId().getHostName(), "DataNode hostname config not respected");
}
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMissingBlocksAlert.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMissingBlocksAlert.java
index cc29a93ce093f..058259946f273 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMissingBlocksAlert.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMissingBlocksAlert.java
@@ -27,16 +27,16 @@
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.server.blockmanagement.AvailableSpaceBlockPlacementPolicy;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
-import org.junit.Assert;
-import org.junit.Test;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.Test;
import javax.management.*;
import java.io.IOException;
import java.lang.management.ManagementFactory;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
/**
* The test makes sure that NameNode detects presense blocks that do not have
@@ -105,7 +105,7 @@ public void testMissingBlocksAlert()
MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
ObjectName mxbeanName = new ObjectName(
"Hadoop:service=NameNode,name=NameNodeInfo");
- Assert.assertEquals(1, (long)(Long) mbs.getAttribute(mxbeanName,
+ Assertions.assertEquals(1, (long)(Long) mbs.getAttribute(mxbeanName,
"NumberOfMissingBlocks"));
// now do the reverse : remove the file expect the number of missing
@@ -121,7 +121,7 @@ public void testMissingBlocksAlert()
assertEquals(2, dfs.getLowRedundancyBlocksCount());
assertEquals(2, bm.getUnderReplicatedNotMissingBlocks());
- Assert.assertEquals(0, (long)(Long) mbs.getAttribute(mxbeanName,
+ Assertions.assertEquals(0, (long)(Long) mbs.getAttribute(mxbeanName,
"NumberOfMissingBlocks"));
Path replOneFile = new Path("/testMissingBlocks/replOneFile");
@@ -138,7 +138,7 @@ public void testMissingBlocksAlert()
}
in.close();
assertEquals(1, dfs.getMissingReplOneBlocksCount());
- Assert.assertEquals(1, (long)(Long) mbs.getAttribute(mxbeanName,
+ Assertions.assertEquals(1, (long)(Long) mbs.getAttribute(mxbeanName,
"NumberOfMissingBlocksWithReplicationFactorOne"));
} finally {
if (cluster != null) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestModTime.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestModTime.java
index 3978444c6a217..c5e2a822c8802 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestModTime.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestModTime.java
@@ -17,8 +17,8 @@
*/
package org.apache.hadoop.hdfs;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
import java.io.IOException;
import java.io.OutputStream;
@@ -34,7 +34,7 @@
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
import org.apache.hadoop.util.ThreadUtil;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
/**
* This class tests the decommissioning of nodes.
@@ -79,7 +79,7 @@ public void testModTime() throws IOException {
cluster.getNameNodePort());
DFSClient client = new DFSClient(addr, conf);
DatanodeInfo[] info = client.datanodeReport(DatanodeReportType.LIVE);
- assertEquals("Number of Datanodes ", numDatanodes, info.length);
+ assertEquals(numDatanodes, info.length, "Number of Datanodes ");
FileSystem fileSys = cluster.getFileSystem();
int replicas = numDatanodes - 1;
assertTrue(fileSys instanceof DistributedFileSystem);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMultiThreadedHflush.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMultiThreadedHflush.java
index a839d85823b62..48666f68dfdd6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMultiThreadedHflush.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMultiThreadedHflush.java
@@ -35,7 +35,7 @@
import org.apache.hadoop.util.StopWatch;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
/**
* This class tests hflushing concurrently from many threads.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMultipleNNPortQOP.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMultipleNNPortQOP.java
index d536c5e8a969a..8bc6aa87e73ae 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMultipleNNPortQOP.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMultipleNNPortQOP.java
@@ -31,8 +31,8 @@
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.security.token.Token;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_RPC_PROTECTION;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ENCRYPT_DATA_OVERWRITE_DOWNSTREAM_DERIVED_QOP_KEY;
@@ -40,7 +40,7 @@
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SEND_QOP_ENABLED;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_AUXILIARY_KEY;
import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_ENCRYPT_DATA_OVERWRITE_DOWNSTREAM_NEW_QOP_KEY;
-import static org.junit.Assert.*;
+import static org.junit.jupiter.api.Assertions.*;
/**
@@ -57,7 +57,7 @@ public class TestMultipleNNPortQOP extends SaslDataTransferTestCase {
private static HdfsConfiguration clusterConf;
- @Before
+ @BeforeEach
public void setup() throws Exception {
clusterConf = createSecureConfig(
"authentication,integrity,privacy");
@@ -254,11 +254,11 @@ public void testMultipleNNPortOverwriteDownStream() throws Exception {
.map(dn -> dn.getSaslClient().getTargetQOP())
.filter("auth"::equals)
.count();
- // For each datanode pipeline, targetQOPs of sasl clients in the first two
- // datanodes become equal to auth.
- // Note that it is not necessarily the case for all datanodes,
- // since a datanode may be always at the last position in pipelines.
- assertTrue("At least two qops should be auth", count >= 2);
+ // For each datanode pipeline, targetQOPs of sasl clients in the first two
+ // datanodes become equal to auth.
+ // Note that it is not necessarily the case for all datanodes,
+ // since a datanode may be always at the last position in pipelines.
+ assertTrue(count >= 2, "At least two qops should be auth");
clientConf.set(HADOOP_RPC_PROTECTION, "integrity");
FileSystem fsIntegrity = FileSystem.get(uriIntegrityPort, clientConf);
@@ -267,7 +267,7 @@ public void testMultipleNNPortOverwriteDownStream() throws Exception {
.map(dn -> dn.getSaslClient().getTargetQOP())
.filter("auth"::equals)
.count();
- assertTrue("At least two qops should be auth", count >= 2);
+ assertTrue(count >= 2, "At least two qops should be auth");
clientConf.set(HADOOP_RPC_PROTECTION, "authentication");
FileSystem fsAuth = FileSystem.get(uriAuthPort, clientConf);
@@ -276,7 +276,7 @@ public void testMultipleNNPortOverwriteDownStream() throws Exception {
.map(dn -> dn.getSaslServer().getNegotiatedQOP())
.filter("auth"::equals)
.count();
- assertEquals("All qops should be auth", 3, count);
+ assertEquals(3, count, "All qops should be auth");
} finally {
if (cluster != null) {
cluster.shutdown();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelRead.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelRead.java
index 26aa28e6e8ec9..2c21908259fa5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelRead.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelRead.java
@@ -18,11 +18,11 @@
package org.apache.hadoop.hdfs;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
public class TestParallelRead extends TestParallelReadUtil {
- @BeforeClass
+ @BeforeAll
static public void setupCluster() throws Exception {
// This is a test of the normal (TCP) read path. For this reason, we turn
// off both short-circuit local reads and UNIX domain socket data traffic.
@@ -37,7 +37,7 @@ static public void setupCluster() throws Exception {
setupCluster(DEFAULT_REPLICATION_FACTOR, conf);
}
- @AfterClass
+ @AfterAll
static public void teardownCluster() throws Exception {
TestParallelReadUtil.teardownCluster();
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelReadUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelReadUtil.java
index 857ab7bdefaf7..1dc1175c4c84d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelReadUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelReadUtil.java
@@ -17,9 +17,7 @@
*/
package org.apache.hadoop.hdfs;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.*;
import java.io.IOException;
import java.nio.ByteBuffer;
@@ -32,8 +30,8 @@
import org.apache.hadoop.hdfs.client.impl.BlockReaderTestUtil;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.util.Time;
-import org.junit.Ignore;
-import org.junit.Test;
+import org.junit.jupiter.api.Disabled;
+import org.junit.jupiter.api.Test;
import org.slf4j.event.Level;
/**
@@ -43,7 +41,7 @@
-* This class is marked as @Ignore so that junit doesn't try to execute the
+* This class is marked as @Disabled so that junit doesn't try to execute the
* tests in here directly. They are executed from subclasses.
*/
-@Ignore
+@Disabled
public class TestParallelReadUtil {
static final Logger LOG = LoggerFactory.getLogger(TestParallelReadUtil.class);
@@ -260,9 +258,9 @@ public boolean hasError() {
* Seek to somewhere random and read.
*/
private void read(int start, int len) throws Exception {
- assertTrue(
- "Bad args: " + start + " + " + len + " should be <= " + fileSize,
- start + len <= fileSize);
+ assertTrue(
+ start + len <= fileSize,
+ "Bad args: " + start + " + " + len + " should be <= " + fileSize);
readCount++;
DFSInputStream dis = testInfo.dis;
@@ -276,9 +274,9 @@ private void read(int start, int len) throws Exception {
* Positional read.
*/
private void pRead(int start, int len) throws Exception {
- assertTrue(
- "Bad args: " + start + " + " + len + " should be <= " + fileSize,
- start + len <= fileSize);
+ assertTrue(
+ start + len <= fileSize,
+ "Bad args: " + start + " + " + len + " should be <= " + fileSize);
DFSInputStream dis = testInfo.dis;
byte buf[] = new byte[len];
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelShortCircuitLegacyRead.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelShortCircuitLegacyRead.java
index 220e45bc6915f..5b8a712e5a33f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelShortCircuitLegacyRead.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelShortCircuitLegacyRead.java
@@ -20,11 +20,11 @@
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
import org.apache.hadoop.net.unix.DomainSocket;
import org.apache.hadoop.security.UserGroupInformation;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
public class TestParallelShortCircuitLegacyRead extends TestParallelReadUtil {
- @BeforeClass
+ @BeforeAll
static public void setupCluster() throws Exception {
DFSInputStream.tcpReadsDisabledForTesting = true;
HdfsConfiguration conf = new HdfsConfiguration();
@@ -40,7 +40,7 @@ static public void setupCluster() throws Exception {
setupCluster(1, conf);
}
- @AfterClass
+ @AfterAll
static public void teardownCluster() throws Exception {
TestParallelReadUtil.teardownCluster();
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelShortCircuitRead.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelShortCircuitRead.java
index 3f352b4857396..2e8d649a32dd2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelShortCircuitRead.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelShortCircuitRead.java
@@ -17,22 +17,20 @@
*/
package org.apache.hadoop.hdfs;
-import static org.hamcrest.CoreMatchers.equalTo;
-
import java.io.File;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
import org.apache.hadoop.net.unix.DomainSocket;
import org.apache.hadoop.net.unix.TemporarySocketDirectory;
-import org.junit.AfterClass;
-import org.junit.Assume;
-import org.junit.Before;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.Assumptions;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.BeforeEach;
public class TestParallelShortCircuitRead extends TestParallelReadUtil {
private static TemporarySocketDirectory sockDir;
- @BeforeClass
+ @BeforeAll
static public void setupCluster() throws Exception {
if (DomainSocket.getLoadingFailureReason() != null) return;
DFSInputStream.tcpReadsDisabledForTesting = true;
@@ -47,12 +45,12 @@ static public void setupCluster() throws Exception {
setupCluster(1, conf);
}
- @Before
+ @BeforeEach
public void before() {
- Assume.assumeThat(DomainSocket.getLoadingFailureReason(), equalTo(null));
+ Assumptions.assumeTrue(DomainSocket.getLoadingFailureReason() == null);
}
- @AfterClass
+ @AfterAll
static public void teardownCluster() throws Exception {
if (DomainSocket.getLoadingFailureReason() != null) return;
sockDir.close();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelShortCircuitReadNoChecksum.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelShortCircuitReadNoChecksum.java
index df110b440c117..3e30fc3ec8748 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelShortCircuitReadNoChecksum.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelShortCircuitReadNoChecksum.java
@@ -17,22 +17,20 @@
*/
package org.apache.hadoop.hdfs;
-import static org.hamcrest.CoreMatchers.equalTo;
-
import java.io.File;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
import org.apache.hadoop.net.unix.DomainSocket;
import org.apache.hadoop.net.unix.TemporarySocketDirectory;
-import org.junit.AfterClass;
-import org.junit.Assume;
-import org.junit.Before;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.Assumptions;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.BeforeEach;
public class TestParallelShortCircuitReadNoChecksum extends TestParallelReadUtil {
private static TemporarySocketDirectory sockDir;
- @BeforeClass
+ @BeforeAll
static public void setupCluster() throws Exception {
if (DomainSocket.getLoadingFailureReason() != null) return;
DFSInputStream.tcpReadsDisabledForTesting = true;
@@ -47,12 +45,12 @@ static public void setupCluster() throws Exception {
setupCluster(1, conf);
}
- @Before
+ @BeforeEach
public void before() {
- Assume.assumeThat(DomainSocket.getLoadingFailureReason(), equalTo(null));
+ Assumptions.assumeTrue(DomainSocket.getLoadingFailureReason() == null);
}
- @AfterClass
+ @AfterAll
static public void teardownCluster() throws Exception {
if (DomainSocket.getLoadingFailureReason() != null) return;
sockDir.close();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelShortCircuitReadUnCached.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelShortCircuitReadUnCached.java
index ad26e18335269..b4d080ed86f88 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelShortCircuitReadUnCached.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelShortCircuitReadUnCached.java
@@ -17,17 +17,15 @@
*/
package org.apache.hadoop.hdfs;
-import static org.hamcrest.CoreMatchers.equalTo;
-
import java.io.File;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
import org.apache.hadoop.net.unix.DomainSocket;
import org.apache.hadoop.net.unix.TemporarySocketDirectory;
-import org.junit.AfterClass;
-import org.junit.Assume;
-import org.junit.Before;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.Assumptions;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.BeforeEach;
/**
* This class tests short-circuit local reads without any FileInputStream or
@@ -36,7 +34,7 @@
public class TestParallelShortCircuitReadUnCached extends TestParallelReadUtil {
private static TemporarySocketDirectory sockDir;
- @BeforeClass
+ @BeforeAll
static public void setupCluster() throws Exception {
if (DomainSocket.getLoadingFailureReason() != null) return;
sockDir = new TemporarySocketDirectory();
@@ -66,12 +64,12 @@ static public void setupCluster() throws Exception {
setupCluster(1, conf);
}
- @Before
+ @BeforeEach
public void before() {
- Assume.assumeThat(DomainSocket.getLoadingFailureReason(), equalTo(null));
+ Assumptions.assumeTrue(DomainSocket.getLoadingFailureReason() == null);
}
- @AfterClass
+ @AfterAll
static public void teardownCluster() throws Exception {
if (DomainSocket.getLoadingFailureReason() != null) return;
sockDir.close();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelUnixDomainRead.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelUnixDomainRead.java
index 872ac38c56072..b211e88f7ea04 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelUnixDomainRead.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelUnixDomainRead.java
@@ -17,22 +17,20 @@
*/
package org.apache.hadoop.hdfs;
-import static org.hamcrest.CoreMatchers.equalTo;
-
import java.io.File;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
import org.apache.hadoop.net.unix.DomainSocket;
import org.apache.hadoop.net.unix.TemporarySocketDirectory;
-import org.junit.AfterClass;
-import org.junit.Assume;
-import org.junit.Before;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.BeforeEach;
public class TestParallelUnixDomainRead extends TestParallelReadUtil {
private static TemporarySocketDirectory sockDir;
- @BeforeClass
+ @BeforeAll
static public void setupCluster() throws Exception {
if (DomainSocket.getLoadingFailureReason() != null) return;
DFSInputStream.tcpReadsDisabledForTesting = true;
@@ -46,12 +44,12 @@ static public void setupCluster() throws Exception {
setupCluster(1, conf);
}
- @Before
+ @BeforeEach
public void before() {
- Assume.assumeThat(DomainSocket.getLoadingFailureReason(), equalTo(null));
+ Assumptions.assumeTrue(DomainSocket.getLoadingFailureReason() == null);
}
- @AfterClass
+ @AfterAll
static public void teardownCluster() throws Exception {
if (DomainSocket.getLoadingFailureReason() != null) return;
sockDir.close();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPersistBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPersistBlocks.java
index 3658d75107abd..b0681f1dd8472 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPersistBlocks.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPersistBlocks.java
@@ -18,9 +18,7 @@
package org.apache.hadoop.hdfs;
-import static org.junit.Assert.assertArrayEquals;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.*;
import java.io.File;
import java.io.IOException;
@@ -44,7 +42,7 @@
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.test.PathUtils;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
import org.slf4j.event.Level;
/**
@@ -128,8 +126,8 @@ void testRestartDfs(boolean useFlush) throws Exception {
// Check that the file has no less bytes than before the restart
// This would mean that blocks were successfully persisted to the log
FileStatus status = fs.getFileStatus(FILE_PATH);
- assertTrue("Length too short: " + status.getLen(),
- status.getLen() >= len);
+ assertTrue(status.getLen() >= len,
+ "Length too short: " + status.getLen());
// And keep writing (ensures that leases are also persisted correctly)
stream.write(DATA_AFTER_RESTART);
@@ -194,8 +192,8 @@ public void testRestartDfsWithAbandonedBlock() throws Exception {
// Check that the file has no less bytes than before the restart
// This would mean that blocks were successfully persisted to the log
FileStatus status = fs.getFileStatus(FILE_PATH);
- assertTrue("Length incorrect: " + status.getLen(),
- status.getLen() == len - BLOCK_SIZE);
+ assertTrue(status.getLen() == len - BLOCK_SIZE,
+ "Length incorrect: " + status.getLen());
// Verify the data showed up from before restart, sans abandoned block.
FSDataInputStream readStream = fs.open(FILE_PATH);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPipelines.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPipelines.java
index 4cead9c48a03d..ac32b8e03d0e5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPipelines.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPipelines.java
@@ -17,8 +17,8 @@
*/
package org.apache.hadoop.hdfs;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
import java.io.IOException;
import java.util.List;
@@ -35,9 +35,9 @@
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.Replica;
import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
import org.slf4j.event.Level;
public class TestPipelines {
@@ -57,13 +57,13 @@ public class TestPipelines {
setConfiguration();
}
- @Before
+ @BeforeEach
public void startUpCluster() throws IOException {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(REPL_FACTOR).build();
fs = cluster.getFileSystem();
}
- @After
+ @AfterEach
public void shutDownCluster() throws IOException {
if (fs != null) {
fs.close();
@@ -108,10 +108,10 @@ public void pipeline_01() throws IOException {
Replica r =
cluster.getFsDatasetTestUtils(dn).fetchReplica(lb.get(0).getBlock());
- assertTrue("Replica on DN " + dn + " shouldn't be null", r != null);
- assertEquals("Should be RBW replica on " + dn
- + " after sequence of calls append()/write()/hflush()",
- HdfsServerConstants.ReplicaState.RBW, r.getState());
+ assertTrue(r != null, "Replica on DN " + dn + " shouldn't be null");
+ assertEquals(HdfsServerConstants.ReplicaState.RBW, r.getState(),
+ "Should be RBW replica on " + dn
+ + " after sequence of calls append()/write()/hflush()");
}
ofs.close();
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPread.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPread.java
index c1e0dbb8e630f..d6dec94d9d2c6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPread.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPread.java
@@ -17,8 +17,8 @@
*/
package org.apache.hadoop.hdfs;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
import java.io.EOFException;
import java.io.IOException;
@@ -50,9 +50,9 @@
import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.Assert;
-import org.junit.Before;
import org.junit.Test;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeEach;
import org.mockito.Mockito;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;
@@ -77,7 +77,7 @@ public class TestPread {
private static final Logger LOG =
LoggerFactory.getLogger(TestPread.class.getName());
- @Before
+ @BeforeEach
public void setup() {
simulatedStorage = false;
isHedgedRead = false;
@@ -98,10 +98,10 @@ private void writeFile(FileSystem fileSys, Path name) throws IOException {
// should throw an exception
res = e;
}
- assertTrue("Error reading beyond file boundary.", res != null);
+ assertTrue(res != null, "Error reading beyond file boundary.");
in.close();
if (!fileSys.delete(name, true))
- assertTrue("Cannot delete file", false);
+ assertTrue(false, "Cannot delete file");
// now create the real file
DFSTestUtil.createFile(fileSys, name, fileSize, fileSize,
@@ -110,9 +110,9 @@ private void writeFile(FileSystem fileSys, Path name) throws IOException {
private void checkAndEraseData(byte[] actual, int from, byte[] expected, String message) {
for (int idx = 0; idx < actual.length; idx++) {
- assertEquals(message+" byte "+(from+idx)+" differs. expected "+
- expected[from+idx]+" actual "+actual[idx],
- actual[idx], expected[from+idx]);
+ assertEquals(actual[idx], expected[from + idx],
+ message + " byte " + (from + idx) + " differs. expected " +
+ expected[from + idx] + " actual " + actual[idx]);
actual[idx] = 0;
}
}
@@ -131,17 +131,17 @@ private void doPread(FSDataInputStream stm, long position, byte[] buffer,
while (nread < length) {
int nbytes =
stm.read(position + nread, buffer, offset + nread, length - nread);
- assertTrue("Error in pread", nbytes > 0);
+ assertTrue(nbytes > 0, "Error in pread");
nread += nbytes;
}
if (dfstm != null) {
if (isHedgedRead) {
- assertTrue("Expected read statistic to be incremented", length <= dfstm
- .getReadStatistics().getTotalBytesRead() - totalRead);
+ assertTrue(length <= dfstm.getReadStatistics().getTotalBytesRead() - totalRead,
+ "Expected read statistic to be incremented");
} else {
- assertEquals("Expected read statistic to be incremented", length, dfstm
- .getReadStatistics().getTotalBytesRead() - totalRead);
+ assertEquals(length, dfstm.getReadStatistics().getTotalBytesRead() - totalRead,
+ "Expected read statistic to be incremented");
}
}
}
@@ -212,7 +212,7 @@ private void pReadFile(FileSystem fileSys, Path name) throws IOException {
// should throw an exception
res = e;
}
- assertTrue("Error reading beyond file boundary.", res != null);
+ assertTrue(res != null, "Error reading beyond file boundary.");
stm.close();
}
@@ -544,9 +544,9 @@ public Void call() throws IOException {
});
try {
future.get(4, TimeUnit.SECONDS);
- Assert.fail();
+ Assertions.fail();
} catch (ExecutionException ee) {
- assertTrue(ee.toString(), ee.getCause() instanceof EOFException);
+ assertTrue(ee.getCause() instanceof EOFException, ee.toString());
} finally {
future.cancel(true);
executor.shutdown();
@@ -601,7 +601,7 @@ public Void answer(InvocationOnMock invocation) throws Throwable {
byte[] buffer = new byte[64 * 1024];
input = dfsClient.open(filename);
input.read(0, buffer, 0, 1024);
- Assert.fail("Reading the block should have thrown BlockMissingException");
+ Assertions.fail("Reading the block should have thrown BlockMissingException");
} catch (BlockMissingException e) {
assertEquals(3, input.getHedgedReadOpsLoopNumForTesting());
assertTrue(metrics.getHedgedReadOps() == 0);
@@ -760,10 +760,10 @@ public Boolean get() {
byte[] buf = new byte[1024];
int n = din.read(0, buf, 0, data.length());
assertEquals(data.length(), n);
- assertEquals("Data should be read", data, new String(buf, 0, n));
- assertTrue("Read should complete with maximum " + maxFailures
- + " failures, but completed with " + din.failures,
- din.failures <= maxFailures);
+ assertEquals(data, new String(buf, 0, n), "Data should be read");
+ assertTrue(din.failures <= maxFailures,
+ "Read should complete with maximum " + maxFailures
+ + " failures, but completed with " + din.failures);
DFSClient.LOG.info("Read completed");
}
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java
index 2e36b131fb3c3..2c1fa4bd1553a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java
@@ -21,11 +21,8 @@
import static org.hamcrest.CoreMatchers.allOf;
import static org.hamcrest.CoreMatchers.containsString;
import static org.hamcrest.CoreMatchers.is;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertThat;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.junit.jupiter.api.Assertions.*;
import java.io.ByteArrayOutputStream;
import java.io.FileNotFoundException;
@@ -59,12 +56,11 @@
import org.apache.hadoop.test.PathUtils;
import org.apache.hadoop.util.Lists;
import org.apache.hadoop.util.ToolRunner;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.BeforeClass;
import org.junit.Rule;
import org.junit.Test;
-
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeAll;
import org.apache.hadoop.thirdparty.com.google.common.base.Charsets;
import org.junit.rules.Timeout;
import org.slf4j.Logger;
@@ -90,7 +86,7 @@ public class TestQuota {
@Rule
public final Timeout testTestout = new Timeout(120000);
- @BeforeClass
+ @BeforeAll
public static void setUpClass() throws Exception {
conf = new HdfsConfiguration();
conf.set(
@@ -132,7 +128,7 @@ private static void resetStream() {
ERR_STREAM.reset();
}
- @AfterClass
+ @AfterAll
public static void tearDownClass() {
try {
System.out.flush();
@@ -288,7 +284,7 @@ public void testQuotaCommands() throws Exception {
try {
fout.write(new byte[fileLen]);
fout.close();
- Assert.fail();
+ Assertions.fail();
} catch (QuotaExceededException e) {
IOUtils.closeStream(fout);
}
@@ -396,8 +392,8 @@ public void testQuotaCommands() throws Exception {
ugi.doAs(new PrivilegedExceptionAction