diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplicationCrossDiffHdfs.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplicationCrossDiffHdfs.java
index 9edc6245295c..6b6e6eb3e154 100644
--- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplicationCrossDiffHdfs.java
+++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplicationCrossDiffHdfs.java
@@ -150,15 +150,15 @@ private static void loadSomeData() throws IOException, InterruptedException {
@AfterClass
public static void tearDownClass() throws Exception {
- if (mapReduceUtil != null) {
- mapReduceUtil.shutdownMiniCluster();
- }
- if (util2 != null) {
- util2.shutdownMiniCluster();
- }
- if (util1 != null) {
- util1.shutdownMiniCluster();
- }
+ if (mapReduceUtil != null) {
+ mapReduceUtil.shutdownMiniCluster();
+ }
+ if (util2 != null) {
+ util2.shutdownMiniCluster();
+ }
+ if (util1 != null) {
+ util1.shutdownMiniCluster();
+ }
}
@Test
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtil.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtil.java
index 4572164c30ea..1598e6fbbb3b 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtil.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtil.java
@@ -143,6 +143,8 @@
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.server.datanode.DataNode;
+import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
import org.apache.hadoop.hdfs.server.namenode.EditLogFileOutputStream;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MiniMRCluster;
@@ -202,6 +204,7 @@ public class HBaseTestingUtil extends HBaseZKTestingUtil {
public static final boolean PRESPLIT_TEST_TABLE = true;
private MiniDFSCluster dfsCluster = null;
+ private FsDatasetAsyncDiskServiceFixer dfsClusterFixer = null;
private volatile HBaseClusterInterface hbaseCluster = null;
private MiniMRCluster mrCluster = null;
@@ -571,6 +574,56 @@ private void setFs() throws IOException {
conf.unset(CommonFSUtils.UNSAFE_STREAM_CAPABILITY_ENFORCE);
}
+ // Workaround to avoid IllegalThreadStateException
+ // See HBASE-27148 for more details
+ private static final class FsDatasetAsyncDiskServiceFixer extends Thread {
+
+ private volatile boolean stopped = false;
+
+ private final MiniDFSCluster cluster;
+
+ FsDatasetAsyncDiskServiceFixer(MiniDFSCluster cluster) {
+ super("FsDatasetAsyncDiskServiceFixer");
+ setDaemon(true);
+ this.cluster = cluster;
+ }
+
+ @Override
+ public void run() {
+ while (!stopped) {
+ try {
+ Thread.sleep(30000);
+ } catch (InterruptedException e) {
+ Thread.currentThread().interrupt();
+ continue;
+ }
+ // we could add new datanodes during tests, so here we will check every 30 seconds, as the
+ // timeout of the thread pool executor is 60 seconds by default.
+ try {
+ for (DataNode dn : cluster.getDataNodes()) {
+ FsDatasetSpi<?> dataset = dn.getFSDataset();
+ Field service = dataset.getClass().getDeclaredField("asyncDiskService");
+ service.setAccessible(true);
+ Object asyncDiskService = service.get(dataset);
+ Field group = asyncDiskService.getClass().getDeclaredField("threadGroup");
+ group.setAccessible(true);
+ ThreadGroup threadGroup = (ThreadGroup) group.get(asyncDiskService);
+ if (threadGroup.isDaemon()) {
+ threadGroup.setDaemon(false);
+ }
+ }
+ } catch (Exception e) {
+ LOG.warn("failed to reset thread pool timeout for FsDatasetAsyncDiskService", e);
+ }
+ }
+ }
+
+ void shutdown() {
+ stopped = true;
+ interrupt();
+ }
+ }
+
public MiniDFSCluster startMiniDFSCluster(int servers, final String[] racks, String[] hosts)
throws Exception {
createDirsAndSetProperties();
@@ -582,7 +635,8 @@ public MiniDFSCluster startMiniDFSCluster(int servers, final String[] racks, Str
"ERROR");
this.dfsCluster =
new MiniDFSCluster(0, this.conf, servers, true, true, true, null, racks, hosts, null);
-
+ this.dfsClusterFixer = new FsDatasetAsyncDiskServiceFixer(dfsCluster);
+ this.dfsClusterFixer.start();
// Set this just-started cluster as our filesystem.
setFs();
@@ -606,6 +660,8 @@ public MiniDFSCluster startMiniDFSClusterForTestWAL(int namenodePort) throws IOE
"ERROR");
dfsCluster =
new MiniDFSCluster(namenodePort, conf, 5, false, true, true, null, null, null, null);
+ this.dfsClusterFixer = new FsDatasetAsyncDiskServiceFixer(dfsCluster);
+ this.dfsClusterFixer.start();
return dfsCluster;
}
@@ -728,6 +784,12 @@ public void shutdownMiniDFSCluster() throws IOException {
// The below throws an exception per dn, AsynchronousCloseException.
this.dfsCluster.shutdown();
dfsCluster = null;
+ // It is possible that the dfs cluster is set through setDFSCluster method, where we will not
+ // have a fixer
+ if (dfsClusterFixer != null) {
+ this.dfsClusterFixer.shutdown();
+ dfsClusterFixer = null;
+ }
dataTestDirOnTestFS = null;
CommonFSUtils.setFsDefault(this.conf, new Path("file:///"));
}
diff --git a/hbase-shaded/hbase-shaded-client-byo-hadoop/pom.xml b/hbase-shaded/hbase-shaded-client-byo-hadoop/pom.xml
index 51587e327da4..779e2a7ad4de 100644
--- a/hbase-shaded/hbase-shaded-client-byo-hadoop/pom.xml
+++ b/hbase-shaded/hbase-shaded-client-byo-hadoop/pom.xml
@@ -81,38 +81,6 @@
hadoop-common
provided
-    <dependency>
-      <groupId>org.codehaus.jackson</groupId>
-      <artifactId>jackson-jaxrs</artifactId>
-      <version>1.9.13</version>
-      <scope>provided</scope>
-      <exclusions>
-        <exclusion>
-          <groupId>org.codehaus.jackson</groupId>
-          <artifactId>jackson-mapper-asl</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>org.codehaus.jackson</groupId>
-          <artifactId>jackson-core-asl</artifactId>
-        </exclusion>
-      </exclusions>
-    </dependency>
-    <dependency>
-      <groupId>org.codehaus.jackson</groupId>
-      <artifactId>jackson-xc</artifactId>
-      <version>1.9.13</version>
-      <scope>provided</scope>
-      <exclusions>
-        <exclusion>
-          <groupId>org.codehaus.jackson</groupId>
-          <artifactId>jackson-mapper-asl</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>org.codehaus.jackson</groupId>
-          <artifactId>jackson-core-asl</artifactId>
-        </exclusion>
-      </exclusions>
-    </dependency>
diff --git a/hbase-shaded/hbase-shaded-mapreduce/pom.xml b/hbase-shaded/hbase-shaded-mapreduce/pom.xml
index 1a4e0f268630..0422dd03259d 100644
--- a/hbase-shaded/hbase-shaded-mapreduce/pom.xml
+++ b/hbase-shaded/hbase-shaded-mapreduce/pom.xml
@@ -203,52 +203,6 @@
org.apache.hadoop
hadoop-mapreduce-client-core
provided
-      <exclusions>
-        <exclusion>
-          <groupId>com.google.guava</groupId>
-          <artifactId>guava</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>javax.xml.bind</groupId>
-          <artifactId>jaxb-api</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>javax.ws.rs</groupId>
-          <artifactId>jsr311-api</artifactId>
-        </exclusion>
-      </exclusions>
-    </dependency>
-    <dependency>
-      <groupId>org.codehaus.jackson</groupId>
-      <artifactId>jackson-jaxrs</artifactId>
-      <version>1.9.13</version>
-      <scope>provided</scope>
-      <exclusions>
-        <exclusion>
-          <groupId>org.codehaus.jackson</groupId>
-          <artifactId>jackson-mapper-asl</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>org.codehaus.jackson</groupId>
-          <artifactId>jackson-core-asl</artifactId>
-        </exclusion>
-      </exclusions>
-    </dependency>
-    <dependency>
-      <groupId>org.codehaus.jackson</groupId>
-      <artifactId>jackson-xc</artifactId>
-      <version>1.9.13</version>
-      <scope>provided</scope>
-      <exclusions>
-        <exclusion>
-          <groupId>org.codehaus.jackson</groupId>
-          <artifactId>jackson-mapper-asl</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>org.codehaus.jackson</groupId>
-          <artifactId>jackson-core-asl</artifactId>
-        </exclusion>
-      </exclusions>
-    </dependency>
diff --git a/hbase-shaded/hbase-shaded-testing-util-tester/pom.xml b/hbase-shaded/hbase-shaded-testing-util-tester/pom.xml
index 8c2fc0ac56ad..e12f2bb36d54 100644
--- a/hbase-shaded/hbase-shaded-testing-util-tester/pom.xml
+++ b/hbase-shaded/hbase-shaded-testing-util-tester/pom.xml
@@ -84,12 +84,6 @@
${project.version}
test
-    <dependency>
-      <groupId>org.codehaus.jackson</groupId>
-      <artifactId>jackson-mapper-asl</artifactId>
-      <version>1.9.13</version>
-      <scope>test</scope>
-    </dependency>
diff --git a/hbase-shaded/hbase-shaded-testing-util/pom.xml b/hbase-shaded/hbase-shaded-testing-util/pom.xml
index c3d865112093..c79623337c74 100644
--- a/hbase-shaded/hbase-shaded-testing-util/pom.xml
+++ b/hbase-shaded/hbase-shaded-testing-util/pom.xml
@@ -38,36 +38,6 @@
${hadoop.version}
test-jar
compile
-      <exclusions>
-        <exclusion>
-          <groupId>javax.servlet.jsp</groupId>
-          <artifactId>jsp-api</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>org.codehaus.jackson</groupId>
-          <artifactId>jackson-mapper-asl</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>org.codehaus.jackson</groupId>
-          <artifactId>jackson-core-asl</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>org.codehaus.jackson</groupId>
-          <artifactId>jackson-jaxrs</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>org.codehaus.jackson</groupId>
-          <artifactId>jackson-xc</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>javax.xml.bind</groupId>
-          <artifactId>jaxb-api</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>javax.ws.rs</groupId>
-          <artifactId>jsr311-api</artifactId>
-        </exclusion>
-      </exclusions>
-    </dependency>
org.apache.hadoop
@@ -123,12 +93,6 @@
test-jar
compile
-    <dependency>
-      <groupId>org.codehaus.jackson</groupId>
-      <artifactId>jackson-jaxrs</artifactId>
-      <version>1.9.13</version>
-      <scope>compile</scope>
-    </dependency>
org.apache.hbase
hbase-testing-util
diff --git a/hbase-testing-util/pom.xml b/hbase-testing-util/pom.xml
index c8e2a82e236f..85168b65bb64 100644
--- a/hbase-testing-util/pom.xml
+++ b/hbase-testing-util/pom.xml
@@ -110,6 +110,11 @@
test-jar
compile
+    <dependency>
+      <groupId>org.mockito</groupId>
+      <artifactId>mockito-core</artifactId>
+      <scope>compile</scope>
+    </dependency>
com.github.stephenc.findbugs
findbugs-annotations
diff --git a/hbase-testing-util/src/main/java/org/apache/hadoop/hbase/HBaseTestingUtility.java b/hbase-testing-util/src/main/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
index 73a9c98a558f..d3f5a51dc4ae 100644
--- a/hbase-testing-util/src/main/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
+++ b/hbase-testing-util/src/main/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
@@ -135,6 +135,8 @@
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.server.datanode.DataNode;
+import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
import org.apache.hadoop.hdfs.server.namenode.EditLogFileOutputStream;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MiniMRCluster;
@@ -189,6 +191,7 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
public static final boolean PRESPLIT_TEST_TABLE = true;
private MiniDFSCluster dfsCluster = null;
+ private FsDatasetAsyncDiskServiceFixer dfsClusterFixer = null;
private volatile HBaseCluster hbaseCluster = null;
private MiniMRCluster mrCluster = null;
@@ -509,6 +512,56 @@ public boolean cleanupDataTestDirOnTestFS(String subdirName) throws IOException
return getTestFileSystem().delete(cpath, true);
}
+ // Workaround to avoid IllegalThreadStateException
+ // See HBASE-27148 for more details
+ private static final class FsDatasetAsyncDiskServiceFixer extends Thread {
+
+ private volatile boolean stopped = false;
+
+ private final MiniDFSCluster cluster;
+
+ FsDatasetAsyncDiskServiceFixer(MiniDFSCluster cluster) {
+ super("FsDatasetAsyncDiskServiceFixer");
+ setDaemon(true);
+ this.cluster = cluster;
+ }
+
+ @Override
+ public void run() {
+ while (!stopped) {
+ try {
+ Thread.sleep(30000);
+ } catch (InterruptedException e) {
+ Thread.currentThread().interrupt();
+ continue;
+ }
+ // we could add new datanodes during tests, so here we will check every 30 seconds, as the
+ // timeout of the thread pool executor is 60 seconds by default.
+ try {
+ for (DataNode dn : cluster.getDataNodes()) {
+ FsDatasetSpi<?> dataset = dn.getFSDataset();
+ Field service = dataset.getClass().getDeclaredField("asyncDiskService");
+ service.setAccessible(true);
+ Object asyncDiskService = service.get(dataset);
+ Field group = asyncDiskService.getClass().getDeclaredField("threadGroup");
+ group.setAccessible(true);
+ ThreadGroup threadGroup = (ThreadGroup) group.get(asyncDiskService);
+ if (threadGroup.isDaemon()) {
+ threadGroup.setDaemon(false);
+ }
+ }
+ } catch (Exception e) {
+ LOG.warn("failed to reset thread pool timeout for FsDatasetAsyncDiskService", e);
+ }
+ }
+ }
+
+ void shutdown() {
+ stopped = true;
+ interrupt();
+ }
+ }
+
/**
* Start a minidfscluster.
* @param servers How many DNs to start. n * @see #shutdownMiniDFSCluster()
@@ -567,7 +620,8 @@ public MiniDFSCluster startMiniDFSCluster(int servers, final String racks[], Str
this.dfsCluster =
new MiniDFSCluster(0, this.conf, servers, true, true, true, null, racks, hosts, null);
-
+ this.dfsClusterFixer = new FsDatasetAsyncDiskServiceFixer(dfsCluster);
+ this.dfsClusterFixer.start();
// Set this just-started cluster as our filesystem.
setFs();
@@ -591,6 +645,8 @@ public MiniDFSCluster startMiniDFSClusterForTestWAL(int namenodePort) throws IOE
"ERROR");
dfsCluster =
new MiniDFSCluster(namenodePort, conf, 5, false, true, true, null, null, null, null);
+ this.dfsClusterFixer = new FsDatasetAsyncDiskServiceFixer(dfsCluster);
+ this.dfsClusterFixer.start();
return dfsCluster;
}
@@ -713,6 +769,12 @@ public void shutdownMiniDFSCluster() throws IOException {
// The below throws an exception per dn, AsynchronousCloseException.
this.dfsCluster.shutdown();
dfsCluster = null;
+ // It is possible that the dfs cluster is set through setDFSCluster method, where we will not
+ // have a fixer
+ if (dfsClusterFixer != null) {
+ this.dfsClusterFixer.shutdown();
+ dfsClusterFixer = null;
+ }
dataTestDirOnTestFS = null;
CommonFSUtils.setFsDefault(this.conf, new Path("file:///"));
}
diff --git a/pom.xml b/pom.xml
index b527afcc032b..26058dd3dbba 100644
--- a/pom.xml
+++ b/pom.xml
@@ -771,7 +771,7 @@
3.0.4
${compileSource}
-    <hadoop-three.version>3.1.2</hadoop-three.version>
+    <hadoop-three.version>3.2.3</hadoop-three.version>
${hadoop-three.version}
@@ -781,7 +781,7 @@
3.10.5.Final
0.5.0
-    <avro.version>1.7.7</avro.version>
+    <avro.version>1.11.0</avro.version>
2.8.1
1.13
1.6
@@ -3128,8 +3128,6 @@
--add-opens java.base/java.lang.reflect=ALL-UNNAMED
--add-exports java.base/jdk.internal.misc=ALL-UNNAMED
${hbase-surefire.argLine}
-
- 3.2.0
+        <exclusion>
+          <groupId>org.codehaus.jackson</groupId>
+          <artifactId>*</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>javax.servlet.jsp</groupId>
+          <artifactId>jsp-api</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>javax.xml.bind</groupId>
+          <artifactId>jaxb-api</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>javax.ws.rs</groupId>
+          <artifactId>jsr311-api</artifactId>
+        </exclusion>