diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
index 604c41c577d4..f94cae51953a 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
@@ -144,6 +144,8 @@
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.server.datanode.DataNode;
+import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
import org.apache.hadoop.hdfs.server.namenode.EditLogFileOutputStream;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MiniMRCluster;
@@ -196,6 +198,7 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
public static final boolean PRESPLIT_TEST_TABLE = true;
private MiniDFSCluster dfsCluster = null;
+ private FsDatasetAsyncDiskServiceFixer dfsClusterFixer = null;
private volatile HBaseCluster hbaseCluster = null;
private MiniMRCluster mrCluster = null;
@@ -574,6 +577,56 @@ public boolean cleanupDataTestDirOnTestFS(String subdirName) throws IOException
return getTestFileSystem().delete(cpath, true);
}
+ // Workaround to avoid IllegalThreadStateException
+ // See HBASE-27148 for more details
+ private static final class FsDatasetAsyncDiskServiceFixer extends Thread {
+
+ private volatile boolean stopped = false;
+
+ private final MiniDFSCluster cluster;
+
+ FsDatasetAsyncDiskServiceFixer(MiniDFSCluster cluster) {
+ super("FsDatasetAsyncDiskServiceFixer");
+ setDaemon(true);
+ this.cluster = cluster;
+ }
+
+ @Override
+ public void run() {
+ while (!stopped) {
+ try {
+ Thread.sleep(30000);
+ } catch (InterruptedException e) {
+ Thread.currentThread().interrupt();
+ continue;
+ }
+ // we could add new datanodes during tests, so here we will check every 30 seconds, as the
+ // timeout of the thread pool executor is 60 seconds by default.
+ try {
+ for (DataNode dn : cluster.getDataNodes()) {
+ FsDatasetSpi> dataset = dn.getFSDataset();
+ Field service = dataset.getClass().getDeclaredField("asyncDiskService");
+ service.setAccessible(true);
+ Object asyncDiskService = service.get(dataset);
+ Field group = asyncDiskService.getClass().getDeclaredField("threadGroup");
+ group.setAccessible(true);
+ ThreadGroup threadGroup = (ThreadGroup) group.get(asyncDiskService);
+ if (threadGroup.isDaemon()) {
+ threadGroup.setDaemon(false);
+ }
+ }
+ } catch (Exception e) {
+ LOG.warn("failed to reset thread pool timeout for FsDatasetAsyncDiskService", e);
+ }
+ }
+ }
+
+ void shutdown() {
+ stopped = true;
+ interrupt();
+ }
+ }
+
/**
* Start a minidfscluster.
* @param servers How many DNs to start. n * @see #shutdownMiniDFSCluster()
@@ -632,7 +685,8 @@ public MiniDFSCluster startMiniDFSCluster(int servers, final String racks[], Str
this.dfsCluster =
new MiniDFSCluster(0, this.conf, servers, true, true, true, null, racks, hosts, null);
-
+ this.dfsClusterFixer = new FsDatasetAsyncDiskServiceFixer(dfsCluster);
+ this.dfsClusterFixer.start();
// Set this just-started cluster as our filesystem.
setFs();
@@ -656,6 +710,8 @@ public MiniDFSCluster startMiniDFSClusterForTestWAL(int namenodePort) throws IOE
"ERROR");
dfsCluster =
new MiniDFSCluster(namenodePort, conf, 5, false, true, true, null, null, null, null);
+ this.dfsClusterFixer = new FsDatasetAsyncDiskServiceFixer(dfsCluster);
+ this.dfsClusterFixer.start();
return dfsCluster;
}
@@ -778,6 +834,12 @@ public void shutdownMiniDFSCluster() throws IOException {
// The below throws an exception per dn, AsynchronousCloseException.
this.dfsCluster.shutdown();
dfsCluster = null;
+ // It is possible that the dfs cluster is set through setDFSCluster method, where we will not
+ // have a fixer
+ if (dfsClusterFixer != null) {
+ this.dfsClusterFixer.shutdown();
+ dfsClusterFixer = null;
+ }
dataTestDirOnTestFS = null;
CommonFSUtils.setFsDefault(this.conf, new Path("file:///"));
}
diff --git a/hbase-testing-util/pom.xml b/hbase-testing-util/pom.xml
index eaaab43cd1ae..3cdd6213c20d 100644
--- a/hbase-testing-util/pom.xml
+++ b/hbase-testing-util/pom.xml
@@ -128,6 +128,16 @@
test-jar
compile
+    <dependency>
+      <groupId>org.mockito</groupId>
+      <artifactId>mockito-core</artifactId>
+      <scope>compile</scope>
+    </dependency>
+    <dependency>
+      <groupId>com.github.stephenc.findbugs</groupId>
+      <artifactId>findbugs-annotations</artifactId>
+      <scope>compile</scope>
+    </dependency>
org.slf4j
jcl-over-slf4j
diff --git a/pom.xml b/pom.xml
index 8d0690f393a9..d02fadace041 100644
--- a/pom.xml
+++ b/pom.xml
@@ -544,7 +544,7 @@
${compileSource}
2.10.0
-    <hadoop-three.version>3.1.2</hadoop-three.version>
+    <hadoop-three.version>3.2.3</hadoop-three.version>
@@ -557,7 +557,7 @@
3.6.2.Final
0.5.0
-    <avro.version>1.7.7</avro.version>
+    <avro.version>1.11.0</avro.version>
2.8.1
1.13
2.11.0
@@ -2816,8 +2816,6 @@
--add-opens java.base/java.lang.reflect=ALL-UNNAMED
--add-exports java.base/jdk.internal.misc=ALL-UNNAMED
${hbase-surefire.argLine}
-
- 3.2.0
@@ -3777,19 +3767,7 @@
org.codehaus.jackson
- jackson-mapper-asl
-
-
- org.codehaus.jackson
- jackson-core-asl
-
-
- org.codehaus.jackson
- jackson-jaxrs
-
-
- org.codehaus.jackson
- jackson-xc
+ *
javax.xml.bind
@@ -3860,11 +3838,7 @@
org.codehaus.jackson
- jackson-core-asl
-
-
- org.codehaus.jackson
- jackson-mapper-asl
+ *
com.google.guava
@@ -3929,11 +3903,7 @@
org.codehaus.jackson
- jackson-core-asl
-
-
- org.codehaus.jackson
- jackson-mapper-asl
+ *
com.google.guava
@@ -3968,38 +3938,6 @@
-->
-
-
- org.apache.hadoop
- hadoop-hdfs
- ${hadoop-three.version}
- tests
- test-jar
- test
-
-
- com.sun.jersey
- jersey-core
-
-
- org.slf4j
- slf4j-log4j12
-
-
- log4j
- log4j
-
-
- ch.qos.reload4j
- reload4j
-
-
- org.slf4j
- slf4j-reload4j
-
-
-
org.apache.hadoop
hadoop-auth
@@ -4081,12 +4019,8 @@
junit
- org.codehause.jackson
- jackson-core-asl
-
-
- org.codehause.jackson
- jackson-mapper-asl
+ org.codehaus.jackson
+ *
org.slf4j
@@ -4182,6 +4116,29 @@
org.slf4j
slf4j-reload4j
+
+
+ org.codehaus.jackson
+ *
+
+
+ javax.servlet.jsp
+ jsp-api
+
+
+ javax.xml.bind
+ jaxb-api
+
+
+ javax.ws.rs
+ jsr311-api
+