diff --git a/hudi-common/src/test/java/org/apache/hudi/common/testutils/minicluster/HdfsTestService.java b/hudi-common/src/test/java/org/apache/hudi/common/testutils/minicluster/HdfsTestService.java
index eda8591749dcf..ba584a43290c8 100644
--- a/hudi-common/src/test/java/org/apache/hudi/common/testutils/minicluster/HdfsTestService.java
+++ b/hudi-common/src/test/java/org/apache/hudi/common/testutils/minicluster/HdfsTestService.java
@@ -18,7 +18,6 @@
 
 package org.apache.hudi.common.testutils.minicluster;
 
-import org.apache.hudi.common.testutils.HoodieTestUtils;
 import org.apache.hudi.common.testutils.NetworkTestUtils;
 import org.apache.hudi.common.util.FileIOUtils;
 
@@ -45,7 +44,7 @@ public class HdfsTestService {
   /**
    * Configuration settings.
    */
-  private Configuration hadoopConf;
+  private final Configuration hadoopConf;
   private final String workDir;
 
   /**
@@ -54,6 +53,7 @@ public class HdfsTestService {
   private MiniDFSCluster miniDfsCluster;
 
   public HdfsTestService() throws IOException {
+    hadoopConf = new Configuration();
     workDir = Files.createTempDirectory("temp").toAbsolutePath().toString();
   }
 
@@ -63,7 +63,6 @@ public Configuration getHadoopConf() {
 
   public MiniDFSCluster start(boolean format) throws IOException {
     Objects.requireNonNull(workDir, "The work dir must be set before starting cluster.");
-    hadoopConf = HoodieTestUtils.getDefaultHadoopConf();
 
     // If clean, then remove the work dir so we can start fresh.
     String localDFSLocation = getDFSLocation(workDir);
@@ -107,7 +106,6 @@ public void stop() {
       miniDfsCluster.shutdown(true, true);
     }
     miniDfsCluster = null;
-    hadoopConf = null;
   }
 
   /**
@@ -123,9 +121,9 @@ private static String getDFSLocation(String baseFsLocation) {
   /**
    * Configure the DFS Cluster before launching it.
    *
-   * @param config The already created Hadoop configuration we'll further configure for HDFS
+   * @param config           The already created Hadoop configuration we'll further configure for HDFS
    * @param localDFSLocation The location on the local filesystem where cluster data is stored
-   * @param bindIP An IP address we want to force the datanode and namenode to bind to.
+   * @param bindIP           An IP address we want to force the datanode and namenode to bind to.
    * @return The updated Configuration object.
    */
   private static Configuration configureDFSCluster(Configuration config, String localDFSLocation, String bindIP,
@@ -146,7 +144,7 @@ private static Configuration configureDFSCluster(Configuration config, String lo
     String user = System.getProperty("user.name");
     config.set("hadoop.proxyuser." + user + ".groups", "*");
     config.set("hadoop.proxyuser." + user + ".hosts", "*");
-    config.setBoolean("dfs.permissions",false);
+    config.setBoolean("dfs.permissions", false);
 
     return config;
   }
diff --git a/hudi-hadoop-mr/src/test/java/org/apache/hudi/hadoop/functional/TestHoodieCombineHiveInputFormat.java b/hudi-hadoop-mr/src/test/java/org/apache/hudi/hadoop/hive/TestHoodieCombineHiveInputFormat.java
similarity index 98%
rename from hudi-hadoop-mr/src/test/java/org/apache/hudi/hadoop/functional/TestHoodieCombineHiveInputFormat.java
rename to hudi-hadoop-mr/src/test/java/org/apache/hudi/hadoop/hive/TestHoodieCombineHiveInputFormat.java
index 0a14af2212ac3..9b26a7915dd44 100644
--- a/hudi-hadoop-mr/src/test/java/org/apache/hudi/hadoop/functional/TestHoodieCombineHiveInputFormat.java
+++ b/hudi-hadoop-mr/src/test/java/org/apache/hudi/hadoop/hive/TestHoodieCombineHiveInputFormat.java
@@ -16,10 +16,8 @@
  * limitations under the License.
  */
 
-package org.apache.hudi.hadoop.functional;
+package org.apache.hudi.hadoop.hive;
 
-import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
-import org.apache.hadoop.hive.ql.io.IOContextMap;
 import org.apache.hudi.avro.HoodieAvroUtils;
 import org.apache.hudi.common.model.HoodieCommitMetadata;
 import org.apache.hudi.common.model.HoodieTableType;
@@ -33,9 +31,6 @@
 import org.apache.hudi.common.testutils.minicluster.MiniClusterUtil;
 import org.apache.hudi.common.util.CommitUtils;
 import org.apache.hudi.common.util.Option;
-import org.apache.hudi.hadoop.hive.HoodieCombineHiveInputFormat;
-import org.apache.hudi.hadoop.hive.HoodieCombineRealtimeFileSplit;
-import org.apache.hudi.hadoop.hive.HoodieCombineRealtimeHiveSplit;
 import org.apache.hudi.hadoop.realtime.HoodieParquetRealtimeInputFormat;
 import org.apache.hudi.hadoop.testutils.InputFormatTestUtil;
 
@@ -43,18 +38,20 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
 import org.apache.hadoop.hive.ql.exec.Utilities;
 import org.apache.hadoop.hive.ql.exec.mr.ExecMapper;
+import org.apache.hadoop.hive.ql.io.IOContextMap;
 import org.apache.hadoop.hive.ql.plan.MapredWork;
 import org.apache.hadoop.hive.ql.plan.PartitionDesc;
 import org.apache.hadoop.hive.ql.plan.TableDesc;
 import org.apache.hadoop.io.ArrayWritable;
 import org.apache.hadoop.io.NullWritable;
 import org.apache.hadoop.mapred.FileInputFormat;
+import org.apache.hadoop.mapred.FileSplit;
 import org.apache.hadoop.mapred.InputSplit;
 import org.apache.hadoop.mapred.JobConf;
 import org.apache.hadoop.mapred.RecordReader;
-import org.apache.hadoop.mapred.FileSplit;
 import org.junit.jupiter.api.AfterAll;
 import org.junit.jupiter.api.BeforeAll;
 import org.junit.jupiter.api.BeforeEach;
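
Note (not part of the patch): a minimal usage sketch of the lifecycle the HdfsTestService changes imply. The Hadoop configuration is now created once in the constructor, is final, and is no longer nulled out in stop(), so callers can read it before start() and keep using it across a restart. The driver class below is hypothetical, for illustration only.

    import java.io.IOException;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.MiniDFSCluster;
    import org.apache.hudi.common.testutils.minicluster.HdfsTestService;

    public class HdfsTestServiceUsage { // hypothetical driver, not in the patch
      public static void main(String[] args) throws IOException {
        HdfsTestService service = new HdfsTestService();
        // The configuration is available as soon as the service is constructed,
        // before start() is called (previously it was only created inside start()).
        Configuration conf = service.getHadoopConf();
        MiniDFSCluster cluster = service.start(true); // true = format the cluster
        try {
          // ... exercise HDFS through cluster.getFileSystem() or conf ...
        } finally {
          // stop() shuts the mini cluster down but no longer nulls hadoopConf,
          // so the same Configuration instance survives a later restart.
          service.stop();
        }
      }
    }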