diff --git a/common/pom.xml b/common/pom.xml
index c0fdaa70f0b3..b64d57fe5faa 100644
--- a/common/pom.xml
+++ b/common/pom.xml
@@ -81,7 +81,6 @@
      <groupId>org.eclipse.jetty</groupId>
      <artifactId>jetty-http</artifactId>
-      <version>${jetty.version}</version>
      <groupId>org.eclipse.jetty</groupId>
@@ -195,6 +194,11 @@
      <artifactId>tez-api</artifactId>
      <version>${tez.version}</version>
+    <dependency>
+      <groupId>org.fusesource.jansi</groupId>
+      <artifactId>jansi</artifactId>
+      <version>${jansi.version}</version>
+    </dependency>
      <groupId>com.google.code.tempus-fugit</groupId>
diff --git a/data/conf/hive-site.xml b/data/conf/hive-site.xml
index 7681932c404e..e8a3798749e9 100644
--- a/data/conf/hive-site.xml
+++ b/data/conf/hive-site.xml
@@ -409,4 +409,20 @@
    <name>hive.txn.xlock.ctas</name>
    <value>false</value>
+
+  <property>
+    <name>hive.server2.thrift.resultset.max.fetch.size</name>
+    <value>1000000</value>
+  </property>
+
+  <property>
+    <name>hive.server2.webui.max.threads</name>
+    <value>4</value>
+  </property>
+
+  <property>
+    <name>hive.async.cleanup.service.thread.count</name>
+    <value>4</value>
+  </property>
+
diff --git a/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatMultiOutputFormat.java b/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatMultiOutputFormat.java
index 93d1418afad4..e601992fc40b 100644
--- a/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatMultiOutputFormat.java
+++ b/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatMultiOutputFormat.java
@@ -32,6 +32,7 @@
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
import org.apache.hadoop.hive.metastore.MetaStoreTestUtils;
@@ -279,6 +280,10 @@ public void testOutputFormat() throws Throwable {
infoList.add(OutputJobInfo.create("default", tableNames[1], partitionValues));
infoList.add(OutputJobInfo.create("default", tableNames[2], partitionValues));
+ // There are tests that check file permissions (which are manually set)
+    // Disable NN ACLs so that the manual permissions are observed
+ hiveConf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, false);
+
Job job = new Job(hiveConf, "SampleJob");
job.setMapperClass(MyMapper.class);
@@ -315,18 +320,18 @@ public void testOutputFormat() throws Throwable {
// Check permisssion on partition dirs and files created
for (int i = 0; i < tableNames.length; i++) {
- Path partitionFile = new Path(warehousedir + "/" + tableNames[i]
- + "/ds=1/cluster=ag/part-m-00000");
- FileSystem fs = partitionFile.getFileSystem(mrConf);
- Assert.assertEquals("File permissions of table " + tableNames[i] + " is not correct",
- fs.getFileStatus(partitionFile).getPermission(),
- new FsPermission(tablePerms[i]));
- Assert.assertEquals("File permissions of table " + tableNames[i] + " is not correct",
- fs.getFileStatus(partitionFile.getParent()).getPermission(),
- new FsPermission(tablePerms[i]));
- Assert.assertEquals("File permissions of table " + tableNames[i] + " is not correct",
- fs.getFileStatus(partitionFile.getParent().getParent()).getPermission(),
- new FsPermission(tablePerms[i]));
+ final Path partitionFile = new Path(warehousedir + "/" + tableNames[i] + "/ds=1/cluster=ag/part-m-00000");
+
+ final FileSystem fs = partitionFile.getFileSystem(mrConf);
+
+ Assert.assertEquals("File permissions of table " + tableNames[i] + " is not correct [" + partitionFile + "]",
+ new FsPermission(tablePerms[i]), fs.getFileStatus(partitionFile).getPermission());
+ Assert.assertEquals(
+ "File permissions of table " + tableNames[i] + " is not correct [" + partitionFile + "]",
+ new FsPermission(tablePerms[i]), fs.getFileStatus(partitionFile).getPermission());
+ Assert.assertEquals(
+ "File permissions of table " + tableNames[i] + " is not correct [" + partitionFile.getParent() + "]",
+ new FsPermission(tablePerms[i]), fs.getFileStatus(partitionFile.getParent()).getPermission());
}
LOG.info("File permissions verified");
diff --git a/hcatalog/webhcat/java-client/src/test/java/org/apache/hive/hcatalog/api/TestHCatClient.java b/hcatalog/webhcat/java-client/src/test/java/org/apache/hive/hcatalog/api/TestHCatClient.java
index 5b43323a62a0..0420c506136d 100644
--- a/hcatalog/webhcat/java-client/src/test/java/org/apache/hive/hcatalog/api/TestHCatClient.java
+++ b/hcatalog/webhcat/java-client/src/test/java/org/apache/hive/hcatalog/api/TestHCatClient.java
@@ -37,11 +37,13 @@
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.IMetaStoreClient;
import org.apache.hadoop.hive.metastore.MetaStoreTestUtils;
+import org.apache.hadoop.hive.metastore.TransactionalMetaStoreEventListener;
import org.apache.hadoop.hive.metastore.Warehouse;
import org.apache.hadoop.hive.metastore.api.MetaException;
import org.apache.hadoop.hive.metastore.api.NotificationEvent;
import org.apache.hadoop.hive.metastore.api.PartitionEventType;
import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+import org.apache.hadoop.hive.metastore.messaging.MessageEncoder;
import org.apache.hadoop.hive.metastore.messaging.json.JSONMessageEncoder;
import org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat;
import org.apache.hadoop.hive.ql.io.RCFileInputFormat;
@@ -114,16 +116,16 @@ public static void startMetaStoreServer() throws Exception {
return;
}
- // Set proxy user privilege and initialize the global state of ProxyUsers
- Configuration conf = new Configuration();
- conf.set("hadoop.proxyuser." + Utils.getUGI().getShortUserName() + ".hosts", "*");
- ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
+ Configuration conf = MetastoreConf.newMetastoreConf();
- System.setProperty(HiveConf.ConfVars.METASTORE_TRANSACTIONAL_EVENT_LISTENERS.varname,
- DbNotificationListener.class.getName()); // turn on db notification listener on metastore
- System.setProperty(MetastoreConf.ConfVars.EVENT_MESSAGE_FACTORY.getHiveName(),
- JSONMessageEncoder.class.getName());
- msPort = MetaStoreTestUtils.startMetaStoreWithRetry();
+ // Disable proxy authorization white-list for testing
+ MetastoreConf.setBoolVar(conf, MetastoreConf.ConfVars.EVENT_DB_NOTIFICATION_API_AUTH, false);
+
+ // turn on db notification listener on metastore
+ MetastoreConf.setClass(conf, MetastoreConf.ConfVars.TRANSACTIONAL_EVENT_LISTENERS, DbNotificationListener.class, TransactionalMetaStoreEventListener.class);
+ MetastoreConf.setClass(conf, MetastoreConf.ConfVars.EVENT_MESSAGE_FACTORY, JSONMessageEncoder.class, MessageEncoder.class);
+
+ msPort = MetaStoreTestUtils.startMetaStoreWithRetry(conf);
securityManager = System.getSecurityManager();
System.setSecurityManager(new NoExitSecurityManager());
Policy.setPolicy(new DerbyPolicy());
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/BaseReplicationAcrossInstances.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/BaseReplicationAcrossInstances.java
index f5f8dbdd8985..f486fc35a059 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/BaseReplicationAcrossInstances.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/BaseReplicationAcrossInstances.java
@@ -56,7 +56,6 @@ static void internalBeforeClassSetup(Map overrides, Class clazz)
throws Exception {
conf = new HiveConf(clazz);
conf.set("dfs.client.use.datanode.hostname", "true");
- conf.set("hadoop.proxyuser." + Utils.getUGI().getShortUserName() + ".hosts", "*");
conf.set("hive.repl.cmrootdir", "/tmp/");
conf.set("dfs.namenode.acls.enabled", "true");
MiniDFSCluster miniDFSCluster =
@@ -64,6 +63,8 @@ static void internalBeforeClassSetup(Map overrides, Class clazz)
Map localOverrides = new HashMap() {{
put("fs.defaultFS", miniDFSCluster.getFileSystem().getUri().toString());
put(HiveConf.ConfVars.HIVE_IN_TEST_REPL.varname, "true");
+ // Disable proxy authorization white-list for testing
+ put(MetastoreConf.ConfVars.EVENT_DB_NOTIFICATION_API_AUTH.getVarname(), "false");
}};
localOverrides.putAll(overrides);
setFullyQualifiedReplicaExternalTableBase(miniDFSCluster.getFileSystem());
@@ -88,7 +89,6 @@ static void internalBeforeClassSetupExclusiveReplica(Map primary
replicaConf = new HiveConf(clazz);
replicaConf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, replicaBaseDir);
replicaConf.set("dfs.client.use.datanode.hostname", "true");
- replicaConf.set("hadoop.proxyuser." + Utils.getUGI().getShortUserName() + ".hosts", "*");
MiniDFSCluster miniReplicaDFSCluster =
new MiniDFSCluster.Builder(replicaConf).numDataNodes(2).format(true).build();
@@ -97,7 +97,6 @@ static void internalBeforeClassSetupExclusiveReplica(Map primary
conf = new HiveConf(clazz);
conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, primaryBaseDir);
conf.set("dfs.client.use.datanode.hostname", "true");
- conf.set("hadoop.proxyuser." + Utils.getUGI().getShortUserName() + ".hosts", "*");
MiniDFSCluster miniPrimaryDFSCluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).format(true).build();
// Setup primary warehouse.
@@ -106,6 +105,8 @@ static void internalBeforeClassSetupExclusiveReplica(Map primary
localOverrides.put(HiveConf.ConfVars.HIVE_IN_TEST_REPL.varname, "true");
localOverrides.put(HiveConf.ConfVars.REPL_EXTERNAL_TABLE_BASE_DIR.varname, fullyQualifiedReplicaExternalBase);
localOverrides.put("fs.defaultFS", miniPrimaryDFSCluster.getFileSystem().getUri().toString());
+ // Disable proxy authorization white-list for testing
+ localOverrides.put(MetastoreConf.ConfVars.EVENT_DB_NOTIFICATION_API_AUTH.getVarname(), "false");
localOverrides.putAll(primaryOverrides);
primary = new WarehouseInstance(LOG, miniPrimaryDFSCluster, localOverrides);
@@ -114,6 +115,7 @@ static void internalBeforeClassSetupExclusiveReplica(Map primary
localOverrides.put(HiveConf.ConfVars.REPL_EXTERNAL_TABLE_BASE_DIR.varname, fullyQualifiedReplicaExternalBase);
localOverrides.put("fs.defaultFS", miniReplicaDFSCluster.getFileSystem().getUri().toString());
localOverrides.put(HiveConf.ConfVars.HIVE_IN_TEST_REPL.varname, "true");
+ localOverrides.put(MetastoreConf.ConfVars.EVENT_DB_NOTIFICATION_API_AUTH.getVarname(), "false");
localOverrides.putAll(replicaOverrides);
replica = new WarehouseInstance(LOG, miniReplicaDFSCluster, localOverrides);
}
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationOnHDFSEncryptedZones.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationOnHDFSEncryptedZones.java
index c33188ff163b..731eb9c6bd73 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationOnHDFSEncryptedZones.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationOnHDFSEncryptedZones.java
@@ -28,7 +28,6 @@
import org.junit.Assert;
import org.junit.Before;
import org.junit.BeforeClass;
-import org.junit.Ignore;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.TestName;
@@ -60,8 +59,8 @@ public class TestReplicationOnHDFSEncryptedZones {
@BeforeClass
public static void beforeClassSetup() throws Exception {
System.setProperty("jceks.key.serialFilter", "java.lang.Enum;java.security.KeyRep;" +
- "java.security.KeyRep$Type;javax.crypto.spec.SecretKeySpec;" +
- "org.apache.hadoop.crypto.key.JavaKeyStoreProvider$KeyMetadata;!*");
+ "java.security.KeyRep$Type;javax.crypto.spec.SecretKeySpec;" +
+ "org.apache.hadoop.crypto.key.JavaKeyStoreProvider$KeyMetadata;!*");
conf = new Configuration();
conf.set("dfs.client.use.datanode.hostname", "true");
conf.set("hadoop.proxyuser." + Utils.getUGI().getShortUserName() + ".hosts", "*");
@@ -95,7 +94,7 @@ public void setup() throws Throwable {
primaryDbName = testName.getMethodName() + "_" + +System.currentTimeMillis();
replicatedDbName = "replicated_" + primaryDbName;
primary.run("create database " + primaryDbName + " WITH DBPROPERTIES ( '" +
- SOURCE_OF_REPLICATION + "' = '1,2,3')");
+ SOURCE_OF_REPLICATION + "' = '1,2,3')");
}
@Test
@@ -109,74 +108,38 @@ public void targetAndSourceHaveDifferentEncryptionZoneKeys() throws Throwable {
replicaConf.setBoolean("dfs.namenode.delegation.token.always-use", true);
MiniDFSCluster miniReplicaDFSCluster =
- new MiniDFSCluster.Builder(replicaConf).numDataNodes(2).format(true).build();
+ new MiniDFSCluster.Builder(replicaConf).numDataNodes(2).format(true).build();
replicaConf.setBoolean(METASTORE_AGGREGATE_STATS_CACHE_ENABLED.varname, false);
DFSTestUtil.createKey("test_key123", miniReplicaDFSCluster, replicaConf);
WarehouseInstance replica = new WarehouseInstance(LOG, miniReplicaDFSCluster,
- new HashMap() {{
- put(HiveConf.ConfVars.HIVE_IN_TEST.varname, "false");
- put(HiveConf.ConfVars.HIVE_SERVER2_ENABLE_DOAS.varname, "false");
- put(HiveConf.ConfVars.HIVE_DISTCP_DOAS_USER.varname,
- UserGroupInformation.getCurrentUser().getUserName());
- put(HiveConf.ConfVars.REPLDIR.varname, primary.repldDir);
- }}, "test_key123");
-
- List dumpWithClause = Arrays.asList(
- "'hive.repl.add.raw.reserved.namespace'='true'",
- "'" + HiveConf.ConfVars.REPL_EXTERNAL_TABLE_BASE_DIR.varname + "'='"
- + replica.externalTableWarehouseRoot + "'",
- "'distcp.options.skipcrccheck'=''",
- "'" + HiveConf.ConfVars.HIVE_SERVER2_ENABLE_DOAS.varname + "'='false'",
- "'" + HiveConf.ConfVars.HIVE_DISTCP_DOAS_USER.varname + "'='"
- + UserGroupInformation.getCurrentUser().getUserName() +"'");
- WarehouseInstance.Tuple tuple =
- primary.run("use " + primaryDbName)
- .run("create table encrypted_table (id int, value string)")
- .run("insert into table encrypted_table values (1,'value1')")
- .run("insert into table encrypted_table values (2,'value2')")
- .dump(primaryDbName, dumpWithClause);
-
- replica
- .run("repl load " + primaryDbName + " into " + replicatedDbName
- + " with('hive.repl.add.raw.reserved.namespace'='true', "
- + "'hive.repl.replica.external.table.base.dir'='" + replica.externalTableWarehouseRoot + "', "
- + "'hive.exec.copyfile.maxsize'='0', 'distcp.options.skipcrccheck'='')")
- .run("use " + replicatedDbName)
- .run("repl status " + replicatedDbName)
- .verifyResult(tuple.lastReplicationId);
-
- try {
- replica
- .run("select value from encrypted_table")
- .verifyResults(new String[] { "value1", "value2" });
- Assert.fail("Src EZKey shouldn't be present on target");
- } catch (Throwable e) {
- while (e.getCause() != null) {
- e = e.getCause();
- }
- Assert.assertTrue(e.getMessage().contains("KeyVersion name 'test_key@0' does not exist"));
- }
+ new HashMap() {{
+ put(HiveConf.ConfVars.HIVE_IN_TEST.varname, "false");
+ put(HiveConf.ConfVars.HIVE_SERVER2_ENABLE_DOAS.varname, "false");
+ put(HiveConf.ConfVars.HIVE_DISTCP_DOAS_USER.varname,
+ UserGroupInformation.getCurrentUser().getUserName());
+ put(HiveConf.ConfVars.REPLDIR.varname, primary.repldDir);
+ }}, "test_key123");
//read should pass without raw-byte distcp
- dumpWithClause = Arrays.asList( "'" + HiveConf.ConfVars.REPL_EXTERNAL_TABLE_BASE_DIR.varname + "'='"
- + replica.externalTableWarehouseRoot + "'");
- tuple = primary.run("use " + primaryDbName)
+ List dumpWithClause = Arrays.asList( "'" + HiveConf.ConfVars.REPL_EXTERNAL_TABLE_BASE_DIR.varname + "'='"
+ + replica.externalTableWarehouseRoot + "'");
+ WarehouseInstance.Tuple tuple =
+ primary.run("use " + primaryDbName)
.run("create external table encrypted_table2 (id int, value string)")
.run("insert into table encrypted_table2 values (1,'value1')")
.run("insert into table encrypted_table2 values (2,'value2')")
.dump(primaryDbName, dumpWithClause);
replica
- .run("repl load " + primaryDbName + " into " + replicatedDbName
- + " with('hive.repl.replica.external.table.base.dir'='" + replica.externalTableWarehouseRoot + "', "
- + "'hive.exec.copyfile.maxsize'='0', 'distcp.options.skipcrccheck'='')")
- .run("use " + replicatedDbName)
- .run("repl status " + replicatedDbName)
- .verifyResult(tuple.lastReplicationId)
- .run("select value from encrypted_table2")
- .verifyResults(new String[] { "value1", "value2" });
+ .run("repl load " + primaryDbName + " into " + replicatedDbName
+ + " with('hive.repl.replica.external.table.base.dir'='" + replica.externalTableWarehouseRoot + "', "
+ + "'hive.exec.copyfile.maxsize'='0', 'distcp.options.skipcrccheck'='')")
+ .run("use " + replicatedDbName)
+ .run("repl status " + replicatedDbName)
+ .run("select value from encrypted_table2")
+ .verifyResults(new String[] { "value1", "value2" });
}
@Test
@@ -190,7 +153,7 @@ public void targetAndSourceHaveSameEncryptionZoneKeys() throws Throwable {
replicaConf.setBoolean("dfs.namenode.delegation.token.always-use", true);
MiniDFSCluster miniReplicaDFSCluster =
- new MiniDFSCluster.Builder(replicaConf).numDataNodes(2).format(true).build();
+ new MiniDFSCluster.Builder(replicaConf).numDataNodes(2).format(true).build();
replicaConf.setBoolean(METASTORE_AGGREGATE_STATS_CACHE_ENABLED.varname, false);
WarehouseInstance replica = new WarehouseInstance(LOG, miniReplicaDFSCluster,
@@ -203,13 +166,13 @@ public void targetAndSourceHaveSameEncryptionZoneKeys() throws Throwable {
}}, "test_key");
List dumpWithClause = Arrays.asList(
- "'hive.repl.add.raw.reserved.namespace'='true'",
- "'" + HiveConf.ConfVars.REPL_EXTERNAL_TABLE_BASE_DIR.varname + "'='"
- + replica.externalTableWarehouseRoot + "'",
- "'distcp.options.skipcrccheck'=''",
- "'" + HiveConf.ConfVars.HIVE_SERVER2_ENABLE_DOAS.varname + "'='false'",
- "'" + HiveConf.ConfVars.HIVE_DISTCP_DOAS_USER.varname + "'='"
- + UserGroupInformation.getCurrentUser().getUserName() +"'");
+ "'hive.repl.add.raw.reserved.namespace'='true'",
+ "'" + HiveConf.ConfVars.REPL_EXTERNAL_TABLE_BASE_DIR.varname + "'='"
+ + replica.externalTableWarehouseRoot + "'",
+ "'distcp.options.skipcrccheck'=''",
+ "'" + HiveConf.ConfVars.HIVE_SERVER2_ENABLE_DOAS.varname + "'='false'",
+ "'" + HiveConf.ConfVars.HIVE_DISTCP_DOAS_USER.varname + "'='"
+ + UserGroupInformation.getCurrentUser().getUserName() +"'");
WarehouseInstance.Tuple tuple =
primary.run("use " + primaryDbName)
@@ -229,4 +192,4 @@ public void targetAndSourceHaveSameEncryptionZoneKeys() throws Throwable {
.run("select value from encrypted_table")
.verifyResults(new String[] { "value1", "value2" });
}
-}
+}
\ No newline at end of file
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java
index afb8648ec341..910dda676582 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java
@@ -199,7 +199,8 @@ static void internalBeforeClassSetup(Map additionalProperties)
hconf.set(MetastoreConf.ConfVars.THRIFT_URIS.getHiveName(), metastoreUri);
return;
}
-
+    // Disable notification API authorization so calls from the tests succeed
+ MetastoreConf.setBoolVar(hconf, MetastoreConf.ConfVars.EVENT_DB_NOTIFICATION_API_AUTH, false);
hconf.set(MetastoreConf.ConfVars.TRANSACTIONAL_EVENT_LISTENERS.getHiveName(),
DBNOTIF_LISTENER_CLASSNAME); // turn on db notification listener on metastore
hconf.setBoolVar(HiveConf.ConfVars.REPLCMENABLED, true);
@@ -207,6 +208,7 @@ static void internalBeforeClassSetup(Map additionalProperties)
hconf.setVar(HiveConf.ConfVars.REPLCMDIR, TEST_PATH + "/cmroot/");
proxySettingName = "hadoop.proxyuser." + Utils.getUGI().getShortUserName() + ".hosts";
hconf.set(proxySettingName, "*");
+ MetastoreConf.setBoolVar(hconf, MetastoreConf.ConfVars.EVENT_DB_NOTIFICATION_API_AUTH, false);
hconf.setVar(HiveConf.ConfVars.REPLDIR,TEST_PATH + "/hrepl/");
hconf.set(MetastoreConf.ConfVars.THRIFT_CONNECTION_RETRIES.getHiveName(), "3");
hconf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, "");
@@ -247,9 +249,10 @@ static void internalBeforeClassSetup(Map additionalProperties)
hconfMirrorServer.set(HiveConf.ConfVars.METASTORECONNECTURLKEY.varname, "jdbc:derby:;databaseName=metastore_db2;create=true");
MetaStoreTestUtils.startMetaStoreWithRetry(hconfMirrorServer, true);
hconfMirror = new HiveConf(hconf);
+ MetastoreConf.setBoolVar(hconfMirror, MetastoreConf.ConfVars.EVENT_DB_NOTIFICATION_API_AUTH, false);
+ hconfMirrorServer.set(proxySettingName, "*");
String thriftUri = MetastoreConf.getVar(hconfMirrorServer, MetastoreConf.ConfVars.THRIFT_URIS);
MetastoreConf.setVar(hconfMirror, MetastoreConf.ConfVars.THRIFT_URIS, thriftUri);
-
driverMirror = DriverFactory.newDriver(hconfMirror);
metaStoreClientMirror = new HiveMetaStoreClient(hconfMirror);
@@ -4188,27 +4191,32 @@ public void testAuthForNotificationAPIs() throws Exception {
NotificationEventResponse rsp = metaStoreClient.getNextNotification(firstEventId, 0, null);
assertEquals(1, rsp.getEventsSize());
// Test various scenarios
- // Remove the proxy privilege and the auth should fail (in reality the proxy setting should not be changed on the fly)
- hconf.unset(proxySettingName);
- // Need to explicitly update ProxyUsers
- ProxyUsers.refreshSuperUserGroupsConfiguration(hconf);
- // Verify if the auth should fail
- Exception ex = null;
+    // Remove the proxy privilege by resetting the proxy configuration to its default value.
+ // The auth should fail (in reality the proxy setting should not be changed on the fly)
+ // Pretty hacky: Affects both instances of HMS
+ ProxyUsers.refreshSuperUserGroupsConfiguration();
try {
+ hconf.setBoolVar(HiveConf.ConfVars.HIVE_IN_TEST, false);
+ MetastoreConf.setBoolVar(hconf, MetastoreConf.ConfVars.EVENT_DB_NOTIFICATION_API_AUTH, true);
rsp = metaStoreClient.getNextNotification(firstEventId, 0, null);
+ Assert.fail("Get Next Nofitication should have failed due to no proxy auth");
} catch (TException e) {
- ex = e;
+ // Expected to throw an Exception - keep going
}
- assertNotNull(ex);
// Disable auth so the call should succeed
MetastoreConf.setBoolVar(hconf, MetastoreConf.ConfVars.EVENT_DB_NOTIFICATION_API_AUTH, false);
+ MetastoreConf.setBoolVar(hconfMirror, MetastoreConf.ConfVars.EVENT_DB_NOTIFICATION_API_AUTH, false);
try {
rsp = metaStoreClient.getNextNotification(firstEventId, 0, null);
assertEquals(1, rsp.getEventsSize());
} finally {
// Restore the settings
- MetastoreConf.setBoolVar(hconf, MetastoreConf.ConfVars.EVENT_DB_NOTIFICATION_API_AUTH, true);
+ MetastoreConf.setBoolVar(hconf, MetastoreConf.ConfVars.EVENT_DB_NOTIFICATION_API_AUTH, false);
+ hconf.setBoolVar(HiveConf.ConfVars.HIVE_IN_TEST, true);
hconf.set(proxySettingName, "*");
+
+ // Restore Proxy configurations to test values
+ // Pretty hacky: Applies one setting to both instances of HMS
ProxyUsers.refreshSuperUserGroupsConfiguration(hconf);
}
}
diff --git a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcDriver2.java b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcDriver2.java
index 6efcf681bc64..9256d288f221 100644
--- a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcDriver2.java
+++ b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcDriver2.java
@@ -2815,11 +2815,12 @@ public void testGetQueryLog() throws Exception {
String sql = "select count(*) from " + tableName;
// Verify the fetched log (from the beginning of log file)
- HiveStatement stmt = (HiveStatement)con.createStatement();
- assertNotNull("Statement is null", stmt);
- stmt.executeQuery(sql);
- List logs = stmt.getQueryLog(false, 10000);
- stmt.close();
+ List logs;
+ try (HiveStatement stmt = (HiveStatement) con.createStatement()) {
+ assertNotNull("Statement is null", stmt);
+ stmt.executeQuery(sql);
+ logs = stmt.getQueryLog(false, 200000);
+ }
verifyFetchedLog(logs, expectedLogs);
// Verify the fetched log (incrementally)
@@ -3010,8 +3011,7 @@ private void verifyFetchedLog(List logs, String[] expectedLogs) {
}
String accumulatedLogs = stringBuilder.toString();
for (String expectedLog : expectedLogs) {
- LOG.info("Checking match for " + expectedLog);
- assertTrue(accumulatedLogs.contains(expectedLog));
+ assertTrue("Failed to find match for " + expectedLog, accumulatedLogs.contains(expectedLog));
}
}
diff --git a/itests/hive-unit/src/test/java/org/apache/hive/service/cli/operation/OperationLoggingAPITestBase.java b/itests/hive-unit/src/test/java/org/apache/hive/service/cli/operation/OperationLoggingAPITestBase.java
index c1b937806731..6e101486cd2d 100644
--- a/itests/hive-unit/src/test/java/org/apache/hive/service/cli/operation/OperationLoggingAPITestBase.java
+++ b/itests/hive-unit/src/test/java/org/apache/hive/service/cli/operation/OperationLoggingAPITestBase.java
@@ -86,7 +86,7 @@ public void testFetchResultsOfLogWithVerboseMode() throws Exception {
client.executeStatement(sessionHandle, queryString, null);
// verify whether the sql operation log is generated and fetch correctly.
OperationHandle operationHandle = client.executeStatement(sessionHandle, sqlCntStar, null);
- RowSet rowSetLog = client.fetchResults(operationHandle, FetchOrientation.FETCH_FIRST, 1000,
+ RowSet rowSetLog = client.fetchResults(operationHandle, FetchOrientation.FETCH_FIRST, 100000,
FetchType.LOG);
// Verbose Logs should contain everything, including execution and performance
verifyFetchedLog(rowSetLog, expectedLogsVerbose);
@@ -101,7 +101,7 @@ public void testFetchResultsOfLogWithPerformanceMode() throws Exception {
client.executeStatement(sessionHandle, queryString, null);
// verify whether the sql operation log is generated and fetch correctly.
OperationHandle operationHandle = client.executeStatement(sessionHandle, sqlCntStar, null);
- RowSet rowSetLog = client.fetchResults(operationHandle, FetchOrientation.FETCH_FIRST, 1000,
+ RowSet rowSetLog = client.fetchResults(operationHandle, FetchOrientation.FETCH_FIRST, 100000,
FetchType.LOG);
// rowSetLog should contain execution as well as performance logs
verifyFetchedLog(rowSetLog, expectedLogsExecution);
diff --git a/itests/pom.xml b/itests/pom.xml
index c74ae7d9b0fb..52508f12894d 100644
--- a/itests/pom.xml
+++ b/itests/pom.xml
@@ -352,6 +352,12 @@
      <groupId>org.apache.hadoop</groupId>
      <artifactId>hadoop-yarn-client</artifactId>
      <version>${hadoop.version}</version>
+      <exclusions>
+        <exclusion>
+          <groupId>org.jline</groupId>
+          <artifactId>jline</artifactId>
+        </exclusion>
+      </exclusions>
      <groupId>org.apache.hadoop</groupId>
diff --git a/itests/qtest-druid/pom.xml b/itests/qtest-druid/pom.xml
index c0e74a0d923b..95fcc516aec7 100644
--- a/itests/qtest-druid/pom.xml
+++ b/itests/qtest-druid/pom.xml
@@ -33,7 +33,7 @@
1.15.0
4.0.0
1.19.3
- 9.4.10.v20180503
+ 9.4.40.v20210413
10.11.1.1
16.0.1
4.1.0
diff --git a/itests/qtest/pom.xml b/itests/qtest/pom.xml
index bc58476789df..fa3f2f2807cd 100644
--- a/itests/qtest/pom.xml
+++ b/itests/qtest/pom.xml
@@ -418,6 +418,24 @@
      <groupId>com.oracle.database.jdbc</groupId>
      <artifactId>ojdbc8</artifactId>
+    <dependency>
+      <groupId>com.google.inject</groupId>
+      <artifactId>guice</artifactId>
+      <version>4.1.0</version>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>com.google.inject.extensions</groupId>
+      <artifactId>guice-multibindings</artifactId>
+      <version>4.1.0</version>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>com.google.inject.extensions</groupId>
+      <artifactId>guice-servlet</artifactId>
+      <version>4.1.0</version>
+      <scope>test</scope>
+    </dependency>
diff --git a/llap-common/pom.xml b/llap-common/pom.xml
index 686e85fb47d4..6624065bbf16 100644
--- a/llap-common/pom.xml
+++ b/llap-common/pom.xml
@@ -64,7 +64,6 @@
      <groupId>org.apache.hadoop</groupId>
      <artifactId>hadoop-common</artifactId>
-      <optional>true</optional>
      <groupId>org.slf4j</groupId>
diff --git a/llap-server/pom.xml b/llap-server/pom.xml
index 42251033679a..0b225d6a81a6 100644
--- a/llap-server/pom.xml
+++ b/llap-server/pom.xml
@@ -221,7 +221,6 @@
      <groupId>org.eclipse.jetty</groupId>
      <artifactId>jetty-util</artifactId>
-      <version>${jetty.version}</version>
      <groupId>com.lmax</groupId>
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/ContainerRunnerImpl.java b/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/ContainerRunnerImpl.java
index 06c6009496e7..336bf056915e 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/ContainerRunnerImpl.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/ContainerRunnerImpl.java
@@ -248,7 +248,7 @@ public SubmitWorkResponseProto submitWork(SubmitWorkRequestProto request) throws
vertex.getVertexName(), request.getFragmentNumber(), request.getAttemptNumber());
// This is the start of container-annotated logging.
- final String dagId = attemptId.getTaskID().getVertexID().getDAGId().toString();
+ final String dagId = attemptId.getDAGID().toString();
final String queryId = vertex.getHiveQueryId();
final String fragmentId = LlapTezUtils.stripAttemptPrefix(fragmentIdString);
MDC.put("dagId", dagId);
@@ -270,7 +270,7 @@ public SubmitWorkResponseProto submitWork(SubmitWorkRequestProto request) throws
env.put(ApplicationConstants.Environment.USER.name(), vertex.getUser());
TezTaskAttemptID taskAttemptId = TezTaskAttemptID.fromString(fragmentIdString);
- int dagIdentifier = taskAttemptId.getTaskID().getVertexID().getDAGId().getId();
+ int dagIdentifier = taskAttemptId.getDAGID().getId();
QueryIdentifier queryIdentifier = new QueryIdentifier(
qIdProto.getApplicationIdString(), dagIdentifier);
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapTaskReporter.java b/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapTaskReporter.java
index b4f51f0560d1..f3b337032bd8 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapTaskReporter.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapTaskReporter.java
@@ -291,7 +291,7 @@ private synchronized ResponseWrapper heartbeat(Collection eventsArg) t
int fromPreRoutedEventId = task.getNextPreRoutedEventId();
int maxEvents = Math.min(maxEventsToGet, task.getMaxEventsToHandle());
TezHeartbeatRequest request = new TezHeartbeatRequest(requestId, events, fromPreRoutedEventId,
- containerIdStr, task.getTaskAttemptID(), fromEventId, maxEvents);
+ containerIdStr, task.getTaskAttemptID(), fromEventId, maxEvents, 0);
LOG.debug("Sending heartbeat to AM, request={}", request);
maybeLogCounters();
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/TaskRunnerCallable.java b/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/TaskRunnerCallable.java
index 99f226c799df..f299c58c5b85 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/TaskRunnerCallable.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/TaskRunnerCallable.java
@@ -330,7 +330,7 @@ private String constructThreadNameSuffix(TezTaskAttemptID taskAttemptId) {
StringBuilder sb = new StringBuilder();
TezTaskID taskId = taskAttemptId.getTaskID();
TezVertexID vertexId = taskId.getVertexID();
- TezDAGID dagId = vertexId.getDAGId();
+ TezDAGID dagId = vertexId.getDAGID();
ApplicationId appId = dagId.getApplicationId();
long clusterTs = appId.getClusterTimestamp();
long clusterTsShort = clusterTs % 1_000_000L;
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/io/api/impl/LlapInputFormat.java b/llap-server/src/java/org/apache/hadoop/hive/llap/io/api/impl/LlapInputFormat.java
index 789b637bd6b9..19f8ca8dd600 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/io/api/impl/LlapInputFormat.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/io/api/impl/LlapInputFormat.java
@@ -141,6 +141,8 @@ public RecordReader getRecordReader(
// This starts the reader in the background.
rr.start();
return result;
+ } catch (IOException ioe) {
+ throw ioe;
} catch (Exception ex) {
Throwable rootCause = JavaUtils.findRootCause(ex);
if (checkLimitReached(job)
diff --git a/llap-tez/src/java/org/apache/hadoop/hive/llap/tezplugins/LlapTaskCommunicator.java b/llap-tez/src/java/org/apache/hadoop/hive/llap/tezplugins/LlapTaskCommunicator.java
index a9ae27f25dc7..c5bb75b8ec67 100644
--- a/llap-tez/src/java/org/apache/hadoop/hive/llap/tezplugins/LlapTaskCommunicator.java
+++ b/llap-tez/src/java/org/apache/hadoop/hive/llap/tezplugins/LlapTaskCommunicator.java
@@ -430,7 +430,7 @@ public void startUpdateGuaranteed(TezTaskAttemptID attemptId, NodeInfo assig
UpdateFragmentRequestProto request = UpdateFragmentRequestProto.newBuilder()
.setIsGuaranteed(newState).setFragmentIdentifierString(attemptId.toString())
.setQueryIdentifier(constructQueryIdentifierProto(
- attemptId.getTaskID().getVertexID().getDAGId().getId())).build();
+ attemptId.getDAGID().getId())).build();
communicator.sendUpdateFragment(request, nodeId.getHostname(), nodeId.getPort(),
new LlapProtocolClientProxy.ExecuteRequestCallback() {
@@ -457,7 +457,7 @@ public void registerRunningTaskAttempt(final ContainerId containerId, final Task
int priority) {
super.registerRunningTaskAttempt(containerId, taskSpec, additionalResources, credentials,
credentialsChanged, priority);
- int dagId = taskSpec.getTaskAttemptID().getTaskID().getVertexID().getDAGId().getId();
+ int dagId = taskSpec.getDAGID().getId();
if (currentQueryIdentifierProto == null || (dagId != currentQueryIdentifierProto.getDagIndex())) {
String hiveQueryId = extractQueryIdFromContext();
try {
@@ -611,7 +611,7 @@ private void sendTaskTerminated(final TezTaskAttemptID taskAttemptId,
TerminateFragmentRequestProto request =
TerminateFragmentRequestProto.newBuilder().setQueryIdentifier(
constructQueryIdentifierProto(
- taskAttemptId.getTaskID().getVertexID().getDAGId().getId()))
+ taskAttemptId.getDAGID().getId()))
.setFragmentIdentifierString(taskAttemptId.toString()).build();
communicator.sendTerminateFragment(request, nodeId.getHostname(), nodeId.getPort(),
new LlapProtocolClientProxy.ExecuteRequestCallback() {
@@ -755,7 +755,7 @@ private String constructLogUrl(final TezTaskAttemptID attemptID, final NodeId co
private String constructLlapLogUrl(final TezTaskAttemptID attemptID, final String containerIdString,
final boolean isDone, final String nmAddress) {
- String dagId = attemptID.getTaskID().getVertexID().getDAGId().toString();
+ String dagId = attemptID.getDAGID().toString();
String filename = JOINER.join(currentHiveQueryId, "-", dagId, ".log", (isDone ? ".done" : ""),
"?nm.id=", nmAddress);
String url = PATH_JOINER.join(timelineServerUri, "ws", "v1", "applicationhistory", "containers",
@@ -904,7 +904,7 @@ private SubmitWorkRequestProto constructSubmitWorkRequest(ContainerId containerI
builder.setAmPort(getAddress().getPort());
Preconditions.checkState(currentQueryIdentifierProto.getDagIndex() ==
- taskSpec.getTaskAttemptID().getTaskID().getVertexID().getDAGId().getId());
+ taskSpec.getDAGID().getId());
builder.setCredentialsBinary(
diff --git a/llap-tez/src/java/org/apache/hadoop/hive/llap/tezplugins/LlapTaskSchedulerService.java b/llap-tez/src/java/org/apache/hadoop/hive/llap/tezplugins/LlapTaskSchedulerService.java
index 473ab1eb18ac..85c34ddfc067 100644
--- a/llap-tez/src/java/org/apache/hadoop/hive/llap/tezplugins/LlapTaskSchedulerService.java
+++ b/llap-tez/src/java/org/apache/hadoop/hive/llap/tezplugins/LlapTaskSchedulerService.java
@@ -1170,7 +1170,7 @@ public void allocateTask(Object task, Resource capability, String[] hosts, Strin
task, priority, capability);
if (!dagRunning) {
if (metrics != null && id != null) {
- metrics.setDagId(id.getTaskID().getVertexID().getDAGId().toString());
+ metrics.setDagId(id.getDAGID().toString());
}
dagRunning = true;
}
@@ -1191,7 +1191,7 @@ public void allocateTask(Object task, Resource capability, ContainerId container
task, priority, capability, containerId);
if (!dagRunning) {
if (metrics != null && id != null) {
- metrics.setDagId(id.getTaskID().getVertexID().getDAGId().toString());
+ metrics.setDagId(id.getDAGID().toString());
}
dagRunning = true;
}
@@ -1204,7 +1204,7 @@ public void allocateTask(Object task, Resource capability, ContainerId container
protected TezTaskAttemptID getTaskAttemptId(Object task) {
// TODO: why does Tez API use "Object" for this?
if (task instanceof TaskAttempt) {
- return ((TaskAttempt)task).getID();
+ return ((TaskAttempt)task).getTaskAttemptID();
}
throw new AssertionError("LLAP plugin can only schedule task attempts");
}
@@ -2108,7 +2108,7 @@ private boolean addTaskPreemptionCandidate(TreeMap> r
if (preemptHosts != null && !preemptHosts.contains(taskInfo.getAssignedNode().getHost())) {
continue; // Not the right host.
}
- Map> depInfo = getDependencyInfo(taskInfo.getAttemptId().getTaskID().getVertexID().getDAGId());
+ Map> depInfo = getDependencyInfo(taskInfo.getAttemptId().getDAGID());
Set vertexDepInfo = null;
if (depInfo != null) {
vertexDepInfo = depInfo.get(vertexNum(forTask));
diff --git a/llap-tez/src/test/org/apache/hadoop/hive/llap/tezplugins/TestLlapTaskCommunicator.java b/llap-tez/src/test/org/apache/hadoop/hive/llap/tezplugins/TestLlapTaskCommunicator.java
index 3bbbdf3fff20..5a23d6d6cc1c 100644
--- a/llap-tez/src/test/org/apache/hadoop/hive/llap/tezplugins/TestLlapTaskCommunicator.java
+++ b/llap-tez/src/test/org/apache/hadoop/hive/llap/tezplugins/TestLlapTaskCommunicator.java
@@ -363,6 +363,7 @@ private TaskSpec createBaseTaskSpec(String vertexName, TezVertexID vertexId, int
TezTaskAttemptID taskAttemptId = TezTaskAttemptID.getInstance(
TezTaskID.getInstance(vertexId, taskIdx), 0);
doReturn(taskAttemptId).when(taskSpec).getTaskAttemptID();
+ doReturn(taskAttemptId.getDAGID()).when(taskSpec).getDAGID();
doReturn(DAG_NAME).when(taskSpec).getDAGName();
doReturn(vertexName).when(taskSpec).getVertexName();
ProcessorDescriptor processorDescriptor = ProcessorDescriptor.create("fakeClassName").setUserPayload(userPayload);
diff --git a/pom.xml b/pom.xml
index b77c249781fb..9358f76a8a90 100644
--- a/pom.xml
+++ b/pom.xml
@@ -131,7 +131,7 @@
19.0
2.4.21
2.1.210
-    <hadoop.version>3.1.0</hadoop.version>
+    <hadoop.version>3.3.1</hadoop.version>
${basedir}/${hive.path.to.root}/testutils/hadoop
1.3
2.0.0-alpha4
@@ -151,7 +151,7 @@
2.3.1
5.5.1
1.1
-    <jetty.version>9.3.27.v20190418</jetty.version>
+    <jetty.version>9.4.40.v20210413</jetty.version>
1.19
2.14.6
2.0.2
@@ -192,7 +192,7 @@
1.7.30
4.0.4
4.0.0-alpha-2-SNAPSHOT
-    <tez.version>0.10.1</tez.version>
+    <tez.version>0.10.2</tez.version>
2.2.0
1.1
1.1.8.4
@@ -216,6 +216,7 @@
1.1.0.Final
1.0.1
1.11.901
+    <jansi.version>2.3.4</jansi.version>
@@ -744,6 +745,11 @@
        <artifactId>jetty-webapp</artifactId>
        <version>${jetty.version}</version>
+      <dependency>
+        <groupId>org.eclipse.jetty</groupId>
+        <artifactId>jetty-http</artifactId>
+        <version>${jetty.version}</version>
+      </dependency>
        <groupId>org.eclipse.jetty</groupId>
        <artifactId>jetty-util</artifactId>
@@ -1018,6 +1024,10 @@
          <groupId>org.slf4j</groupId>
          <artifactId>slf4j-log4j12</artifactId>
+        <exclusion>
+          <groupId>org.jline</groupId>
+          <artifactId>jline</artifactId>
+        </exclusion>
        <groupId>commons-logging</groupId>
        <artifactId>commons-logging</artifactId>
@@ -1052,6 +1062,12 @@
      <groupId>org.apache.hadoop</groupId>
      <artifactId>hadoop-yarn-client</artifactId>
      <version>${hadoop.version}</version>
+      <exclusions>
+        <exclusion>
+          <groupId>org.jline</groupId>
+          <artifactId>jline</artifactId>
+        </exclusion>
+      </exclusions>
      <groupId>org.apache.hadoop</groupId>
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java
index 173a283f807d..c3fc4094504b 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java
@@ -430,16 +430,18 @@ private List getNextSplits() throws Exception {
private void generateWrappedSplits(InputFormat inputFormat, List inputSplits, JobConf job)
throws IOException {
- InputSplit[] splits;
+ InputSplit[] splits = new InputSplit[0];
try {
splits = inputFormat.getSplits(job, 1);
+ } catch (InvalidInputException iie) {
+ LOG.warn("Input path " + currPath + " is empty", iie);
} catch (Exception ex) {
Throwable t = ExceptionUtils.getRootCause(ex);
if (t instanceof FileNotFoundException || t instanceof InvalidInputException) {
- LOG.warn("Input path " + currPath + " is empty", t.getMessage());
- return;
+ LOG.warn("Input path " + currPath + " is empty", t);
+ } else {
+ throw ex;
}
- throw ex;
}
for (int i = 0; i < splits.length; i++) {
inputSplits.add(new FetchInputFormatSplit(splits[i], inputFormat));
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezTask.java
index 2235904c818a..66db1676b306 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezTask.java
@@ -867,5 +867,12 @@ public DAGStatus waitForCompletionWithStatusUpdates(@Nullable Set
return dagClient.waitForCompletionWithStatusUpdates(statusGetOpts);
}
}
+
+ @Override
+ public String getWebUIAddress() throws IOException, TezException {
+ synchronized (dagClient) {
+ return dagClient.getWebUIAddress();
+ }
+ }
}
}
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/RecordReaderWrapper.java b/ql/src/java/org/apache/hadoop/hive/ql/io/RecordReaderWrapper.java
index 523c770cf74c..e950da218204 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/RecordReaderWrapper.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/RecordReaderWrapper.java
@@ -36,6 +36,7 @@
import org.slf4j.LoggerFactory;
import java.io.IOException;
+import java.io.InterruptedIOException;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.util.ArrayList;
@@ -69,7 +70,15 @@ static RecordReader create(InputFormat inputFormat, HiveInputFormat.HiveInputSpl
JobConf jobConf, Reporter reporter) throws IOException {
int headerCount = Utilities.getHeaderCount(tableDesc);
int footerCount = Utilities.getFooterCount(tableDesc, jobConf);
- RecordReader innerReader = inputFormat.getRecordReader(split.getInputSplit(), jobConf, reporter);
+
+ RecordReader innerReader = null;
+ try {
+ innerReader = inputFormat.getRecordReader(split.getInputSplit(), jobConf, reporter);
+ } catch (InterruptedIOException iioe) {
+ // If reading from the underlying record reader is interrupted, return a no-op record reader
+ LOG.info("Interrupted while getting the input reader for {}", split.getInputSplit());
+ return new ZeroRowsInputFormat().getRecordReader(split.getInputSplit(), jobConf, reporter);
+ }
if (LOG.isDebugEnabled()) {
LOG.debug("Using {} to read data with skip.header.line.count {} and skip.footer.line.count {}",
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/StorageBasedAuthorizationProvider.java b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/StorageBasedAuthorizationProvider.java
index 56be71962745..1058ce3b6d1f 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/StorageBasedAuthorizationProvider.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/StorageBasedAuthorizationProvider.java
@@ -178,8 +178,7 @@ public void authorize(Database db, Privilege[] readRequiredPriv, Privilege[] wri
private static boolean userHasProxyPrivilege(String user, Configuration conf) {
try {
- if (MetaStoreServerUtils.checkUserHasHostProxyPrivileges(user, conf,
- HMSHandler.getIPAddress())) {
+ if (MetaStoreServerUtils.checkUserHasHostProxyPrivileges(user, conf, HMSHandler.getIPAddress())) {
LOG.info("user {} has host proxy privilege.", user);
return true;
}
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/metastore/HiveMetaStoreAuthorizer.java b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/metastore/HiveMetaStoreAuthorizer.java
index c3a6ef683bc9..969254ab1ae1 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/metastore/HiveMetaStoreAuthorizer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/metastore/HiveMetaStoreAuthorizer.java
@@ -53,6 +53,7 @@
import org.apache.hadoop.hive.ql.security.authorization.plugin.metastore.filtercontext.DatabaseFilterContext;
import org.apache.hadoop.hive.ql.security.authorization.plugin.metastore.filtercontext.TableFilterContext;
import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.authorize.ProxyUsers;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -489,6 +490,7 @@ HiveAuthorizer createHiveMetaStoreAuthorizer() throws Exception {
boolean isSuperUser(String userName) {
Configuration conf = getConf();
String ipAddress = HMSHandler.getIPAddress();
+ ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
return (MetaStoreServerUtils.checkUserHasHostProxyPrivileges(userName, conf, ipAddress));
}
diff --git a/ql/src/test/queries/clientpositive/acid_table_directories_test.q b/ql/src/test/queries/clientpositive/acid_table_directories_test.q
index 2ad21263b236..048e8561e88b 100644
--- a/ql/src/test/queries/clientpositive/acid_table_directories_test.q
+++ b/ql/src/test/queries/clientpositive/acid_table_directories_test.q
@@ -1,3 +1,5 @@
+--! qt:disabled:Tests the output of 'ls -R', which changed post Hadoop 3.3.x (the output is no longer sorted), so
+--disabled as part of HIVE-24484 (Upgrade Hadoop to 3.3.1)
set hive.mapred.mode=nonstrict;
set hive.support.concurrency=true;
set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
diff --git a/ql/src/test/queries/clientpositive/encryption_load_data_to_encrypted_tables.q b/ql/src/test/queries/clientpositive/encryption_load_data_to_encrypted_tables.q
index 834bfbcc2bb9..03b00ca1eb04 100644
--- a/ql/src/test/queries/clientpositive/encryption_load_data_to_encrypted_tables.q
+++ b/ql/src/test/queries/clientpositive/encryption_load_data_to_encrypted_tables.q
@@ -11,7 +11,8 @@ LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' OVERWRITE INTO TABLE encrypted
SELECT * FROM encrypted_table_n0;
-- Test loading data from the hdfs filesystem;
-dfs -copyFromLocal ../../data/files/kv1.txt hdfs:///tmp/kv1.txt;
+dfs -mkdir hdfs:///tmp;
+dfs -copyFromLocal -f ../../data/files/kv1.txt hdfs:///tmp/kv1.txt;
LOAD DATA INPATH '/tmp/kv1.txt' OVERWRITE INTO TABLE encrypted_table_n0;
SELECT * FROM encrypted_table_n0;
diff --git a/shims/0.23/src/main/java/org/apache/hadoop/hive/shims/Hadoop23Shims.java b/shims/0.23/src/main/java/org/apache/hadoop/hive/shims/Hadoop23Shims.java
index b8ff0e35671f..a73c50253bd6 100644
--- a/shims/0.23/src/main/java/org/apache/hadoop/hive/shims/Hadoop23Shims.java
+++ b/shims/0.23/src/main/java/org/apache/hadoop/hive/shims/Hadoop23Shims.java
@@ -51,6 +51,7 @@
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FutureDataInputStreamBuilder;
import org.apache.hadoop.fs.LocatedFileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
@@ -766,6 +767,11 @@ public ProxyFileSystem23(FileSystem fs, URI uri) {
super(fs, uri);
}
+ @Override
+ public FutureDataInputStreamBuilder openFile(Path path) throws IOException, UnsupportedOperationException {
+ return super.openFile(ProxyFileSystem23.super.swizzleParamPath(path));
+ }
+
@Override
public RemoteIterator listLocatedStatus(final Path f)
throws FileNotFoundException, IOException {
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/DefaultIncompatibleTableChangeHandler.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/DefaultIncompatibleTableChangeHandler.java
index f45772f3da44..c9317c933946 100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/DefaultIncompatibleTableChangeHandler.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/DefaultIncompatibleTableChangeHandler.java
@@ -109,7 +109,7 @@ private void checkColTypeChangeCompatible(Configuration conf, Table oldTable,
throw new InvalidOperationException(
"The following columns have types incompatible with the existing " +
"columns in their respective positions :\n" +
- org.apache.commons.lang.StringUtils.join(incompatibleCols, ',')
+ org.apache.commons.lang3.StringUtils.join(incompatibleCols, ',')
);
}
}
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HMSHandler.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HMSHandler.java
index 6b6f5e463c86..79be3ec81dbd 100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HMSHandler.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HMSHandler.java
@@ -124,6 +124,7 @@
import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_DATABASE_NAME;
import static org.apache.hadoop.hive.metastore.Warehouse.getCatalogQualifiedTableName;
import static org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.TABLE_IS_CTLT;
+import static org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars.HIVE_IN_TEST;
import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.CAT_NAME;
import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.DB_NAME;
import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.getDefaultCatalog;
@@ -293,7 +294,7 @@ PartitionExpressionProxy getExpressionProxy() {
public HMSHandler(String name, Configuration conf) {
super(name);
this.conf = conf;
- isInTest = MetastoreConf.getBoolVar(this.conf, ConfVars.HIVE_IN_TEST);
+ isInTest = MetastoreConf.getBoolVar(this.conf, HIVE_IN_TEST);
if (threadPool == null) {
synchronized (HMSHandler.class) {
if (threadPool == null) {
@@ -9373,7 +9374,8 @@ public NotificationEventsCountResponse get_notification_events_count(Notificatio
private void authorizeProxyPrivilege() throws TException {
// Skip the auth in embedded mode or if the auth is disabled
if (!HiveMetaStore.isMetaStoreRemote() ||
- !MetastoreConf.getBoolVar(conf, ConfVars.EVENT_DB_NOTIFICATION_API_AUTH)) {
+        !MetastoreConf.getBoolVar(conf, ConfVars.EVENT_DB_NOTIFICATION_API_AUTH) ||
+        conf.getBoolean(HIVE_IN_TEST.getVarname(), false)) {
return;
}
String user = null;
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
index 083d1e39c4d7..a9df86b2b870 100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
@@ -408,7 +408,7 @@ private static ThriftServer startHttpMetastore(int port, Configuration conf)
thread.setName("Metastore-HttpHandler-Pool: Thread-" + thread.getId());
return thread;
});
- ExecutorThreadPool threadPool = new ExecutorThreadPool(executorService);
+ ExecutorThreadPool threadPool = new ExecutorThreadPool((ThreadPoolExecutor) executorService);
// HTTP Server
org.eclipse.jetty.server.Server server = new Server(threadPool);
server.setStopAtShutdown(true);
diff --git a/standalone-metastore/pom.xml b/standalone-metastore/pom.xml
index 42f5e9631902..302d05d36a64 100644
--- a/standalone-metastore/pom.xml
+++ b/standalone-metastore/pom.xml
@@ -75,7 +75,7 @@
3.1.0
19.0
-    <hadoop.version>3.1.0</hadoop.version>
+    <hadoop.version>3.3.1</hadoop.version>
2.6.1
2.12.7
5.5.1
@@ -106,7 +106,7 @@
4.4.13
4.5.5
9.20
-    <jetty.version>9.3.27.v20190418</jetty.version>
+    <jetty.version>9.4.40.v20210413</jetty.version>
you-must-set-this-to-run-thrift
${basedir}/src/gen/thrift
@@ -240,6 +240,10 @@
        <artifactId>hadoop-mapreduce-client-core</artifactId>
        <version>${hadoop.version}</version>
+        <exclusion>
+          <groupId>org.jline</groupId>
+          <artifactId>jline</artifactId>
+        </exclusion>
        <groupId>io.netty</groupId>
        <artifactId>netty</artifactId>
diff --git a/storage-api/pom.xml b/storage-api/pom.xml
index d34c71c11381..e96b8a2a6a57 100644
--- a/storage-api/pom.xml
+++ b/storage-api/pom.xml
@@ -29,7 +29,7 @@
1.8
1.1.3
19.0
-    <hadoop.version>3.1.0</hadoop.version>
+    <hadoop.version>3.3.1</hadoop.version>
4.13
5.6.3
5.6.3
diff --git a/storage-api/src/java/org/apache/hadoop/hive/common/ValidReadTxnList.java b/storage-api/src/java/org/apache/hadoop/hive/common/ValidReadTxnList.java
index f75aeb37d71d..4f32583e2408 100644
--- a/storage-api/src/java/org/apache/hadoop/hive/common/ValidReadTxnList.java
+++ b/storage-api/src/java/org/apache/hadoop/hive/common/ValidReadTxnList.java
@@ -18,10 +18,10 @@
package org.apache.hadoop.hive.common;
-import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang3.StringUtils;
import org.apache.hive.common.util.SuppressFBWarnings;
-import org.apache.commons.lang.ArrayUtils;
+import org.apache.commons.lang3.ArrayUtils;
import java.util.ArrayList;
import java.util.Arrays;
diff --git a/streaming/pom.xml b/streaming/pom.xml
index ff7b12b60338..86ee5bc8efdc 100644
--- a/streaming/pom.xml
+++ b/streaming/pom.xml
@@ -68,6 +68,11 @@
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-hdfs</artifactId>
+      <optional>true</optional>
+    </dependency>
      <groupId>org.apache.hadoop</groupId>
      <artifactId>hadoop-mapreduce-client-core</artifactId>
diff --git a/streaming/src/test/org/apache/hive/streaming/TestStreaming.java b/streaming/src/test/org/apache/hive/streaming/TestStreaming.java
index 3655bf236af1..277081750cf7 100644
--- a/streaming/src/test/org/apache/hive/streaming/TestStreaming.java
+++ b/streaming/src/test/org/apache/hive/streaming/TestStreaming.java
@@ -78,6 +78,7 @@
import org.apache.hadoop.hive.metastore.utils.TestTxnDbUtil;
import org.apache.hadoop.hive.metastore.txn.TxnStore;
import org.apache.hadoop.hive.metastore.txn.TxnUtils;
+import org.apache.hadoop.hive.metastore.utils.TestTxnDbUtil;
import org.apache.hadoop.hive.ql.DriverFactory;
import org.apache.hadoop.hive.ql.IDriver;
import org.apache.hadoop.hive.ql.io.AcidDirectory;
@@ -110,6 +111,7 @@
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
+import org.junit.Ignore;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.TemporaryFolder;
@@ -1317,10 +1319,14 @@ public void testTransactionBatchEmptyCommit() throws Exception {
connection.close();
}
+  /**
+   * Starting with HDFS 3.3.1, the underlying file system supports hflush, so
+   * no exception should be thrown here.
+   */
@Test
public void testTransactionBatchSizeValidation() throws Exception {
final String schemes = conf.get(HiveConf.ConfVars.HIVE_BLOBSTORE_SUPPORTED_SCHEMES.varname);
- // the output stream of this FS doesn't support hflush, so the below test will fail
+    // the output stream of this FS did not support hflush earlier; now no exception should be thrown
conf.setVar(HiveConf.ConfVars.HIVE_BLOBSTORE_SUPPORTED_SCHEMES, "raw");
StrictDelimitedInputWriter writer = StrictDelimitedInputWriter.newBuilder()
@@ -1337,10 +1343,6 @@ public void testTransactionBatchSizeValidation() throws Exception {
.withHiveConf(conf)
.connect();
- Assert.fail();
- } catch (ConnectionError e) {
- Assert.assertTrue("Expected connection error due to batch sizes",
- e.getMessage().contains("only supports transaction batch"));
} finally {
conf.setVar(HiveConf.ConfVars.HIVE_BLOBSTORE_SUPPORTED_SCHEMES, schemes);
}