6 changes: 5 additions & 1 deletion common/pom.xml
@@ -81,7 +81,6 @@
<dependency>
<groupId>org.eclipse.jetty</groupId>
<artifactId>jetty-http</artifactId>
<version>${jetty.version}</version>
</dependency>
<dependency>
<groupId>org.eclipse.jetty</groupId>
@@ -195,6 +194,11 @@
<artifactId>tez-api</artifactId>
<version>${tez.version}</version>
</dependency>
<dependency>
<groupId>org.fusesource.jansi</groupId>
<artifactId>jansi</artifactId>
<version>${jansi.version}</version>
</dependency>
<!-- test inter-project -->
<dependency>
<groupId>com.google.code.tempus-fugit</groupId>
16 changes: 16 additions & 0 deletions data/conf/hive-site.xml
@@ -409,4 +409,20 @@
<name>hive.txn.xlock.ctas</name>
<value>false</value>
</property>

<property>
<!-- Set large for tests. This acts as an artificial LIMIT. See HIVE-24484 for details -->
<name>hive.server2.thrift.resultset.max.fetch.size</name>
<value>1000000</value>
</property>

<property>
<name>hive.server2.webui.max.threads</name>
<value>4</value>
</property>

<property>
<name>hive.async.cleanup.service.thread.count</name>
<value>4</value>
</property>
</configuration>
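
For orientation, a minimal sketch of how a test could read the three new settings through HiveConf; it assumes this hive-site.xml is on the classpath (HiveConf extends Hadoop's Configuration and loads it by default), and the fallback defaults passed to getInt are placeholders, not values taken from the PR.

import org.apache.hadoop.hive.conf.HiveConf;

public class TestConfSketch {
  public static void main(String[] args) {
    HiveConf conf = new HiveConf();
    // Acts as an artificial LIMIT on fetched rows; see HIVE-24484.
    int maxFetch = conf.getInt("hive.server2.thrift.resultset.max.fetch.size", 10000);
    // Thread pools sized down for the test environment.
    int webUiThreads = conf.getInt("hive.server2.webui.max.threads", 50);
    int cleanupThreads = conf.getInt("hive.async.cleanup.service.thread.count", 10);
    System.out.println(maxFetch + " / " + webUiThreads + " / " + cleanupThreads);
  }
}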
@@ -32,6 +32,7 @@
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
import org.apache.hadoop.hive.metastore.MetaStoreTestUtils;
@@ -279,6 +280,10 @@ public void testOutputFormat() throws Throwable {
infoList.add(OutputJobInfo.create("default", tableNames[1], partitionValues));
infoList.add(OutputJobInfo.create("default", tableNames[2], partitionValues));

// There are tests that check file permissions (which are manually set)
// Disable NN ACLS so that the manual permissions are observed
hiveConf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, false);

Job job = new Job(hiveConf, "SampleJob");

job.setMapperClass(MyMapper.class);
@@ -315,18 +320,18 @@

// Check permission on partition dirs and files created
for (int i = 0; i < tableNames.length; i++) {
Path partitionFile = new Path(warehousedir + "/" + tableNames[i]
+ "/ds=1/cluster=ag/part-m-00000");
FileSystem fs = partitionFile.getFileSystem(mrConf);
Assert.assertEquals("File permissions of table " + tableNames[i] + " is not correct",
fs.getFileStatus(partitionFile).getPermission(),
new FsPermission(tablePerms[i]));
Assert.assertEquals("File permissions of table " + tableNames[i] + " is not correct",
fs.getFileStatus(partitionFile.getParent()).getPermission(),
new FsPermission(tablePerms[i]));
Assert.assertEquals("File permissions of table " + tableNames[i] + " is not correct",
fs.getFileStatus(partitionFile.getParent().getParent()).getPermission(),
new FsPermission(tablePerms[i]));
final Path partitionFile = new Path(warehousedir + "/" + tableNames[i] + "/ds=1/cluster=ag/part-m-00000");

final FileSystem fs = partitionFile.getFileSystem(mrConf);

Assert.assertEquals("File permissions of table " + tableNames[i] + " is not correct [" + partitionFile + "]",
new FsPermission(tablePerms[i]), fs.getFileStatus(partitionFile).getPermission());
Assert.assertEquals(
"File permissions of table " + tableNames[i] + " is not correct [" + partitionFile + "]",
new FsPermission(tablePerms[i]), fs.getFileStatus(partitionFile).getPermission());
Assert.assertEquals(
"File permissions of table " + tableNames[i] + " is not correct [" + partitionFile.getParent() + "]",
new FsPermission(tablePerms[i]), fs.getFileStatus(partitionFile.getParent()).getPermission());

}
LOG.info("File permissions verified");
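
Condensed into a hedged sketch, the pattern this hunk adopts: NameNode ACLs are switched off before the configuration reaches the cluster and job, so the permissions the test sets explicitly are exactly what getFileStatus reports, and each path is then compared against the expected FsPermission. The class and method names below are illustrative, not taken from the PR.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.junit.Assert;

class PermissionAssertSketch {

  // Must run before the conf is handed to the MiniDFS cluster / MR job: with
  // dfs.namenode.acls.enabled=false there is no ACL masking on top of the
  // permissions the test sets manually.
  static void disableNameNodeAcls(Configuration conf) {
    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, false);
  }

  // One permission check, mirroring the loop over the partition file and its
  // parent directories in the hunk above.
  static void assertPerm(FileSystem fs, Path path, String expectedPerm) throws IOException {
    Assert.assertEquals("File permissions are not correct [" + path + "]",
        new FsPermission(expectedPerm), fs.getFileStatus(path).getPermission());
  }
}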
@@ -37,11 +37,13 @@
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.IMetaStoreClient;
import org.apache.hadoop.hive.metastore.MetaStoreTestUtils;
import org.apache.hadoop.hive.metastore.TransactionalMetaStoreEventListener;
import org.apache.hadoop.hive.metastore.Warehouse;
import org.apache.hadoop.hive.metastore.api.MetaException;
import org.apache.hadoop.hive.metastore.api.NotificationEvent;
import org.apache.hadoop.hive.metastore.api.PartitionEventType;
import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
import org.apache.hadoop.hive.metastore.messaging.MessageEncoder;
import org.apache.hadoop.hive.metastore.messaging.json.JSONMessageEncoder;
import org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat;
import org.apache.hadoop.hive.ql.io.RCFileInputFormat;
@@ -114,16 +116,16 @@ public static void startMetaStoreServer() throws Exception {
return;
}

// Set proxy user privilege and initialize the global state of ProxyUsers
Configuration conf = new Configuration();
conf.set("hadoop.proxyuser." + Utils.getUGI().getShortUserName() + ".hosts", "*");
ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
Configuration conf = MetastoreConf.newMetastoreConf();

System.setProperty(HiveConf.ConfVars.METASTORE_TRANSACTIONAL_EVENT_LISTENERS.varname,
DbNotificationListener.class.getName()); // turn on db notification listener on metastore
System.setProperty(MetastoreConf.ConfVars.EVENT_MESSAGE_FACTORY.getHiveName(),
JSONMessageEncoder.class.getName());
msPort = MetaStoreTestUtils.startMetaStoreWithRetry();
// Disable proxy authorization white-list for testing
MetastoreConf.setBoolVar(conf, MetastoreConf.ConfVars.EVENT_DB_NOTIFICATION_API_AUTH, false);

// turn on db notification listener on metastore
MetastoreConf.setClass(conf, MetastoreConf.ConfVars.TRANSACTIONAL_EVENT_LISTENERS, DbNotificationListener.class, TransactionalMetaStoreEventListener.class);
MetastoreConf.setClass(conf, MetastoreConf.ConfVars.EVENT_MESSAGE_FACTORY, JSONMessageEncoder.class, MessageEncoder.class);

msPort = MetaStoreTestUtils.startMetaStoreWithRetry(conf);
securityManager = System.getSecurityManager();
System.setSecurityManager(new NoExitSecurityManager());
Policy.setPolicy(new DerbyPolicy());
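
Pulled together as a hedged sketch, the setup style this hunk moves to: build the configuration with MetastoreConf.newMetastoreConf(), set the notification listener and message encoder as typed values instead of system properties, and pass the conf to startMetaStoreWithRetry. The DbNotificationListener import path and the int port return type are assumptions not visible in the hunk.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.metastore.MetaStoreTestUtils;
import org.apache.hadoop.hive.metastore.TransactionalMetaStoreEventListener;
import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
import org.apache.hadoop.hive.metastore.messaging.MessageEncoder;
import org.apache.hadoop.hive.metastore.messaging.json.JSONMessageEncoder;
import org.apache.hive.hcatalog.listener.DbNotificationListener;

class MetastoreTestSetupSketch {
  static int startTestMetastore() throws Exception {
    Configuration conf = MetastoreConf.newMetastoreConf();

    // Relax the proxy-authorization white-list on the notification API for tests.
    MetastoreConf.setBoolVar(conf, MetastoreConf.ConfVars.EVENT_DB_NOTIFICATION_API_AUTH, false);

    // Register the DB notification listener and the JSON message encoder as
    // typed configuration values.
    MetastoreConf.setClass(conf, MetastoreConf.ConfVars.TRANSACTIONAL_EVENT_LISTENERS,
        DbNotificationListener.class, TransactionalMetaStoreEventListener.class);
    MetastoreConf.setClass(conf, MetastoreConf.ConfVars.EVENT_MESSAGE_FACTORY,
        JSONMessageEncoder.class, MessageEncoder.class);

    // Start the metastore on a free port and return it to the caller.
    return MetaStoreTestUtils.startMetaStoreWithRetry(conf);
  }
}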
@@ -56,14 +56,15 @@ static void internalBeforeClassSetup(Map<String, String> overrides, Class clazz)
throws Exception {
conf = new HiveConf(clazz);
conf.set("dfs.client.use.datanode.hostname", "true");
conf.set("hadoop.proxyuser." + Utils.getUGI().getShortUserName() + ".hosts", "*");
conf.set("hive.repl.cmrootdir", "/tmp/");
conf.set("dfs.namenode.acls.enabled", "true");
MiniDFSCluster miniDFSCluster =
new MiniDFSCluster.Builder(conf).numDataNodes(2).format(true).build();
Map<String, String> localOverrides = new HashMap<String, String>() {{
put("fs.defaultFS", miniDFSCluster.getFileSystem().getUri().toString());
put(HiveConf.ConfVars.HIVE_IN_TEST_REPL.varname, "true");
// Disable proxy authorization white-list for testing
put(MetastoreConf.ConfVars.EVENT_DB_NOTIFICATION_API_AUTH.getVarname(), "false");
}};
localOverrides.putAll(overrides);
setFullyQualifiedReplicaExternalTableBase(miniDFSCluster.getFileSystem());
@@ -88,7 +89,6 @@ static void internalBeforeClassSetupExclusiveReplica(Map<String, String> primary
replicaConf = new HiveConf(clazz);
replicaConf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, replicaBaseDir);
replicaConf.set("dfs.client.use.datanode.hostname", "true");
replicaConf.set("hadoop.proxyuser." + Utils.getUGI().getShortUserName() + ".hosts", "*");
MiniDFSCluster miniReplicaDFSCluster =
new MiniDFSCluster.Builder(replicaConf).numDataNodes(2).format(true).build();

@@ -97,7 +97,6 @@
conf = new HiveConf(clazz);
conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, primaryBaseDir);
conf.set("dfs.client.use.datanode.hostname", "true");
conf.set("hadoop.proxyuser." + Utils.getUGI().getShortUserName() + ".hosts", "*");
MiniDFSCluster miniPrimaryDFSCluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).format(true).build();

// Setup primary warehouse.
@@ -106,6 +105,8 @@ static void internalBeforeClassSetupExclusiveReplica(Map<String, String> primary
localOverrides.put(HiveConf.ConfVars.HIVE_IN_TEST_REPL.varname, "true");
localOverrides.put(HiveConf.ConfVars.REPL_EXTERNAL_TABLE_BASE_DIR.varname, fullyQualifiedReplicaExternalBase);
localOverrides.put("fs.defaultFS", miniPrimaryDFSCluster.getFileSystem().getUri().toString());
// Disable proxy authorization white-list for testing
localOverrides.put(MetastoreConf.ConfVars.EVENT_DB_NOTIFICATION_API_AUTH.getVarname(), "false");
localOverrides.putAll(primaryOverrides);
primary = new WarehouseInstance(LOG, miniPrimaryDFSCluster, localOverrides);

@@ -114,6 +115,7 @@ static void internalBeforeClassSetupExclusiveReplica(Map<String, String> primary
localOverrides.put(HiveConf.ConfVars.REPL_EXTERNAL_TABLE_BASE_DIR.varname, fullyQualifiedReplicaExternalBase);
localOverrides.put("fs.defaultFS", miniReplicaDFSCluster.getFileSystem().getUri().toString());
localOverrides.put(HiveConf.ConfVars.HIVE_IN_TEST_REPL.varname, "true");
localOverrides.put(MetastoreConf.ConfVars.EVENT_DB_NOTIFICATION_API_AUTH.getVarname(), "false");
localOverrides.putAll(replicaOverrides);
replica = new WarehouseInstance(LOG, miniReplicaDFSCluster, localOverrides);
}
@@ -28,7 +28,6 @@
import org.junit.Assert;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Ignore;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.TestName;
@@ -60,8 +59,8 @@ public class TestReplicationOnHDFSEncryptedZones {
@BeforeClass
public static void beforeClassSetup() throws Exception {
System.setProperty("jceks.key.serialFilter", "java.lang.Enum;java.security.KeyRep;" +
"java.security.KeyRep$Type;javax.crypto.spec.SecretKeySpec;" +
"org.apache.hadoop.crypto.key.JavaKeyStoreProvider$KeyMetadata;!*");
"java.security.KeyRep$Type;javax.crypto.spec.SecretKeySpec;" +
"org.apache.hadoop.crypto.key.JavaKeyStoreProvider$KeyMetadata;!*");
conf = new Configuration();
conf.set("dfs.client.use.datanode.hostname", "true");
conf.set("hadoop.proxyuser." + Utils.getUGI().getShortUserName() + ".hosts", "*");
@@ -95,7 +94,7 @@ public void setup() throws Throwable {
primaryDbName = testName.getMethodName() + "_" + +System.currentTimeMillis();
replicatedDbName = "replicated_" + primaryDbName;
primary.run("create database " + primaryDbName + " WITH DBPROPERTIES ( '" +
SOURCE_OF_REPLICATION + "' = '1,2,3')");
SOURCE_OF_REPLICATION + "' = '1,2,3')");
}

@Test
@@ -109,74 +108,38 @@ public void targetAndSourceHaveDifferentEncryptionZoneKeys() throws Throwable {
replicaConf.setBoolean("dfs.namenode.delegation.token.always-use", true);

MiniDFSCluster miniReplicaDFSCluster =
new MiniDFSCluster.Builder(replicaConf).numDataNodes(2).format(true).build();
new MiniDFSCluster.Builder(replicaConf).numDataNodes(2).format(true).build();
replicaConf.setBoolean(METASTORE_AGGREGATE_STATS_CACHE_ENABLED.varname, false);

DFSTestUtil.createKey("test_key123", miniReplicaDFSCluster, replicaConf);

WarehouseInstance replica = new WarehouseInstance(LOG, miniReplicaDFSCluster,
new HashMap<String, String>() {{
put(HiveConf.ConfVars.HIVE_IN_TEST.varname, "false");
put(HiveConf.ConfVars.HIVE_SERVER2_ENABLE_DOAS.varname, "false");
put(HiveConf.ConfVars.HIVE_DISTCP_DOAS_USER.varname,
UserGroupInformation.getCurrentUser().getUserName());
put(HiveConf.ConfVars.REPLDIR.varname, primary.repldDir);
}}, "test_key123");

List<String> dumpWithClause = Arrays.asList(
"'hive.repl.add.raw.reserved.namespace'='true'",
"'" + HiveConf.ConfVars.REPL_EXTERNAL_TABLE_BASE_DIR.varname + "'='"
+ replica.externalTableWarehouseRoot + "'",
"'distcp.options.skipcrccheck'=''",
"'" + HiveConf.ConfVars.HIVE_SERVER2_ENABLE_DOAS.varname + "'='false'",
"'" + HiveConf.ConfVars.HIVE_DISTCP_DOAS_USER.varname + "'='"
+ UserGroupInformation.getCurrentUser().getUserName() +"'");
WarehouseInstance.Tuple tuple =
primary.run("use " + primaryDbName)
.run("create table encrypted_table (id int, value string)")
.run("insert into table encrypted_table values (1,'value1')")
.run("insert into table encrypted_table values (2,'value2')")
.dump(primaryDbName, dumpWithClause);

replica
.run("repl load " + primaryDbName + " into " + replicatedDbName
+ " with('hive.repl.add.raw.reserved.namespace'='true', "
+ "'hive.repl.replica.external.table.base.dir'='" + replica.externalTableWarehouseRoot + "', "
+ "'hive.exec.copyfile.maxsize'='0', 'distcp.options.skipcrccheck'='')")
.run("use " + replicatedDbName)
.run("repl status " + replicatedDbName)
.verifyResult(tuple.lastReplicationId);

try {
replica
.run("select value from encrypted_table")
.verifyResults(new String[] { "value1", "value2" });
Assert.fail("Src EZKey shouldn't be present on target");
} catch (Throwable e) {
while (e.getCause() != null) {
e = e.getCause();
}
Assert.assertTrue(e.getMessage().contains("KeyVersion name 'test_key@0' does not exist"));
}
new HashMap<String, String>() {{
put(HiveConf.ConfVars.HIVE_IN_TEST.varname, "false");
put(HiveConf.ConfVars.HIVE_SERVER2_ENABLE_DOAS.varname, "false");
put(HiveConf.ConfVars.HIVE_DISTCP_DOAS_USER.varname,
UserGroupInformation.getCurrentUser().getUserName());
put(HiveConf.ConfVars.REPLDIR.varname, primary.repldDir);
}}, "test_key123");

//read should pass without raw-byte distcp
dumpWithClause = Arrays.asList( "'" + HiveConf.ConfVars.REPL_EXTERNAL_TABLE_BASE_DIR.varname + "'='"
+ replica.externalTableWarehouseRoot + "'");
tuple = primary.run("use " + primaryDbName)
List<String> dumpWithClause = Arrays.asList( "'" + HiveConf.ConfVars.REPL_EXTERNAL_TABLE_BASE_DIR.varname + "'='"
+ replica.externalTableWarehouseRoot + "'");
WarehouseInstance.Tuple tuple =
primary.run("use " + primaryDbName)
.run("create external table encrypted_table2 (id int, value string)")
.run("insert into table encrypted_table2 values (1,'value1')")
.run("insert into table encrypted_table2 values (2,'value2')")
.dump(primaryDbName, dumpWithClause);

replica
.run("repl load " + primaryDbName + " into " + replicatedDbName
+ " with('hive.repl.replica.external.table.base.dir'='" + replica.externalTableWarehouseRoot + "', "
+ "'hive.exec.copyfile.maxsize'='0', 'distcp.options.skipcrccheck'='')")
.run("use " + replicatedDbName)
.run("repl status " + replicatedDbName)
.verifyResult(tuple.lastReplicationId)
.run("select value from encrypted_table2")
.verifyResults(new String[] { "value1", "value2" });
.run("repl load " + primaryDbName + " into " + replicatedDbName
+ " with('hive.repl.replica.external.table.base.dir'='" + replica.externalTableWarehouseRoot + "', "
+ "'hive.exec.copyfile.maxsize'='0', 'distcp.options.skipcrccheck'='')")
.run("use " + replicatedDbName)
.run("repl status " + replicatedDbName)
.run("select value from encrypted_table2")
.verifyResults(new String[] { "value1", "value2" });
}

@Test
@@ -190,7 +153,7 @@ public void targetAndSourceHaveSameEncryptionZoneKeys() throws Throwable {
replicaConf.setBoolean("dfs.namenode.delegation.token.always-use", true);

MiniDFSCluster miniReplicaDFSCluster =
new MiniDFSCluster.Builder(replicaConf).numDataNodes(2).format(true).build();
new MiniDFSCluster.Builder(replicaConf).numDataNodes(2).format(true).build();
replicaConf.setBoolean(METASTORE_AGGREGATE_STATS_CACHE_ENABLED.varname, false);

WarehouseInstance replica = new WarehouseInstance(LOG, miniReplicaDFSCluster,
@@ -203,13 +166,13 @@
}}, "test_key");

List<String> dumpWithClause = Arrays.asList(
"'hive.repl.add.raw.reserved.namespace'='true'",
"'" + HiveConf.ConfVars.REPL_EXTERNAL_TABLE_BASE_DIR.varname + "'='"
+ replica.externalTableWarehouseRoot + "'",
"'distcp.options.skipcrccheck'=''",
"'" + HiveConf.ConfVars.HIVE_SERVER2_ENABLE_DOAS.varname + "'='false'",
"'" + HiveConf.ConfVars.HIVE_DISTCP_DOAS_USER.varname + "'='"
+ UserGroupInformation.getCurrentUser().getUserName() +"'");
"'hive.repl.add.raw.reserved.namespace'='true'",
"'" + HiveConf.ConfVars.REPL_EXTERNAL_TABLE_BASE_DIR.varname + "'='"
+ replica.externalTableWarehouseRoot + "'",
"'distcp.options.skipcrccheck'=''",
"'" + HiveConf.ConfVars.HIVE_SERVER2_ENABLE_DOAS.varname + "'='false'",
"'" + HiveConf.ConfVars.HIVE_DISTCP_DOAS_USER.varname + "'='"
+ UserGroupInformation.getCurrentUser().getUserName() +"'");

WarehouseInstance.Tuple tuple =
primary.run("use " + primaryDbName)
@@ -229,4 +192,4 @@ public void targetAndSourceHaveSameEncryptionZoneKeys() throws Throwable {
.run("select value from encrypted_table")
.verifyResults(new String[] { "value1", "value2" });
}
}
}