From 4ad10b099bd83422567e5472b5b7ac240eed714b Mon Sep 17 00:00:00 2001 From: Zhongting Hu Date: Thu, 21 Mar 2019 10:57:02 -0700 Subject: [PATCH] Hive Meta Store impersonation access This commits squash the original commits from PR https://github.com/prestodb/presto/pull/13699 which includes the follow commits: HMS impersonation access refactoring to use HMS Authentication Module add Config for multiple hms instances Update HMS memory settings address review comments --- pom.xml | 6 + .../main/sphinx/connector/hive-security.rst | 2 + .../src/main/sphinx/connector/hive.rst | 1 + presto-hive-hadoop2/bin/run_hive_tests.sh | 5 + presto-hive-hadoop2/conf/docker-compose.yml | 1 + presto-hive-hadoop2/conf/files/hive-env.sh | 1 + .../presto/hive/MetastoreClientConfig.java | 29 + .../HiveMetastoreAuthentication.java | 13 + .../NoHiveMetastoreAuthentication.java | 13 + .../SemiTransactionalHiveMetastore.java | 281 +++++----- .../hive/metastore/thrift/HiveCluster.java | 7 +- .../thrift/HiveMetastoreApiStats.java | 6 + .../metastore/thrift/HiveMetastoreClient.java | 6 + .../thrift/HiveMetastoreClientFactory.java | 4 +- .../metastore/thrift/StaticHiveCluster.java | 24 +- .../thrift/StaticMetastoreConfig.java | 14 + .../metastore/thrift/ThriftHiveMetastore.java | 530 +++++++++--------- .../thrift/ThriftHiveMetastoreClient.java | 20 +- .../thrift/ThriftHiveMetastoreStats.java | 16 + .../metastore/thrift/ThriftMetastoreUtil.java | 22 +- .../hive/metastore/thrift/Transport.java | 11 +- .../metastore/TestCachingHiveMetastore.java | 22 +- .../metastore/TestMetastoreClientConfig.java | 10 +- .../thrift/MockHiveMetastoreClient.java | 14 + .../MockHiveMetastoreClientFactory.java | 2 +- .../thrift/TestStaticHiveCluster.java | 8 +- .../thrift/TestStaticMetastoreConfig.java | 11 +- .../metastore/thrift/TestingHiveCluster.java | 12 +- presto-hive/pom.xml | 5 + .../facebook/presto/hive/HiveMetadata.java | 133 ++--- .../presto/hive/HiveMetadataFactory.java | 14 +- 
.../presto/hive/HivePartitionManager.java | 17 +- .../presto/hive/HiveSplitManager.java | 3 +- .../facebook/presto/hive/HiveWriteUtils.java | 12 +- .../KerberosHiveMetastoreAuthentication.java | 119 +++- .../UserGroupInformationUtils.java | 4 +- .../hive/security/LegacyAccessControl.java | 2 +- .../security/SqlStandardAccessControl.java | 12 +- .../MetastoreHiveStatisticsProvider.java | 13 +- .../presto/hive/AbstractTestHiveClient.java | 117 ++-- .../hive/AbstractTestHiveFileSystem.java | 9 +- .../facebook/presto/hive/HiveTestUtils.java | 2 + ...TestingSemiTransactionalHiveMetastore.java | 78 +-- 43 files changed, 1013 insertions(+), 618 deletions(-) create mode 100644 presto-hive-hadoop2/conf/files/hive-env.sh diff --git a/pom.xml b/pom.xml index e1c0154e0045c..3370d4f6bf01c 100644 --- a/pom.xml +++ b/pom.xml @@ -1450,6 +1450,12 @@ 1.3.5-4 + + commons-codec + commons-codec + 1.10 + + org.apache.zookeeper zookeeper diff --git a/presto-docs/src/main/sphinx/connector/hive-security.rst b/presto-docs/src/main/sphinx/connector/hive-security.rst index f9d949b55dad0..08fb8239769c7 100644 --- a/presto-docs/src/main/sphinx/connector/hive-security.rst +++ b/presto-docs/src/main/sphinx/connector/hive-security.rst @@ -131,6 +131,8 @@ Property Name Description to the Hive metastore service. ``hive.metastore.client.keytab`` Hive metastore client keytab location. +``hive.metastore.impersonation.enabled`` Enable metastore end-user impersonation. 
+``hive.metastore.impersonation.user`` Default impersonation user when communicating with Hive Metastore ================================================== ============================================================ ``hive.metastore.authentication.type`` diff --git a/presto-docs/src/main/sphinx/connector/hive.rst b/presto-docs/src/main/sphinx/connector/hive.rst index 3eb2b7dfd691c..155e2ea383502 100644 --- a/presto-docs/src/main/sphinx/connector/hive.rst +++ b/presto-docs/src/main/sphinx/connector/hive.rst @@ -184,6 +184,7 @@ Property Name Description ``hive.s3select-pushdown.max-connections`` Maximum number of simultaneously open connections to S3 for 500 S3SelectPushdown. +``hive.metastore.multiple-instance.enabled`` Enable load balancing between multiple Metastore instances ================================================== ============================================================ ============ .. _s3selectpushdown: diff --git a/presto-hive-hadoop2/bin/run_hive_tests.sh b/presto-hive-hadoop2/bin/run_hive_tests.sh index 2b7a4b6eb667b..15312c1172fee 100755 --- a/presto-hive-hadoop2/bin/run_hive_tests.sh +++ b/presto-hive-hadoop2/bin/run_hive_tests.sh @@ -7,6 +7,11 @@ set -euo pipefail -x cleanup_docker_containers start_docker_containers +# restart HMS to pickup memory settings +exec_in_hadoop_master_container cp /etc/hadoop/conf/hive-env.sh /etc/hive/conf/hive-env.sh +exec_in_hadoop_master_container supervisorctl restart hive-metastore +retry check_hadoop + # generate test data exec_in_hadoop_master_container su hive -s /usr/bin/hive -f /files/sql/create-test.sql exec_in_hadoop_master_container su hive -s /usr/bin/hive -f /files/sql/create-test-hive13.sql diff --git a/presto-hive-hadoop2/conf/docker-compose.yml b/presto-hive-hadoop2/conf/docker-compose.yml index aa75a1727ecfa..888c5888b2eed 100644 --- a/presto-hive-hadoop2/conf/docker-compose.yml +++ b/presto-hive-hadoop2/conf/docker-compose.yml @@ -17,6 +17,7 @@ services: - 
../../presto-hive/src/test/sql:/files/sql:ro - ./files/words:/usr/share/dict/words:ro - ./files/core-site.xml.s3-template:/etc/hadoop/conf/core-site.xml.s3-template:ro + - ./files/hive-env.sh:/etc/hadoop/conf/hive-env.sh:ro - ./files/test1.csv:/tmp/test1.csv:ro - ./files/test1.csv.gz:/tmp/test1.csv.gz:ro - ./files/test1.csv.lz4:/tmp/test1.csv.lz4:ro diff --git a/presto-hive-hadoop2/conf/files/hive-env.sh b/presto-hive-hadoop2/conf/files/hive-env.sh new file mode 100644 index 0000000000000..452c42faf7787 --- /dev/null +++ b/presto-hive-hadoop2/conf/files/hive-env.sh @@ -0,0 +1 @@ +export HADOOP_OPTS="$HADOOP_OPTS -Xmx1024m" \ No newline at end of file diff --git a/presto-hive-metastore/src/main/java/com/facebook/presto/hive/MetastoreClientConfig.java b/presto-hive-metastore/src/main/java/com/facebook/presto/hive/MetastoreClientConfig.java index 3a349b5f8be59..71b7e2a11a3e9 100644 --- a/presto-hive-metastore/src/main/java/com/facebook/presto/hive/MetastoreClientConfig.java +++ b/presto-hive-metastore/src/main/java/com/facebook/presto/hive/MetastoreClientConfig.java @@ -14,6 +14,7 @@ package com.facebook.presto.hive; import com.facebook.airlift.configuration.Config; +import com.facebook.airlift.configuration.ConfigDescription; import com.google.common.net.HostAndPort; import io.airlift.units.Duration; import io.airlift.units.MinDuration; @@ -41,6 +42,8 @@ public class MetastoreClientConfig private String recordingPath; private boolean replay; private Duration recordingDuration = new Duration(0, MINUTES); + private String metastoreDefaultImpersonationUser = ""; + private boolean metastoreImpersonationEnabled; public HostAndPort getMetastoreSocksProxy() { @@ -194,4 +197,30 @@ public MetastoreClientConfig setRequireHadoopNative(boolean requireHadoopNative) this.requireHadoopNative = requireHadoopNative; return this; } + + public boolean isMetastoreImpersonationEnabled() + { + return metastoreImpersonationEnabled; + } + + @Config("hive.metastore.impersonation.enabled") + 
@ConfigDescription("Should Presto user be impersonated when communicating with Hive Metastore") + public MetastoreClientConfig setMetastoreImpersonationEnabled(boolean metastoreImpersonationEnabled) + { + this.metastoreImpersonationEnabled = metastoreImpersonationEnabled; + return this; + } + + public String getMetastoreDefaultImpersonationUser() + { + return metastoreDefaultImpersonationUser; + } + + @Config("hive.metastore.impersonation.user") + @ConfigDescription("Default impersonation user when communicating with Hive Metastore") + public MetastoreClientConfig setMetastoreDefaultImpersonationUser(String metastoreDefaultImpersonationUser) + { + this.metastoreDefaultImpersonationUser = metastoreDefaultImpersonationUser; + return this; + } } diff --git a/presto-hive-metastore/src/main/java/com/facebook/presto/hive/authentication/HiveMetastoreAuthentication.java b/presto-hive-metastore/src/main/java/com/facebook/presto/hive/authentication/HiveMetastoreAuthentication.java index 67c2ba90297c1..e8c324c881f2e 100644 --- a/presto-hive-metastore/src/main/java/com/facebook/presto/hive/authentication/HiveMetastoreAuthentication.java +++ b/presto-hive-metastore/src/main/java/com/facebook/presto/hive/authentication/HiveMetastoreAuthentication.java @@ -18,4 +18,17 @@ public interface HiveMetastoreAuthentication { TTransport authenticate(TTransport rawTransport, String hiveMetastoreHost); + + TTransport authenticateWithToken(TTransport rawTransport, String tokenString); + + R doAs(String user, GenericExceptionAction action) + throws E; + + default void doAs(String user, Runnable action) + { + doAs(user, () -> { + action.run(); + return null; + }); + } } diff --git a/presto-hive-metastore/src/main/java/com/facebook/presto/hive/authentication/NoHiveMetastoreAuthentication.java b/presto-hive-metastore/src/main/java/com/facebook/presto/hive/authentication/NoHiveMetastoreAuthentication.java index 6d9cf99d0e698..718f8d5f04bd9 100644 --- 
a/presto-hive-metastore/src/main/java/com/facebook/presto/hive/authentication/NoHiveMetastoreAuthentication.java +++ b/presto-hive-metastore/src/main/java/com/facebook/presto/hive/authentication/NoHiveMetastoreAuthentication.java @@ -23,4 +23,17 @@ public TTransport authenticate(TTransport rawTransport, String hiveMetastoreHost { return rawTransport; } + + @Override + public TTransport authenticateWithToken(TTransport rawTransport, String tokenString) + { + return rawTransport; + } + + @Override + public R doAs(String user, GenericExceptionAction action) + throws E + { + return action.run(); + } } diff --git a/presto-hive-metastore/src/main/java/com/facebook/presto/hive/metastore/SemiTransactionalHiveMetastore.java b/presto-hive-metastore/src/main/java/com/facebook/presto/hive/metastore/SemiTransactionalHiveMetastore.java index a661e540b49bc..ce195e6d4cc15 100644 --- a/presto-hive-metastore/src/main/java/com/facebook/presto/hive/metastore/SemiTransactionalHiveMetastore.java +++ b/presto-hive-metastore/src/main/java/com/facebook/presto/hive/metastore/SemiTransactionalHiveMetastore.java @@ -21,12 +21,14 @@ import com.facebook.presto.hive.LocationHandle.WriteMode; import com.facebook.presto.hive.PartitionNotFoundException; import com.facebook.presto.hive.TableAlreadyExistsException; +import com.facebook.presto.hive.authentication.HiveMetastoreAuthentication; import com.facebook.presto.spi.ConnectorSession; import com.facebook.presto.spi.PrestoException; import com.facebook.presto.spi.SchemaTableName; import com.facebook.presto.spi.StandardErrorCode; import com.facebook.presto.spi.TableNotFoundException; import com.facebook.presto.spi.predicate.Domain; +import com.facebook.presto.spi.security.ConnectorIdentity; import com.facebook.presto.spi.security.PrestoPrincipal; import com.facebook.presto.spi.security.PrincipalType; import com.facebook.presto.spi.security.RoleGrant; @@ -106,6 +108,7 @@ public class SemiTransactionalHiveMetastore private final ExtendedHiveMetastore 
delegate; private final HdfsEnvironment hdfsEnvironment; private final ListeningExecutorService renameExecutor; + private final HiveMetastoreAuthentication metastoreAuthentication; private final boolean skipDeletionForAlter; private final boolean skipTargetCleanupOnRollback; @@ -125,43 +128,45 @@ public SemiTransactionalHiveMetastore( HdfsEnvironment hdfsEnvironment, ExtendedHiveMetastore delegate, ListeningExecutorService renameExecutor, + HiveMetastoreAuthentication metastoreAuthentication, boolean skipDeletionForAlter, boolean skipTargetCleanupOnRollback) { this.hdfsEnvironment = requireNonNull(hdfsEnvironment, "hdfsEnvironment is null"); this.delegate = requireNonNull(delegate, "delegate is null"); this.renameExecutor = requireNonNull(renameExecutor, "renameExecutor is null"); + this.metastoreAuthentication = requireNonNull(metastoreAuthentication, "metastoreAuthentication is null"); this.skipDeletionForAlter = skipDeletionForAlter; this.skipTargetCleanupOnRollback = skipTargetCleanupOnRollback; } - public synchronized List getAllDatabases() + public synchronized List getAllDatabases(ConnectorIdentity identity) { checkReadable(); - return delegate.getAllDatabases(); + return metastoreAuthentication.doAs(identity.getUser(), delegate::getAllDatabases); } - public synchronized Optional getDatabase(String databaseName) + public synchronized Optional getDatabase(ConnectorIdentity identity, String databaseName) { checkReadable(); - return delegate.getDatabase(databaseName); + return metastoreAuthentication.doAs(identity.getUser(), () -> delegate.getDatabase(databaseName)); } - public synchronized Optional> getAllTables(String databaseName) + public synchronized Optional> getAllTables(ConnectorIdentity identity, String databaseName) { checkReadable(); if (!tableActions.isEmpty()) { throw new UnsupportedOperationException("Listing all tables after adding/dropping/altering tables/views in a transaction is not supported"); } - return delegate.getAllTables(databaseName); 
+ return metastoreAuthentication.doAs(identity.getUser(), () -> delegate.getAllTables(databaseName)); } - public synchronized Optional getTable(String databaseName, String tableName) + public synchronized Optional
getTable(ConnectorIdentity identity, String databaseName, String tableName) { checkReadable(); Action tableAction = tableActions.get(new SchemaTableName(databaseName, tableName)); if (tableAction == null) { - return delegate.getTable(databaseName, tableName); + return metastoreAuthentication.doAs(identity.getUser(), () -> delegate.getTable(databaseName, tableName)); } switch (tableAction.getType()) { case ADD: @@ -175,17 +180,17 @@ public synchronized Optional
getTable(String databaseName, String tableNa } } - public synchronized Set getSupportedColumnStatistics(Type type) + public synchronized Set getSupportedColumnStatistics(ConnectorIdentity identity, Type type) { - return delegate.getSupportedColumnStatistics(type); + return metastoreAuthentication.doAs(identity.getUser(), () -> delegate.getSupportedColumnStatistics(type)); } - public synchronized PartitionStatistics getTableStatistics(String databaseName, String tableName) + public synchronized PartitionStatistics getTableStatistics(ConnectorIdentity identity, String databaseName, String tableName) { checkReadable(); Action tableAction = tableActions.get(new SchemaTableName(databaseName, tableName)); if (tableAction == null) { - return delegate.getTableStatistics(databaseName, tableName); + return metastoreAuthentication.doAs(identity.getUser(), () -> delegate.getTableStatistics(databaseName, tableName)); } switch (tableAction.getType()) { case ADD: @@ -199,10 +204,10 @@ public synchronized PartitionStatistics getTableStatistics(String databaseName, } } - public synchronized Map getPartitionStatistics(String databaseName, String tableName, Set partitionNames) + public synchronized Map getPartitionStatistics(ConnectorIdentity identity, String databaseName, String tableName, Set partitionNames) { checkReadable(); - Optional
table = getTable(databaseName, tableName); + Optional
table = getTable(identity, databaseName, tableName); if (!table.isPresent()) { return ImmutableMap.of(); } @@ -230,7 +235,7 @@ public synchronized Map getPartitionStatistics(Stri } } - Map delegateResult = delegate.getPartitionStatistics(databaseName, tableName, partitionNamesToQuery.build()); + Map delegateResult = metastoreAuthentication.doAs(identity.getUser(), () -> delegate.getPartitionStatistics(databaseName, tableName, partitionNamesToQuery.build())); if (!delegateResult.isEmpty()) { resultBuilder.putAll(delegateResult); } @@ -267,10 +272,10 @@ private TableSource getTableSource(String databaseName, String tableName) } } - public synchronized HivePageSinkMetadata generatePageSinkMetadata(SchemaTableName schemaTableName) + public synchronized HivePageSinkMetadata generatePageSinkMetadata(ConnectorIdentity identity, SchemaTableName schemaTableName) { checkReadable(); - Optional
table = getTable(schemaTableName.getSchemaName(), schemaTableName.getTableName()); + Optional
table = getTable(identity, schemaTableName.getSchemaName(), schemaTableName.getTableName()); if (!table.isPresent()) { return new HivePageSinkMetadata(schemaTableName, Optional.empty(), ImmutableMap.of()); } @@ -292,47 +297,46 @@ public synchronized HivePageSinkMetadata generatePageSinkMetadata(SchemaTableNam modifiedPartitionMap); } - public synchronized Optional> getAllViews(String databaseName) + public synchronized Optional> getAllViews(ConnectorIdentity identity, String databaseName) { checkReadable(); if (!tableActions.isEmpty()) { throw new UnsupportedOperationException("Listing all tables after adding/dropping/altering tables/views in a transaction is not supported"); } - return delegate.getAllViews(databaseName); + return metastoreAuthentication.doAs(identity.getUser(), () -> delegate.getAllViews(databaseName)); } - public synchronized void createDatabase(Database database) + public synchronized void createDatabase(ConnectorIdentity identity, Database database) { - setExclusive((delegate, hdfsEnvironment) -> delegate.createDatabase(database)); + setExclusive((delegate, hdfsEnvironment, metastoreAuthentication) -> metastoreAuthentication.doAs(identity.getUser(), () -> delegate.createDatabase(database))); } - public synchronized void dropDatabase(String schemaName) + public synchronized void dropDatabase(ConnectorIdentity identity, String schemaName) { - setExclusive((delegate, hdfsEnvironment) -> delegate.dropDatabase(schemaName)); + setExclusive((delegate, hdfsEnvironment, metastoreAuthentication) -> metastoreAuthentication.doAs(identity.getUser(), () -> delegate.dropDatabase(schemaName))); } - public synchronized void renameDatabase(String source, String target) + public synchronized void renameDatabase(ConnectorIdentity identity, String source, String target) { - setExclusive((delegate, hdfsEnvironment) -> delegate.renameDatabase(source, target)); + setExclusive((delegate, hdfsEnvironment, metastoreAuthentication) -> 
metastoreAuthentication.doAs(identity.getUser(), () -> delegate.renameDatabase(source, target))); } // TODO: Allow updating statistics for 2 tables in the same transaction - public synchronized void setTableStatistics(Table table, PartitionStatistics tableStatistics) + public synchronized void setTableStatistics(ConnectorIdentity identity, Table table, PartitionStatistics tableStatistics) { - setExclusive((delegate, hdfsEnvironment) -> - delegate.updateTableStatistics(table.getDatabaseName(), table.getTableName(), statistics -> updatePartitionStatistics(statistics, tableStatistics))); + setExclusive((delegate, hdfsEnvironment, metastoreAuthentication) -> metastoreAuthentication.doAs(identity.getUser(), () -> delegate.updateTableStatistics(table.getDatabaseName(), table.getTableName(), statistics -> updatePartitionStatistics(statistics, tableStatistics)))); } // TODO: Allow updating statistics for 2 tables in the same transaction - public synchronized void setPartitionStatistics(Table table, Map, PartitionStatistics> partitionStatisticsMap) + public synchronized void setPartitionStatistics(ConnectorIdentity identity, Table table, Map, PartitionStatistics> partitionStatisticsMap) { - setExclusive((delegate, hdfsEnvironment) -> + setExclusive((delegate, hdfsEnvironment, metastoreAuthentication) -> metastoreAuthentication.doAs(identity.getUser(), () -> partitionStatisticsMap.forEach((partitionValues, newPartitionStats) -> delegate.updatePartitionStatistics( table.getDatabaseName(), table.getTableName(), getPartitionName(table, partitionValues), - oldPartitionStats -> updatePartitionStatistics(oldPartitionStats, newPartitionStats)))); + oldPartitionStats -> updatePartitionStatistics(oldPartitionStats, newPartitionStats))))); } // For HiveBasicStatistics, we only overwrite the original statistics if the new one is not empty. 
@@ -413,29 +417,29 @@ public synchronized void dropTable(ConnectorSession session, String databaseName } } - public synchronized void replaceView(String databaseName, String tableName, Table table, PrincipalPrivileges principalPrivileges) + public synchronized void replaceView(ConnectorIdentity identity, String databaseName, String tableName, Table table, PrincipalPrivileges principalPrivileges) { - setExclusive((delegate, hdfsEnvironment) -> delegate.replaceTable(databaseName, tableName, table, principalPrivileges)); + setExclusive((delegate, hdfsEnvironment, metastoreAuthentication) -> metastoreAuthentication.doAs(identity.getUser(), () -> delegate.replaceTable(databaseName, tableName, table, principalPrivileges))); } - public synchronized void renameTable(String databaseName, String tableName, String newDatabaseName, String newTableName) + public synchronized void renameTable(ConnectorIdentity identity, String databaseName, String tableName, String newDatabaseName, String newTableName) { - setExclusive((delegate, hdfsEnvironment) -> delegate.renameTable(databaseName, tableName, newDatabaseName, newTableName)); + setExclusive((delegate, hdfsEnvironment, metastoreAuthentication) -> metastoreAuthentication.doAs(identity.getUser(), () -> delegate.renameTable(databaseName, tableName, newDatabaseName, newTableName))); } - public synchronized void addColumn(String databaseName, String tableName, String columnName, HiveType columnType, String columnComment) + public synchronized void addColumn(ConnectorIdentity identity, String databaseName, String tableName, String columnName, HiveType columnType, String columnComment) { - setExclusive((delegate, hdfsEnvironment) -> delegate.addColumn(databaseName, tableName, columnName, columnType, columnComment)); + setExclusive((delegate, hdfsEnvironment, metastoreAuthentication) -> metastoreAuthentication.doAs(identity.getUser(), () -> delegate.addColumn(databaseName, tableName, columnName, columnType, columnComment))); } - public 
synchronized void renameColumn(String databaseName, String tableName, String oldColumnName, String newColumnName) + public synchronized void renameColumn(ConnectorIdentity identity, String databaseName, String tableName, String oldColumnName, String newColumnName) { - setExclusive((delegate, hdfsEnvironment) -> delegate.renameColumn(databaseName, tableName, oldColumnName, newColumnName)); + setExclusive((delegate, hdfsEnvironment, metastoreAuthentication) -> metastoreAuthentication.doAs(identity.getUser(), () -> delegate.renameColumn(databaseName, tableName, oldColumnName, newColumnName))); } - public synchronized void dropColumn(String databaseName, String tableName, String columnName) + public synchronized void dropColumn(ConnectorIdentity identity, String databaseName, String tableName, String columnName) { - setExclusive((delegate, hdfsEnvironment) -> delegate.dropColumn(databaseName, tableName, columnName)); + setExclusive((delegate, hdfsEnvironment, metastoreAuthentication) -> metastoreAuthentication.doAs(identity.getUser(), () -> delegate.dropColumn(databaseName, tableName, columnName))); } public synchronized void finishInsertIntoExistingTable( @@ -452,9 +456,9 @@ public synchronized void finishInsertIntoExistingTable( SchemaTableName schemaTableName = new SchemaTableName(databaseName, tableName); Action oldTableAction = tableActions.get(schemaTableName); if (oldTableAction == null || oldTableAction.getData().getTable().getTableType().equals(TEMPORARY_TABLE)) { - Table table = getTable(databaseName, tableName) + Table table = getTable(session.getIdentity(), databaseName, tableName) .orElseThrow(() -> new TableNotFoundException(schemaTableName)); - PartitionStatistics currentStatistics = getTableStatistics(databaseName, tableName); + PartitionStatistics currentStatistics = getTableStatistics(session.getIdentity(), databaseName, tableName); HdfsContext context = new HdfsContext(session, databaseName, tableName); tableActions.put( schemaTableName, @@ -487,7 
+491,7 @@ public synchronized void finishInsertIntoExistingTable( public synchronized void truncateUnpartitionedTable(ConnectorSession session, String databaseName, String tableName) { checkReadable(); - Optional
table = getTable(databaseName, tableName); + Optional
table = getTable(session.getIdentity(), databaseName, tableName); SchemaTableName schemaTableName = new SchemaTableName(databaseName, tableName); if (!table.isPresent()) { throw new TableNotFoundException(schemaTableName); @@ -501,7 +505,7 @@ public synchronized void truncateUnpartitionedTable(ConnectorSession session, St Path path = new Path(table.get().getStorage().getLocation()); HdfsContext context = new HdfsContext(session, databaseName, tableName); - setExclusive((delegate, hdfsEnvironment) -> { + setExclusive((delegate, hdfsEnvironment, metastoreAuthentication) -> { RecursiveDeleteResult recursiveDeleteResult = recursiveDeleteFiles(hdfsEnvironment, context, path, ImmutableList.of(""), false); if (!recursiveDeleteResult.getNotDeletedEligibleItems().isEmpty()) { throw new PrestoException(HIVE_FILESYSTEM_ERROR, format( @@ -512,18 +516,19 @@ public synchronized void truncateUnpartitionedTable(ConnectorSession session, St }); } - public synchronized Optional> getPartitionNames(String databaseName, String tableName) + public synchronized Optional> getPartitionNames(ConnectorIdentity identity, String databaseName, String tableName) { - return doGetPartitionNames(databaseName, tableName, ImmutableMap.of()); + return doGetPartitionNames(identity, databaseName, tableName, ImmutableMap.of()); } - public synchronized Optional> getPartitionNamesByFilter(String databaseName, String tableName, Map effectivePredicate) + public synchronized Optional> getPartitionNamesByFilter(ConnectorIdentity identity, String databaseName, String tableName, Map effectivePredicate) { - return doGetPartitionNames(databaseName, tableName, effectivePredicate); + return doGetPartitionNames(identity, databaseName, tableName, effectivePredicate); } @GuardedBy("this") private Optional> doGetPartitionNames( + ConnectorIdentity identity, String databaseName, String tableName, Map partitionPredicates) @@ -531,7 +536,7 @@ private Optional> doGetPartitionNames( checkHoldsLock(); checkReadable(); - 
Optional
table = getTable(databaseName, tableName); + Optional
table = getTable(identity, databaseName, tableName); if (!table.isPresent()) { return Optional.empty(); } @@ -615,7 +620,7 @@ private static boolean partitionValuesMatch(List values, List pa return true; } - public synchronized Optional getPartition(String databaseName, String tableName, List partitionValues) + public synchronized Optional getPartition(ConnectorIdentity identity, String databaseName, String tableName, List partitionValues) { checkReadable(); TableSource tableSource = getTableSource(databaseName, tableName); @@ -626,7 +631,7 @@ public synchronized Optional getPartition(String databaseName, String } switch (tableSource) { case PRE_EXISTING_TABLE: - return delegate.getPartition(databaseName, tableName, partitionValues); + return hdfsEnvironment.doAs(identity.getUser(), () -> delegate.getPartition(databaseName, tableName, partitionValues)); case CREATED_IN_THIS_TRANSACTION: return Optional.empty(); default: @@ -634,7 +639,7 @@ public synchronized Optional getPartition(String databaseName, String } } - public synchronized Map> getPartitionsByNames(String databaseName, String tableName, List partitionNames) + public synchronized Map> getPartitionsByNames(ConnectorIdentity identity, String databaseName, String tableName, List partitionNames) { checkReadable(); TableSource tableSource = getTableSource(databaseName, tableName); @@ -660,7 +665,7 @@ public synchronized Map> getPartitionsByNames(String resultBuilder.put(partitionName, getPartitionFromPartitionAction(partitionAction)); } } - Map> delegateResult = delegate.getPartitionsByNames(databaseName, tableName, partitionNamesToQuery.build()); + Map> delegateResult = hdfsEnvironment.doAs(identity.getUser(), () -> delegate.getPartitionsByNames(databaseName, tableName, partitionNamesToQuery.build())); resultBuilder.putAll(delegateResult); return resultBuilder.build(); } @@ -755,10 +760,10 @@ public synchronized void finishInsertIntoExistingPartition( Map, Action> partitionActionsOfTable = 
partitionActions.computeIfAbsent(schemaTableName, k -> new HashMap<>()); Action oldPartitionAction = partitionActionsOfTable.get(partitionValues); if (oldPartitionAction == null) { - Partition partition = delegate.getPartition(databaseName, tableName, partitionValues) - .orElseThrow(() -> new PartitionNotFoundException(schemaTableName, partitionValues)); - String partitionName = getPartitionName(databaseName, tableName, partitionValues); - PartitionStatistics currentStatistics = delegate.getPartitionStatistics(databaseName, tableName, ImmutableSet.of(partitionName)).get(partitionName); + Partition partition = hdfsEnvironment.doAs(session.getIdentity().getUser(), () -> delegate.getPartition(databaseName, tableName, partitionValues) + .orElseThrow(() -> new PartitionNotFoundException(schemaTableName, partitionValues))); + String partitionName = getPartitionName(session.getIdentity(), databaseName, tableName, partitionValues); + PartitionStatistics currentStatistics = hdfsEnvironment.doAs(session.getIdentity().getUser(), () -> delegate.getPartitionStatistics(databaseName, tableName, ImmutableSet.of(partitionName)).get(partitionName)); if (currentStatistics == null) { throw new PrestoException(HIVE_METASTORE_ERROR, "currentStatistics is null"); } @@ -789,9 +794,9 @@ public synchronized void finishInsertIntoExistingPartition( } } - private String getPartitionName(String databaseName, String tableName, List partitionValues) + private String getPartitionName(ConnectorIdentity identity, String databaseName, String tableName, List partitionValues) { - Table table = getTable(databaseName, tableName) + Table table = getTable(identity, databaseName, tableName) .orElseThrow(() -> new TableNotFoundException(new SchemaTableName(databaseName, tableName))); return getPartitionName(table, partitionValues); } @@ -804,45 +809,45 @@ private String getPartitionName(Table table, List partitionValues) return makePartName(columnNames, partitionValues); } - public synchronized void 
createRole(String role, String grantor) + public synchronized void createRole(ConnectorIdentity identity, String role, String grantor) { - setExclusive((delegate, hdfsEnvironment) -> delegate.createRole(role, grantor)); + setExclusive((delegate, hdfsEnvironment, metastoreAuthentication) -> metastoreAuthentication.doAs(identity.getUser(), () -> delegate.createRole(role, grantor))); } - public synchronized void dropRole(String role) + public synchronized void dropRole(ConnectorIdentity identity, String role) { - setExclusive((delegate, hdfsEnvironment) -> delegate.dropRole(role)); + setExclusive((delegate, hdfsEnvironment, metastoreAuthentication) -> metastoreAuthentication.doAs(identity.getUser(), () -> delegate.dropRole(role))); } - public synchronized Set listRoles() + public synchronized Set listRoles(ConnectorIdentity identity) { checkReadable(); - return delegate.listRoles(); + return hdfsEnvironment.doAs(identity.getUser(), delegate::listRoles); } - public synchronized void grantRoles(Set roles, Set grantees, boolean withAdminOption, PrestoPrincipal grantor) + public synchronized void grantRoles(ConnectorIdentity identity, Set roles, Set grantees, boolean withAdminOption, PrestoPrincipal grantor) { - setExclusive((delegate, hdfsEnvironment) -> delegate.grantRoles(roles, grantees, withAdminOption, grantor)); + setExclusive((delegate, hdfsEnvironment, metastoreAuthentication) -> metastoreAuthentication.doAs(identity.getUser(), () -> delegate.grantRoles(roles, grantees, withAdminOption, grantor))); } - public synchronized void revokeRoles(Set roles, Set grantees, boolean adminOptionFor, PrestoPrincipal grantor) + public synchronized void revokeRoles(ConnectorIdentity identity, Set roles, Set grantees, boolean adminOptionFor, PrestoPrincipal grantor) { - setExclusive((delegate, hdfsEnvironment) -> delegate.revokeRoles(roles, grantees, adminOptionFor, grantor)); + setExclusive((delegate, hdfsEnvironment, metastoreAuthentication) -> 
metastoreAuthentication.doAs(identity.getUser(), () -> delegate.revokeRoles(roles, grantees, adminOptionFor, grantor))); } - public synchronized Set listRoleGrants(PrestoPrincipal principal) + public synchronized Set listRoleGrants(ConnectorIdentity identity, PrestoPrincipal principal) { checkReadable(); - return delegate.listRoleGrants(principal); + return hdfsEnvironment.doAs(identity.getUser(), () -> delegate.listRoleGrants(principal)); } - public synchronized Set listTablePrivileges(String databaseName, String tableName, PrestoPrincipal principal) + public synchronized Set listTablePrivileges(ConnectorIdentity identity, String databaseName, String tableName, PrestoPrincipal principal) { checkReadable(); SchemaTableName schemaTableName = new SchemaTableName(databaseName, tableName); Action tableAction = tableActions.get(schemaTableName); if (tableAction == null) { - return delegate.listTablePrivileges(databaseName, tableName, principal); + return hdfsEnvironment.doAs(identity.getUser(), () -> delegate.listTablePrivileges(databaseName, tableName, principal)); } switch (tableAction.getType()) { case ADD: @@ -860,7 +865,7 @@ public synchronized Set listTablePrivileges(String databaseNa .build(); } case INSERT_EXISTING: - return delegate.listTablePrivileges(databaseName, tableName, principal); + return hdfsEnvironment.doAs(identity.getUser(), () -> delegate.listTablePrivileges(databaseName, tableName, principal)); case DROP: throw new TableNotFoundException(schemaTableName); default: @@ -868,14 +873,14 @@ public synchronized Set listTablePrivileges(String databaseNa } } - public synchronized void grantTablePrivileges(String databaseName, String tableName, PrestoPrincipal grantee, Set privileges) + public synchronized void grantTablePrivileges(ConnectorIdentity identity, String databaseName, String tableName, PrestoPrincipal grantee, Set privileges) { - setExclusive((delegate, hdfsEnvironment) -> delegate.grantTablePrivileges(databaseName, tableName, grantee, 
privileges)); + setExclusive((delegate, hdfsEnvironment, metastoreAuthentication) -> metastoreAuthentication.doAs(identity.getUser(), () -> delegate.grantTablePrivileges(databaseName, tableName, grantee, privileges))); } - public synchronized void revokeTablePrivileges(String databaseName, String tableName, PrestoPrincipal grantee, Set privileges) + public synchronized void revokeTablePrivileges(ConnectorIdentity identity, String databaseName, String tableName, PrestoPrincipal grantee, Set privileges) { - setExclusive((delegate, hdfsEnvironment) -> delegate.revokeTablePrivileges(databaseName, tableName, grantee, privileges)); + setExclusive((delegate, hdfsEnvironment, metastoreAuthentication) -> metastoreAuthentication.doAs(identity.getUser(), () -> delegate.revokeTablePrivileges(databaseName, tableName, grantee, privileges))); } public synchronized void declareIntentionToWrite( @@ -909,7 +914,7 @@ public synchronized void commit() break; case EXCLUSIVE_OPERATION_BUFFERED: requireNonNull(bufferedExclusiveOperation, "bufferedExclusiveOperation is null"); - bufferedExclusiveOperation.execute(delegate, hdfsEnvironment); + bufferedExclusiveOperation.execute(delegate, hdfsEnvironment, metastoreAuthentication); break; case FINISHED: throw new IllegalStateException("Tried to commit buffered metastore operations after transaction has been committed/aborted"); @@ -955,16 +960,16 @@ private void commitShared() Action action = entry.getValue(); switch (action.getType()) { case DROP: - committer.prepareDropTable(schemaTableName); + committer.prepareDropTable(action.getContext().getIdentity(), schemaTableName); break; case ALTER: committer.prepareAlterTable(); break; case ADD: - committer.prepareAddTable(action.getContext(), action.getData()); + committer.prepareAddTable(action.getContext().getIdentity(), action.getContext(), action.getData()); break; case INSERT_EXISTING: - committer.prepareInsertExistingTable(action.getContext(), action.getData()); + 
committer.prepareInsertExistingTable(action.getContext().getIdentity(), action.getContext(), action.getData()); break; default: throw new IllegalStateException("Unknown action type"); @@ -977,16 +982,16 @@ private void commitShared() Action action = partitionEntry.getValue(); switch (action.getType()) { case DROP: - committer.prepareDropPartition(schemaTableName, partitionValues); + committer.prepareDropPartition(action.getContext().getIdentity(), schemaTableName, partitionValues); break; case ALTER: - committer.prepareAlterPartition(action.getContext(), action.getData()); + committer.prepareAlterPartition(action.getContext().getIdentity(), action.getContext(), action.getData()); break; case ADD: - committer.prepareAddPartition(action.getContext(), action.getData()); + committer.prepareAddPartition(action.getContext().getIdentity(), action.getContext(), action.getData()); break; case INSERT_EXISTING: - committer.prepareInsertExistingPartition(action.getContext(), action.getData()); + committer.prepareInsertExistingPartition(action.getContext().getIdentity(), action.getContext(), action.getData()); break; default: throw new IllegalStateException("Unknown action type"); @@ -1097,11 +1102,11 @@ private List> getFileRenameFutures() return ImmutableList.copyOf(fileRenameFutures); } - private void prepareDropTable(SchemaTableName schemaTableName) + private void prepareDropTable(ConnectorIdentity identity, SchemaTableName schemaTableName) { metastoreDeleteOperations.add(new IrreversibleMetastoreOperation( format("drop table %s", schemaTableName), - () -> delegate.dropTable(schemaTableName.getSchemaName(), schemaTableName.getTableName(), true))); + () -> hdfsEnvironment.doAs(identity.getUser(), () -> delegate.dropTable(schemaTableName.getSchemaName(), schemaTableName.getTableName(), true)))); } private void prepareAlterTable() @@ -1114,7 +1119,7 @@ private void prepareAlterTable() throw new UnsupportedOperationException("Dropping and then creating a table with the same 
name is not supported"); } - private void prepareAddTable(HdfsContext context, TableAndMore tableAndMore) + private void prepareAddTable(ConnectorIdentity identity, HdfsContext context, TableAndMore tableAndMore) { deleteOnly = false; @@ -1166,9 +1171,9 @@ private void prepareAddTable(HdfsContext context, TableAndMore tableAndMore) } } } - addTableOperations.add(new CreateTableOperation(table, tableAndMore.getPrincipalPrivileges(), tableAndMore.isIgnoreExisting())); + addTableOperations.add(new CreateTableOperation(identity, table, tableAndMore.getPrincipalPrivileges(), tableAndMore.isIgnoreExisting())); if (!isPrestoView(table)) { - updateStatisticsOperations.add(new UpdateStatisticsOperation( + updateStatisticsOperations.add(new UpdateStatisticsOperation(identity, new SchemaTableName(table.getDatabaseName(), table.getTableName()), Optional.empty(), tableAndMore.getStatisticsUpdate(), @@ -1176,7 +1181,7 @@ private void prepareAddTable(HdfsContext context, TableAndMore tableAndMore) } } - private void prepareInsertExistingTable(HdfsContext context, TableAndMore tableAndMore) + private void prepareInsertExistingTable(ConnectorIdentity identity, HdfsContext context, TableAndMore tableAndMore) { deleteOnly = false; @@ -1193,21 +1198,21 @@ private void prepareInsertExistingTable(HdfsContext context, TableAndMore tableA if (!targetPath.equals(currentPath)) { asyncRename(hdfsEnvironment, renameExecutor, fileRenameCancelled, fileRenameFutures, context, currentPath, targetPath, tableAndMore.getFileNames().get()); } - updateStatisticsOperations.add(new UpdateStatisticsOperation( + updateStatisticsOperations.add(new UpdateStatisticsOperation(identity, new SchemaTableName(table.getDatabaseName(), table.getTableName()), Optional.empty(), tableAndMore.getStatisticsUpdate(), true)); } - private void prepareDropPartition(SchemaTableName schemaTableName, List partitionValues) + private void prepareDropPartition(ConnectorIdentity identity, SchemaTableName schemaTableName, List 
partitionValues) { metastoreDeleteOperations.add(new IrreversibleMetastoreOperation( format("drop partition %s.%s %s", schemaTableName.getSchemaName(), schemaTableName.getTableName(), partitionValues), - () -> delegate.dropPartition(schemaTableName.getSchemaName(), schemaTableName.getTableName(), partitionValues, true))); + () -> hdfsEnvironment.doAs(identity.getUser(), () -> delegate.dropPartition(schemaTableName.getSchemaName(), schemaTableName.getTableName(), partitionValues, true)))); } - private void prepareAlterPartition(HdfsContext context, PartitionAndMore partitionAndMore) + private void prepareAlterPartition(ConnectorIdentity identity, HdfsContext context, PartitionAndMore partitionAndMore) { deleteOnly = false; @@ -1219,7 +1224,7 @@ private void prepareAlterPartition(HdfsContext context, PartitionAndMore partiti TRANSACTION_CONFLICT, format("The partition that this transaction modified was deleted in another transaction. %s %s", partition.getTableName(), partition.getValues())); } - String partitionName = getPartitionName(partition.getDatabaseName(), partition.getTableName(), partition.getValues()); + String partitionName = getPartitionName(identity, partition.getDatabaseName(), partition.getTableName(), partition.getValues()); PartitionStatistics oldPartitionStatistics = getExistingPartitionStatistics(partition, partitionName); String oldPartitionLocation = oldPartition.get().getStorage().getLocation(); Path oldPartitionPath = new Path(oldPartitionLocation); @@ -1261,7 +1266,7 @@ private void prepareAlterPartition(HdfsContext context, PartitionAndMore partiti } // Partition alter must happen regardless of whether original and current location is the same // because metadata might change: e.g. 
storage format, column types, etc - alterPartitionOperations.add(new AlterPartitionOperation( + alterPartitionOperations.add(new AlterPartitionOperation(identity, new PartitionWithStatistics(partition, partitionName, partitionAndMore.getStatisticsUpdate()), new PartitionWithStatistics(oldPartition.get(), partitionName, oldPartitionStatistics))); } @@ -1292,7 +1297,7 @@ private PartitionStatistics getExistingPartitionStatistics(Partition partition, } } - private void prepareAddPartition(HdfsContext context, PartitionAndMore partitionAndMore) + private void prepareAddPartition(ConnectorIdentity identity, HdfsContext context, PartitionAndMore partitionAndMore) { deleteOnly = false; @@ -1304,7 +1309,7 @@ private void prepareAddPartition(HdfsContext context, PartitionAndMore partition SchemaTableName schemaTableName = new SchemaTableName(partition.getDatabaseName(), partition.getTableName()); PartitionAdder partitionAdder = partitionAdders.computeIfAbsent( schemaTableName, - ignored -> new PartitionAdder(partition.getDatabaseName(), partition.getTableName(), delegate, PARTITION_COMMIT_BATCH_SIZE)); + ignored -> new PartitionAdder(identity, partition.getDatabaseName(), partition.getTableName(), delegate, PARTITION_COMMIT_BATCH_SIZE)); if (pathExists(context, hdfsEnvironment, currentPath)) { if (!targetPath.equals(currentPath)) { @@ -1320,11 +1325,11 @@ private void prepareAddPartition(HdfsContext context, PartitionAndMore partition cleanUpTasksForAbort.add(new DirectoryCleanUpTask(context, targetPath, true)); createDirectory(context, hdfsEnvironment, targetPath); } - String partitionName = getPartitionName(partition.getDatabaseName(), partition.getTableName(), partition.getValues()); + String partitionName = getPartitionName(identity, partition.getDatabaseName(), partition.getTableName(), partition.getValues()); partitionAdder.addPartition(new PartitionWithStatistics(partition, partitionName, partitionAndMore.getStatisticsUpdate())); } - private void 
prepareInsertExistingPartition(HdfsContext context, PartitionAndMore partitionAndMore) + private void prepareInsertExistingPartition(ConnectorIdentity identity, HdfsContext context, PartitionAndMore partitionAndMore) { deleteOnly = false; @@ -1335,9 +1340,9 @@ private void prepareInsertExistingPartition(HdfsContext context, PartitionAndMor if (!targetPath.equals(currentPath)) { asyncRename(hdfsEnvironment, renameExecutor, fileRenameCancelled, fileRenameFutures, context, currentPath, targetPath, partitionAndMore.getFileNames()); } - updateStatisticsOperations.add(new UpdateStatisticsOperation( + updateStatisticsOperations.add(new UpdateStatisticsOperation(identity, new SchemaTableName(partition.getDatabaseName(), partition.getTableName()), - Optional.of(getPartitionName(partition.getDatabaseName(), partition.getTableName(), partition.getValues())), + Optional.of(getPartitionName(identity, partition.getDatabaseName(), partition.getTableName(), partition.getValues())), partitionAndMore.getStatisticsUpdate(), true)); } @@ -1408,35 +1413,35 @@ private void cancelUnstartedAsyncRenames() private void executeAddTableOperations() { for (CreateTableOperation addTableOperation : addTableOperations) { - addTableOperation.run(delegate); + addTableOperation.run(delegate, hdfsEnvironment); } } private void executeAlterPartitionOperations() { for (AlterPartitionOperation alterPartitionOperation : alterPartitionOperations) { - alterPartitionOperation.run(delegate); + alterPartitionOperation.run(delegate, hdfsEnvironment); } } private void executeAddPartitionOperations() { for (PartitionAdder partitionAdder : partitionAdders.values()) { - partitionAdder.execute(); + partitionAdder.execute(hdfsEnvironment); } } private void executeUpdateStatisticsOperations() { for (UpdateStatisticsOperation operation : updateStatisticsOperations) { - operation.run(delegate); + operation.run(delegate, hdfsEnvironment); } } private void undoAddPartitionOperations() { for (PartitionAdder partitionAdder 
: partitionAdders.values()) { - List> partitionsFailedToRollback = partitionAdder.rollback(); + List> partitionsFailedToRollback = partitionAdder.rollback(hdfsEnvironment); if (!partitionsFailedToRollback.isEmpty()) { logCleanupFailure("Failed to rollback: add_partition for partitions %s.%s %s", partitionAdder.getSchemaName(), @@ -1462,7 +1467,7 @@ private void undoAlterPartitionOperations() { for (AlterPartitionOperation alterPartitionOperation : alterPartitionOperations) { try { - alterPartitionOperation.undo(delegate); + alterPartitionOperation.undo(delegate, hdfsEnvironment); } catch (Throwable throwable) { logCleanupFailure(throwable, "failed to rollback: %s", alterPartitionOperation.getDescription()); @@ -1474,7 +1479,7 @@ private void undoUpdateStatisticsOperations() { for (UpdateStatisticsOperation operation : updateStatisticsOperations) { try { - operation.undo(delegate); + operation.undo(delegate, hdfsEnvironment); } catch (Throwable throwable) { logCleanupFailure(throwable, "failed to rollback: %s", operation.getDescription()); @@ -2420,10 +2425,12 @@ private static class CreateTableOperation private boolean tableCreated; private final boolean ignoreExisting; private final String queryId; + private final ConnectorIdentity identity; - public CreateTableOperation(Table newTable, PrincipalPrivileges privileges, boolean ignoreExisting) + public CreateTableOperation(ConnectorIdentity identity, Table newTable, PrincipalPrivileges privileges, boolean ignoreExisting) { requireNonNull(newTable, "newTable is null"); + this.identity = requireNonNull(identity, "identity is null"); this.newTable = newTable; this.privileges = requireNonNull(privileges, "privileges is null"); this.ignoreExisting = ignoreExisting; @@ -2435,11 +2442,11 @@ public String getDescription() return format("add table %s.%s", newTable.getDatabaseName(), newTable.getTableName()); } - public void run(ExtendedHiveMetastore metastore) + public void run(ExtendedHiveMetastore metastore, 
HdfsEnvironment hdfsEnvironment) { boolean done = false; try { - metastore.createTable(newTable, privileges); + hdfsEnvironment.doAs(identity.getUser(), () -> metastore.createTable(newTable, privileges)); done = true; } catch (RuntimeException e) { @@ -2513,11 +2520,13 @@ private static class AlterPartitionOperation private final PartitionWithStatistics newPartition; private final PartitionWithStatistics oldPartition; private boolean undo; + private final ConnectorIdentity identity; - public AlterPartitionOperation(PartitionWithStatistics newPartition, PartitionWithStatistics oldPartition) + public AlterPartitionOperation(ConnectorIdentity identity, PartitionWithStatistics newPartition, PartitionWithStatistics oldPartition) { this.newPartition = requireNonNull(newPartition, "newPartition is null"); this.oldPartition = requireNonNull(oldPartition, "oldPartition is null"); + this.identity = requireNonNull(identity, "identity is null"); checkArgument(newPartition.getPartition().getDatabaseName().equals(oldPartition.getPartition().getDatabaseName())); checkArgument(newPartition.getPartition().getTableName().equals(oldPartition.getPartition().getTableName())); checkArgument(newPartition.getPartition().getValues().equals(oldPartition.getPartition().getValues())); @@ -2532,18 +2541,18 @@ public String getDescription() newPartition.getPartition().getValues()); } - public void run(ExtendedHiveMetastore metastore) + public void run(ExtendedHiveMetastore metastore, HdfsEnvironment hdfsEnvironment) { undo = true; - metastore.alterPartition(newPartition.getPartition().getDatabaseName(), newPartition.getPartition().getTableName(), newPartition); + hdfsEnvironment.doAs(identity.getUser(), () -> metastore.alterPartition(newPartition.getPartition().getDatabaseName(), newPartition.getPartition().getTableName(), newPartition)); } - public void undo(ExtendedHiveMetastore metastore) + public void undo(ExtendedHiveMetastore metastore, HdfsEnvironment hdfsEnvironment) { if (!undo) { 
return; } - metastore.alterPartition(oldPartition.getPartition().getDatabaseName(), oldPartition.getPartition().getTableName(), oldPartition); + hdfsEnvironment.doAs(identity.getUser(), () -> metastore.alterPartition(oldPartition.getPartition().getDatabaseName(), oldPartition.getPartition().getTableName(), oldPartition)); } } @@ -2552,39 +2561,41 @@ private static class UpdateStatisticsOperation private final SchemaTableName tableName; private final Optional partitionName; private final PartitionStatistics statistics; + private final ConnectorIdentity identity; private final boolean merge; private boolean done; - public UpdateStatisticsOperation(SchemaTableName tableName, Optional partitionName, PartitionStatistics statistics, boolean merge) + public UpdateStatisticsOperation(ConnectorIdentity identity, SchemaTableName tableName, Optional partitionName, PartitionStatistics statistics, boolean merge) { this.tableName = requireNonNull(tableName, "tableName is null"); + this.identity = requireNonNull(identity, "identity is null"); this.partitionName = requireNonNull(partitionName, "partitionValues is null"); this.statistics = requireNonNull(statistics, "statistics is null"); this.merge = merge; } - public void run(ExtendedHiveMetastore metastore) + public void run(ExtendedHiveMetastore metastore, HdfsEnvironment hdfsEnvironment) { if (partitionName.isPresent()) { - metastore.updatePartitionStatistics(tableName.getSchemaName(), tableName.getTableName(), partitionName.get(), this::updateStatistics); + hdfsEnvironment.doAs(identity.getUser(), () -> metastore.updatePartitionStatistics(tableName.getSchemaName(), tableName.getTableName(), partitionName.get(), this::updateStatistics)); } else { - metastore.updateTableStatistics(tableName.getSchemaName(), tableName.getTableName(), this::updateStatistics); + hdfsEnvironment.doAs(identity.getUser(), () -> metastore.updateTableStatistics(tableName.getSchemaName(), tableName.getTableName(), this::updateStatistics)); } done = 
true; } - public void undo(ExtendedHiveMetastore metastore) + public void undo(ExtendedHiveMetastore metastore, HdfsEnvironment hdfsEnvironment) { if (!done) { return; } if (partitionName.isPresent()) { - metastore.updatePartitionStatistics(tableName.getSchemaName(), tableName.getTableName(), partitionName.get(), this::resetStatistics); + hdfsEnvironment.doAs(identity.getUser(), () -> metastore.updatePartitionStatistics(tableName.getSchemaName(), tableName.getTableName(), partitionName.get(), this::resetStatistics)); } else { - metastore.updateTableStatistics(tableName.getSchemaName(), tableName.getTableName(), this::resetStatistics); + hdfsEnvironment.doAs(identity.getUser(), () -> metastore.updateTableStatistics(tableName.getSchemaName(), tableName.getTableName(), this::resetStatistics)); } } @@ -2614,10 +2625,12 @@ private static class PartitionAdder private final ExtendedHiveMetastore metastore; private final int batchSize; private final List partitions; + private final ConnectorIdentity identity; private List> createdPartitionValues = new ArrayList<>(); - public PartitionAdder(String schemaName, String tableName, ExtendedHiveMetastore metastore, int batchSize) + public PartitionAdder(ConnectorIdentity identity, String schemaName, String tableName, ExtendedHiveMetastore metastore, int batchSize) { + this.identity = requireNonNull(identity, "identity is null"); this.schemaName = schemaName; this.tableName = tableName; this.metastore = metastore; @@ -2641,12 +2654,12 @@ public void addPartition(PartitionWithStatistics partition) partitions.add(partition); } - public void execute() + public void execute(HdfsEnvironment hdfsEnvironment) { List> batchedPartitions = Lists.partition(partitions, batchSize); for (List batch : batchedPartitions) { try { - metastore.addPartitions(schemaName, tableName, batch); + hdfsEnvironment.doAs(identity.getUser(), () -> metastore.addPartitions(schemaName, tableName, batch)); for (PartitionWithStatistics partition : batch) { 
createdPartitionValues.add(partition.getPartition().getValues()); } @@ -2657,7 +2670,7 @@ public void execute() boolean batchCompletelyAdded = true; for (PartitionWithStatistics partition : batch) { try { - Optional remotePartition = metastore.getPartition(schemaName, tableName, partition.getPartition().getValues()); + Optional remotePartition = hdfsEnvironment.doAs(identity.getUser(), () -> metastore.getPartition(schemaName, tableName, partition.getPartition().getValues())); // getPrestoQueryId(partition) is guaranteed to be non-empty. It is asserted in PartitionAdder.addPartition. if (remotePartition.isPresent() && getPrestoQueryId(remotePartition.get()).equals(getPrestoQueryId(partition.getPartition()))) { createdPartitionValues.add(partition.getPartition().getValues()); @@ -2688,13 +2701,13 @@ public void execute() partitions.clear(); } - public List> rollback() + public List> rollback(HdfsEnvironment hdfsEnvironment) { // drop created partitions List> partitionsFailedToRollback = new ArrayList<>(); for (List createdPartitionValue : createdPartitionValues) { try { - metastore.dropPartition(schemaName, tableName, createdPartitionValue, false); + hdfsEnvironment.doAs(identity.getUser(), () -> metastore.dropPartition(schemaName, tableName, createdPartitionValue, false)); } catch (PartitionNotFoundException e) { // Maybe some one deleted the partition we added. 
@@ -2733,6 +2746,6 @@ public List getNotDeletedEligibleItems() private interface ExclusiveOperation { - void execute(ExtendedHiveMetastore delegate, HdfsEnvironment hdfsEnvironment); + void execute(ExtendedHiveMetastore delegate, HdfsEnvironment hdfsEnvironment, HiveMetastoreAuthentication metastoreAuthentication); } } diff --git a/presto-hive-metastore/src/main/java/com/facebook/presto/hive/metastore/thrift/HiveCluster.java b/presto-hive-metastore/src/main/java/com/facebook/presto/hive/metastore/thrift/HiveCluster.java index dfa6e04fa9565..5b4651e6a7f98 100644 --- a/presto-hive-metastore/src/main/java/com/facebook/presto/hive/metastore/thrift/HiveCluster.java +++ b/presto-hive-metastore/src/main/java/com/facebook/presto/hive/metastore/thrift/HiveCluster.java @@ -13,8 +13,11 @@ */ package com.facebook.presto.hive.metastore.thrift; +import com.google.common.net.HostAndPort; import org.apache.thrift.TException; +import java.util.List; + /** * A Hive cluster is a single logical installation of Hive. 
It might * have multiple instances of the metastore service (for scalability @@ -29,6 +32,8 @@ public interface HiveCluster /** * Create a connected {@link HiveMetastoreClient} to this HiveCluster */ - HiveMetastoreClient createMetastoreClient() + HiveMetastoreClient createMetastoreClient(String token) throws TException; + + List getAddresses(); } diff --git a/presto-hive-metastore/src/main/java/com/facebook/presto/hive/metastore/thrift/HiveMetastoreApiStats.java b/presto-hive-metastore/src/main/java/com/facebook/presto/hive/metastore/thrift/HiveMetastoreApiStats.java index e8c5bc28291e2..db1f2216556e9 100644 --- a/presto-hive-metastore/src/main/java/com/facebook/presto/hive/metastore/thrift/HiveMetastoreApiStats.java +++ b/presto-hive-metastore/src/main/java/com/facebook/presto/hive/metastore/thrift/HiveMetastoreApiStats.java @@ -35,6 +35,12 @@ public class HiveMetastoreApiStats private final CounterStat metastoreExceptions = new CounterStat(); private final CounterStat thriftExceptions = new CounterStat(); + public V wrapCall(Callable callable) + throws Exception + { + return wrap(callable).call(); + } + public Callable wrap(Callable callable) { return () -> { diff --git a/presto-hive-metastore/src/main/java/com/facebook/presto/hive/metastore/thrift/HiveMetastoreClient.java b/presto-hive-metastore/src/main/java/com/facebook/presto/hive/metastore/thrift/HiveMetastoreClient.java index c2ed66a74c17c..384b67a956144 100644 --- a/presto-hive-metastore/src/main/java/com/facebook/presto/hive/metastore/thrift/HiveMetastoreClient.java +++ b/presto-hive-metastore/src/main/java/com/facebook/presto/hive/metastore/thrift/HiveMetastoreClient.java @@ -13,6 +13,7 @@ */ package com.facebook.presto.hive.metastore.thrift; +import com.google.common.net.HostAndPort; import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj; import org.apache.hadoop.hive.metastore.api.Database; import org.apache.hadoop.hive.metastore.api.FieldSchema; @@ -24,6 +25,7 @@ import 
org.apache.hadoop.hive.metastore.api.Role; import org.apache.hadoop.hive.metastore.api.RolePrincipalGrant; import org.apache.hadoop.hive.metastore.api.Table; +import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Client; import org.apache.thrift.TException; import java.io.Closeable; @@ -36,6 +38,10 @@ public interface HiveMetastoreClient @Override void close(); + HostAndPort getAddress(); + + Client getMetastoreClient(); + List getAllDatabases() throws TException; diff --git a/presto-hive-metastore/src/main/java/com/facebook/presto/hive/metastore/thrift/HiveMetastoreClientFactory.java b/presto-hive-metastore/src/main/java/com/facebook/presto/hive/metastore/thrift/HiveMetastoreClientFactory.java index 4182c2dfbf03d..5c75638608a7e 100644 --- a/presto-hive-metastore/src/main/java/com/facebook/presto/hive/metastore/thrift/HiveMetastoreClientFactory.java +++ b/presto-hive-metastore/src/main/java/com/facebook/presto/hive/metastore/thrift/HiveMetastoreClientFactory.java @@ -52,9 +52,9 @@ public HiveMetastoreClientFactory(MetastoreClientConfig metastoreClientConfig, H this(Optional.empty(), Optional.ofNullable(metastoreClientConfig.getMetastoreSocksProxy()), metastoreClientConfig.getMetastoreTimeout(), metastoreAuthentication); } - public HiveMetastoreClient create(HostAndPort address) + public HiveMetastoreClient create(HostAndPort address, String token) throws TTransportException { - return new ThriftHiveMetastoreClient(Transport.create(address, sslContext, socksProxy, timeoutMillis, metastoreAuthentication)); + return new ThriftHiveMetastoreClient(address, Transport.create(address, sslContext, socksProxy, timeoutMillis, metastoreAuthentication, token)); } } diff --git a/presto-hive-metastore/src/main/java/com/facebook/presto/hive/metastore/thrift/StaticHiveCluster.java b/presto-hive-metastore/src/main/java/com/facebook/presto/hive/metastore/thrift/StaticHiveCluster.java index b3b69cb1e2bbd..7bc3fd69e0648 100644 --- 
a/presto-hive-metastore/src/main/java/com/facebook/presto/hive/metastore/thrift/StaticHiveCluster.java +++ b/presto-hive-metastore/src/main/java/com/facebook/presto/hive/metastore/thrift/StaticHiveCluster.java @@ -34,16 +34,18 @@ public class StaticHiveCluster private final List addresses; private final HiveMetastoreClientFactory clientFactory; private final String metastoreUsername; + private final boolean isMultipleMetastore; @Inject public StaticHiveCluster(StaticMetastoreConfig config, HiveMetastoreClientFactory clientFactory) { - this(config.getMetastoreUris(), config.getMetastoreUsername(), clientFactory); + this(config.getMetastoreUris(), config.isMultipleMetastoreEnabled(), config.getMetastoreUsername(), clientFactory); } - public StaticHiveCluster(List metastoreUris, String metastoreUsername, HiveMetastoreClientFactory clientFactory) + public StaticHiveCluster(List metastoreUris, boolean isMultipleMetastore, String metastoreUsername, HiveMetastoreClientFactory clientFactory) { requireNonNull(metastoreUris, "metastoreUris is null"); + this.isMultipleMetastore = isMultipleMetastore; checkArgument(!metastoreUris.isEmpty(), "metastoreUris must specify at least one URI"); this.addresses = metastoreUris.stream() .map(StaticHiveCluster::checkMetastoreUri) @@ -53,6 +55,12 @@ public StaticHiveCluster(List metastoreUris, String metastoreUsername, Hive this.clientFactory = requireNonNull(clientFactory, "clientFactory is null"); } + @Override + public List getAddresses() + { + return addresses; + } + /** * Create a metastore client connected to the Hive metastore. *

@@ -62,16 +70,22 @@ public StaticHiveCluster(List metastoreUris, String metastoreUsername, Hive * connection succeeds or there are no more fallback metastores. */ @Override - public HiveMetastoreClient createMetastoreClient() + public HiveMetastoreClient createMetastoreClient(String token) throws TException { List metastores = new ArrayList<>(addresses); - Collections.shuffle(metastores.subList(1, metastores.size())); + if (isMultipleMetastore) { + Collections.shuffle(metastores); + } + else { + Collections.shuffle(metastores.subList(1, metastores.size())); + } TException lastException = null; for (HostAndPort metastore : metastores) { try { - HiveMetastoreClient client = clientFactory.create(metastore); + HiveMetastoreClient client = clientFactory.create(metastore, token); + if (!isNullOrEmpty(metastoreUsername)) { client.setUGI(metastoreUsername); } diff --git a/presto-hive-metastore/src/main/java/com/facebook/presto/hive/metastore/thrift/StaticMetastoreConfig.java b/presto-hive-metastore/src/main/java/com/facebook/presto/hive/metastore/thrift/StaticMetastoreConfig.java index fd8c6d09b4d5e..0a6cfb779b0ca 100644 --- a/presto-hive-metastore/src/main/java/com/facebook/presto/hive/metastore/thrift/StaticMetastoreConfig.java +++ b/presto-hive-metastore/src/main/java/com/facebook/presto/hive/metastore/thrift/StaticMetastoreConfig.java @@ -31,6 +31,7 @@ public class StaticMetastoreConfig private List metastoreUris; private String metastoreUsername; + private boolean isMultipleMetastoreEnabled; @NotNull public List getMetastoreUris() @@ -51,6 +52,19 @@ public StaticMetastoreConfig setMetastoreUris(String uris) return this; } + public boolean isMultipleMetastoreEnabled() + { + return isMultipleMetastoreEnabled; + } + + @Config("hive.metastore.multiple-instance.enabled") + @ConfigDescription("Enable load balancing between multiple Metastore instances") + public StaticMetastoreConfig setMultipleMetastoreEnabled(boolean enabled) + { + this.isMultipleMetastoreEnabled = 
enabled; + return this; + } + public String getMetastoreUsername() { return metastoreUsername; diff --git a/presto-hive-metastore/src/main/java/com/facebook/presto/hive/metastore/thrift/ThriftHiveMetastore.java b/presto-hive-metastore/src/main/java/com/facebook/presto/hive/metastore/thrift/ThriftHiveMetastore.java index 27e83e4cecf71..954c4aab7f7f2 100644 --- a/presto-hive-metastore/src/main/java/com/facebook/presto/hive/metastore/thrift/ThriftHiveMetastore.java +++ b/presto-hive-metastore/src/main/java/com/facebook/presto/hive/metastore/thrift/ThriftHiveMetastore.java @@ -16,6 +16,7 @@ import com.facebook.presto.hive.HiveBasicStatistics; import com.facebook.presto.hive.HiveType; import com.facebook.presto.hive.HiveViewNotSupportedException; +import com.facebook.presto.hive.MetastoreClientConfig; import com.facebook.presto.hive.PartitionNotFoundException; import com.facebook.presto.hive.RetryDriver; import com.facebook.presto.hive.SchemaAlreadyExistsException; @@ -39,6 +40,7 @@ import com.google.common.collect.ImmutableMap; import com.google.common.collect.ImmutableSet; import com.google.common.collect.Iterables; +import com.google.common.net.HostAndPort; import org.apache.hadoop.hive.metastore.TableType; import org.apache.hadoop.hive.metastore.api.AlreadyExistsException; import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj; @@ -58,13 +60,17 @@ import org.apache.hadoop.hive.metastore.api.Table; import org.apache.hadoop.hive.metastore.api.UnknownDBException; import org.apache.hadoop.hive.metastore.api.UnknownTableException; +import org.apache.hadoop.security.UserGroupInformation; import org.apache.thrift.TException; import org.weakref.jmx.Flatten; +import org.weakref.jmx.MBeanExporter; import org.weakref.jmx.Managed; +import org.weakref.jmx.ObjectNames; import javax.annotation.concurrent.ThreadSafe; import javax.inject.Inject; +import java.util.ArrayList; import java.util.HashSet; import java.util.Iterator; import java.util.List; @@ -113,20 +119,46 @@ 
public class ThriftHiveMetastore implements HiveMetastore { private final ThriftHiveMetastoreStats stats; + private final MBeanExporter exporter; private final HiveCluster clientProvider; private final Function exceptionMapper; + private final Map metastoreStats; + private final boolean isHmsImpersonationEnabled; @Inject - public ThriftHiveMetastore(HiveCluster hiveCluster) + public ThriftHiveMetastore(HiveCluster hiveCluster, MetastoreClientConfig config, MBeanExporter exporter) { - this(hiveCluster, new ThriftHiveMetastoreStats(), identity()); + this(hiveCluster, config, exporter, new ThriftHiveMetastoreStats(), identity()); } - public ThriftHiveMetastore(HiveCluster hiveCluster, ThriftHiveMetastoreStats stats, Function exceptionMapper) + public ThriftHiveMetastore(HiveCluster hiveCluster, MetastoreClientConfig config, MBeanExporter exporter, ThriftHiveMetastoreStats stats, Function exceptionMapper) { + this.isHmsImpersonationEnabled = config.isMetastoreImpersonationEnabled(); + this.exporter = requireNonNull(exporter, "exporter is null"); this.clientProvider = requireNonNull(hiveCluster, "hiveCluster is null"); this.stats = requireNonNull(stats, "stats is null"); this.exceptionMapper = requireNonNull(exceptionMapper, "exceptionMapper is null"); + ImmutableMap.Builder metastoreStatsBuilder = ImmutableMap.builder(); + for (HostAndPort hostAndPort : hiveCluster.getAddresses()) { + metastoreStatsBuilder.put(hostAndPort, new ThriftHiveMetastoreStats()); + } + this.metastoreStats = metastoreStatsBuilder.build(); + exportMetastoreStatistics(); + } + + private void exportMetastoreStatistics() + { + for (HostAndPort key : metastoreStats.keySet()) { + String name = ObjectNames.builder(ThriftHiveMetastore.class) + .withProperty("host", key.getHost()) + .build(); + exporter.export(name, metastoreStats.get(key)); + } + } + + private static boolean isPrestoView(Table table) + { + return "true".equals(table.getParameters().get(PRESTO_VIEW_FLAG)); } @Managed @@ -136,17 +168,21 
@@ public ThriftHiveMetastoreStats getStats() return stats; } + @Managed + @Flatten + public List getAllStats() + { + return new ArrayList<>(metastoreStats.values()); + } + @Override public List getAllDatabases() { try { return retry() .stopOnIllegalExceptions() - .run("getAllDatabases", stats.getGetAllDatabases().wrap(() -> { - try (HiveMetastoreClient client = clientProvider.createMetastoreClient()) { - return client.getAllDatabases(); - } - })); + .run("getAllDatabases", stats.getGetAllDatabases().wrap(() -> + wrapMetastoreCallable((HiveMetastoreClient client, ThriftHiveMetastoreStats stat) -> stat.getGetAllDatabases().wrapCall(client::getAllDatabases)))); } catch (TException e) { throw new PrestoException(HIVE_METASTORE_ERROR, e); @@ -163,11 +199,8 @@ public Optional getDatabase(String databaseName) return retry() .stopOn(NoSuchObjectException.class) .stopOnIllegalExceptions() - .run("getDatabase", stats.getGetDatabase().wrap(() -> { - try (HiveMetastoreClient client = clientProvider.createMetastoreClient()) { - return Optional.of(client.getDatabase(databaseName)); - } - })); + .run("getDatabase", stats.getGetDatabase().wrap(() -> + wrapMetastoreCallable((HiveMetastoreClient client, ThriftHiveMetastoreStats stat) -> Optional.of(stat.getGetDatabase().wrapCall(() -> client.getDatabase(databaseName)))))); } catch (NoSuchObjectException e) { return Optional.empty(); @@ -184,14 +217,16 @@ public Optional getDatabase(String databaseName) public Optional> getAllTables(String databaseName) { Callable> getAllTables = stats.getGetAllTables().wrap(() -> { - try (HiveMetastoreClient client = clientProvider.createMetastoreClient()) { - return client.getAllTables(databaseName); + try (HiveMetastoreClient client = clientProvider.createMetastoreClient(null)) { + ThriftHiveMetastoreStats stat = metastoreStats.get(client.getAddress()); + return stat.getGetAllTables().wrap(() -> client.getAllTables(databaseName)).call(); } }); Callable getDatabase = stats.getGetDatabase().wrap(() 
-> { - try (HiveMetastoreClient client = clientProvider.createMetastoreClient()) { - client.getDatabase(databaseName); + try (HiveMetastoreClient client = clientProvider.createMetastoreClient(null)) { + ThriftHiveMetastoreStats stat = metastoreStats.get(client.getAddress()); + stat.getGetDatabase().wrap(() -> client.getDatabase(databaseName)).call(); return null; } }); @@ -227,15 +262,15 @@ public Optional
<Table>
getTable(String databaseName, String tableName) return retry() .stopOn(NoSuchObjectException.class, HiveViewNotSupportedException.class) .stopOnIllegalExceptions() - .run("getTable", stats.getGetTable().wrap(() -> { - try (HiveMetastoreClient client = clientProvider.createMetastoreClient()) { - Table table = client.getTable(databaseName, tableName); - if (table.getTableType().equals(TableType.VIRTUAL_VIEW.name()) && !isPrestoView(table)) { - throw new HiveViewNotSupportedException(new SchemaTableName(databaseName, tableName)); - } - return Optional.of(table); - } - })); + .run("getTable", stats.getGetTable().wrap(() -> + wrapMetastoreCallable((HiveMetastoreClient client, ThriftHiveMetastoreStats stat) -> + stat.getGetTable().wrapCall(() -> { + Table table = client.getTable(databaseName, tableName); + if (table.getTableType().equals(TableType.VIRTUAL_VIEW.name()) && !isPrestoView(table)) { + throw new HiveViewNotSupportedException(new SchemaTableName(databaseName, tableName)); + } + return Optional.of(table); + })))); } catch (NoSuchObjectException e) { return Optional.empty(); @@ -254,11 +289,6 @@ public Set getSupportedColumnStatistics(Type type) return MetastoreUtil.getSupportedColumnStatistics(type); } - private static boolean isPrestoView(Table table) - { - return "true".equals(table.getParameters().get(PRESTO_VIEW_FLAG)); - } - @Override public PartitionStatistics getTableStatistics(String databaseName, String tableName) { @@ -278,11 +308,9 @@ private Map getTableColumnStatistics(String databa return retry() .stopOn(NoSuchObjectException.class, HiveViewNotSupportedException.class) .stopOnIllegalExceptions() - .run("getTableColumnStatistics", stats.getGetTableColumnStatistics().wrap(() -> { - try (HiveMetastoreClient client = clientProvider.createMetastoreClient()) { - return groupStatisticsByColumn(client.getTableColumnStatistics(databaseName, tableName, columns), rowCount); - } - })); + .run("getTableColumnStatistics", 
stats.getGetTableColumnStatistics().wrap(() -> + wrapMetastoreCallable((HiveMetastoreClient client, ThriftHiveMetastoreStats stat) -> + groupStatisticsByColumn(stat.getGetTableColumnStatistics().wrapCall(() -> client.getTableColumnStatistics(databaseName, tableName, columns)), rowCount)))); } catch (NoSuchObjectException e) { throw new TableNotFoundException(new SchemaTableName(databaseName, tableName)); @@ -336,11 +364,9 @@ public Optional> getFields(String databaseName, String tableNa return retry() .stopOn(MetaException.class, UnknownTableException.class, UnknownDBException.class) .stopOnIllegalExceptions() - .run("getFields", stats.getGetFields().wrap(() -> { - try (HiveMetastoreClient client = clientProvider.createMetastoreClient()) { - return Optional.of(ImmutableList.copyOf(client.getFields(databaseName, tableName))); - } - })); + .run("getFields", stats.getGetFields().wrap(() -> + wrapMetastoreCallable((HiveMetastoreClient client, ThriftHiveMetastoreStats stat) -> + Optional.of(ImmutableList.copyOf(stat.getGetFields().wrapCall(() -> client.getFields(databaseName, tableName))))))); } catch (NoSuchObjectException e) { return Optional.empty(); @@ -373,11 +399,9 @@ private Map> getMetastorePartitionColumnStatis return retry() .stopOn(NoSuchObjectException.class, HiveViewNotSupportedException.class) .stopOnIllegalExceptions() - .run("getPartitionColumnStatistics", stats.getGetPartitionColumnStatistics().wrap(() -> { - try (HiveMetastoreClient client = clientProvider.createMetastoreClient()) { - return client.getPartitionColumnStatistics(databaseName, tableName, ImmutableList.copyOf(partitionNames), columnNames); - } - })); + .run("getPartitionColumnStatistics", stats.getGetPartitionColumnStatistics().wrap(() -> + wrapMetastoreCallable((HiveMetastoreClient client, ThriftHiveMetastoreStats stat) -> + stat.getGetPartitionColumnStatistics().wrapCall(() -> client.getPartitionColumnStatistics(databaseName, tableName, ImmutableList.copyOf(partitionNames), 
columnNames))))); } catch (NoSuchObjectException e) { throw new TableNotFoundException(new SchemaTableName(databaseName, tableName)); @@ -427,12 +451,12 @@ private void setTableColumnStatistics(String databaseName, String tableName, Lis retry() .stopOn(NoSuchObjectException.class, InvalidObjectException.class, MetaException.class, InvalidInputException.class) .stopOnIllegalExceptions() - .run("setTableColumnStatistics", stats.getCreateDatabase().wrap(() -> { - try (HiveMetastoreClient client = clientProvider.createMetastoreClient()) { - client.setTableColumnStatistics(databaseName, tableName, statistics); - } - return null; - })); + .run("setTableColumnStatistics", stats.getUpdateTableColumnStatistics().wrap(() -> + wrapMetastoreCallable((HiveMetastoreClient client, ThriftHiveMetastoreStats stat) -> + stat.getUpdateTableColumnStatistics().wrapCall(() -> { + client.setTableColumnStatistics(databaseName, tableName, statistics); + return null; + })))); } catch (NoSuchObjectException e) { throw new TableNotFoundException(new SchemaTableName(databaseName, tableName)); @@ -451,12 +475,12 @@ private void deleteTableColumnStatistics(String databaseName, String tableName, retry() .stopOn(NoSuchObjectException.class, InvalidObjectException.class, MetaException.class, InvalidInputException.class) .stopOnIllegalExceptions() - .run("deleteTableColumnStatistics", stats.getCreateDatabase().wrap(() -> { - try (HiveMetastoreClient client = clientProvider.createMetastoreClient()) { - client.deleteTableColumnStatistics(databaseName, tableName, columnName); - } - return null; - })); + .run("deleteTableColumnStatistics", stats.getUpdatePartitionColumnStatistics().wrap(() -> + wrapMetastoreCallable((HiveMetastoreClient client, ThriftHiveMetastoreStats stat) -> + stat.getUpdatePartitionColumnStatistics().wrapCall(() -> { + client.deleteTableColumnStatistics(databaseName, tableName, columnName); + return null; + })))); } catch (NoSuchObjectException e) { throw new 
TableNotFoundException(new SchemaTableName(databaseName, tableName)); @@ -518,12 +542,12 @@ private void setPartitionColumnStatistics(String databaseName, String tableName, retry() .stopOn(NoSuchObjectException.class, InvalidObjectException.class, MetaException.class, InvalidInputException.class) .stopOnIllegalExceptions() - .run("setPartitionColumnStatistics", stats.getCreateDatabase().wrap(() -> { - try (HiveMetastoreClient client = clientProvider.createMetastoreClient()) { - client.setPartitionColumnStatistics(databaseName, tableName, partitionName, statistics); - } - return null; - })); + .run("setPartitionColumnStatistics", stats.getUpdatePartitionColumnStatistics().wrap(() -> + wrapMetastoreCallable((HiveMetastoreClient client, ThriftHiveMetastoreStats stat) -> + stat.getUpdatePartitionColumnStatistics().wrapCall(() -> { + client.setPartitionColumnStatistics(databaseName, tableName, partitionName, statistics); + return null; + })))); } catch (NoSuchObjectException e) { throw new TableNotFoundException(new SchemaTableName(databaseName, tableName)); @@ -542,12 +566,12 @@ private void deletePartitionColumnStatistics(String databaseName, String tableNa retry() .stopOn(NoSuchObjectException.class, InvalidObjectException.class, MetaException.class, InvalidInputException.class) .stopOnIllegalExceptions() - .run("deletePartitionColumnStatistics", stats.getCreateDatabase().wrap(() -> { - try (HiveMetastoreClient client = clientProvider.createMetastoreClient()) { - client.deletePartitionColumnStatistics(databaseName, tableName, partitionName, columnName); - } - return null; - })); + .run("deletePartitionColumnStatistics", stats.getUpdatePartitionColumnStatistics().wrap(() -> + wrapMetastoreCallable((HiveMetastoreClient client, ThriftHiveMetastoreStats stat) -> + stat.getUpdatePartitionColumnStatistics().wrapCall(() -> { + client.deletePartitionColumnStatistics(databaseName, tableName, partitionName, columnName); + return null; + })))); } catch (NoSuchObjectException e) 
{ throw new TableNotFoundException(new SchemaTableName(databaseName, tableName)); @@ -567,12 +591,12 @@ public void createRole(String role, String grantor) retry() .stopOn(MetaException.class) .stopOnIllegalExceptions() - .run("createRole", stats.getCreateRole().wrap(() -> { - try (HiveMetastoreClient client = clientProvider.createMetastoreClient()) { - client.createRole(role, grantor); - return null; - } - })); + .run("createRole", stats.getCreateRole().wrap(() -> + wrapMetastoreCallable((HiveMetastoreClient client, ThriftHiveMetastoreStats stat) -> + stat.getCreateRole().wrapCall(() -> { + client.createRole(role, grantor); + return null; + })))); } catch (TException e) { throw new PrestoException(HIVE_METASTORE_ERROR, e); @@ -589,12 +613,12 @@ public void dropRole(String role) retry() .stopOn(MetaException.class) .stopOnIllegalExceptions() - .run("dropRole", stats.getDropRole().wrap(() -> { - try (HiveMetastoreClient client = clientProvider.createMetastoreClient()) { - client.dropRole(role); - return null; - } - })); + .run("dropRole", stats.getDropRole().wrap(() -> + wrapMetastoreCallable((HiveMetastoreClient client, ThriftHiveMetastoreStats stat) -> + stat.getDropRole().wrapCall(() -> { + client.dropRole(role); + return null; + })))); } catch (TException e) { throw new PrestoException(HIVE_METASTORE_ERROR, e); @@ -611,11 +635,8 @@ public Set listRoles() return retry() .stopOn(MetaException.class) .stopOnIllegalExceptions() - .run("listRoles", stats.getListRoles().wrap(() -> { - try (HiveMetastoreClient client = clientProvider.createMetastoreClient()) { - return ImmutableSet.copyOf(client.getRoleNames()); - } - })); + .run("listRoles", stats.getListRoles().wrap(() -> + wrapMetastoreCallable((HiveMetastoreClient client, ThriftHiveMetastoreStats stat) -> ImmutableSet.copyOf(stat.getListRoles().wrapCall(() -> client.getRoleNames()))))); } catch (TException e) { throw new PrestoException(HIVE_METASTORE_ERROR, e); @@ -645,12 +666,12 @@ private void grantRole(String 
role, String granteeName, PrincipalType granteeTyp retry() .stopOn(MetaException.class) .stopOnIllegalExceptions() - .run("grantRole", stats.getGrantRole().wrap(() -> { - try (HiveMetastoreClient client = clientProvider.createMetastoreClient()) { - client.grantRole(role, granteeName, granteeType, grantorName, grantorType, grantOption); - return null; - } - })); + .run("grantRole", stats.getGrantRole().wrap(() -> + wrapMetastoreCallable((HiveMetastoreClient client, ThriftHiveMetastoreStats stat) -> + stat.getGrantRole().wrapCall(() -> { + client.grantRole(role, granteeName, granteeType, grantorName, grantorType, grantOption); + return null; + })))); } catch (TException e) { throw new PrestoException(HIVE_METASTORE_ERROR, e); @@ -679,12 +700,12 @@ private void revokeRole(String role, String granteeName, PrincipalType granteeTy retry() .stopOn(MetaException.class) .stopOnIllegalExceptions() - .run("revokeRole", stats.getRevokeRole().wrap(() -> { - try (HiveMetastoreClient client = clientProvider.createMetastoreClient()) { - client.revokeRole(role, granteeName, granteeType, grantOption); - return null; - } - })); + .run("revokeRole", stats.getRevokeRole().wrap(() -> + wrapMetastoreCallable((HiveMetastoreClient client, ThriftHiveMetastoreStats stat) -> + stat.getRevokeRole().wrapCall(() -> { + client.revokeRole(role, granteeName, granteeType, grantOption); + return null; + })))); } catch (TException e) { throw new PrestoException(HIVE_METASTORE_ERROR, e); @@ -701,11 +722,9 @@ public Set listRoleGrants(PrestoPrincipal principal) return retry() .stopOn(MetaException.class) .stopOnIllegalExceptions() - .run("listRoleGrants", stats.getListRoleGrants().wrap(() -> { - try (HiveMetastoreClient client = clientProvider.createMetastoreClient()) { - return fromRolePrincipalGrants(client.listRoleGrants(principal.getName(), fromPrestoPrincipalType(principal.getType()))); - } - })); + .run("listRoleGrants", stats.getListRoleGrants().wrap(() -> + 
wrapMetastoreCallable((HiveMetastoreClient client, ThriftHiveMetastoreStats stat) -> + fromRolePrincipalGrants(stat.getListRoleGrants().wrapCall(() -> client.listRoleGrants(principal.getName(), fromPrestoPrincipalType(principal.getType()))))))); } catch (TException e) { throw new PrestoException(HIVE_METASTORE_ERROR, e); @@ -722,12 +741,12 @@ public Optional> getAllViews(String databaseName) return retry() .stopOn(UnknownDBException.class) .stopOnIllegalExceptions() - .run("getAllViews", stats.getGetAllViews().wrap(() -> { - try (HiveMetastoreClient client = clientProvider.createMetastoreClient()) { - String filter = HIVE_FILTER_FIELD_PARAMS + PRESTO_VIEW_FLAG + " = \"true\""; - return Optional.of(client.getTableNamesByFilter(databaseName, filter)); - } - })); + .run("getAllViews", stats.getGetAllViews().wrap(() -> + wrapMetastoreCallable((HiveMetastoreClient client, ThriftHiveMetastoreStats stat) -> + stat.getGetAllViews().wrapCall(() -> { + String filter = HIVE_FILTER_FIELD_PARAMS + PRESTO_VIEW_FLAG + " = \"true\""; + return Optional.of(client.getTableNamesByFilter(databaseName, filter)); + })))); } catch (UnknownDBException e) { return Optional.empty(); @@ -747,12 +766,12 @@ public void createDatabase(Database database) retry() .stopOn(AlreadyExistsException.class, InvalidObjectException.class, MetaException.class) .stopOnIllegalExceptions() - .run("createDatabase", stats.getCreateDatabase().wrap(() -> { - try (HiveMetastoreClient client = clientProvider.createMetastoreClient()) { - client.createDatabase(database); - } - return null; - })); + .run("createDatabase", stats.getCreateDatabase().wrap(() -> + wrapMetastoreCallable((HiveMetastoreClient client, ThriftHiveMetastoreStats stat) -> + stat.getAlterDatabase().wrapCall(() -> { + client.createDatabase(database); + return null; + })))); } catch (AlreadyExistsException e) { throw new SchemaAlreadyExistsException(database.getName()); @@ -772,12 +791,12 @@ public void dropDatabase(String databaseName) retry() 
.stopOn(NoSuchObjectException.class, InvalidOperationException.class) .stopOnIllegalExceptions() - .run("dropDatabase", stats.getDropDatabase().wrap(() -> { - try (HiveMetastoreClient client = clientProvider.createMetastoreClient()) { - client.dropDatabase(databaseName, false, false); - } - return null; - })); + .run("dropDatabase", stats.getAlterDatabase().wrap(() -> + wrapMetastoreCallable((HiveMetastoreClient client, ThriftHiveMetastoreStats stat) -> + stat.getAlterDatabase().wrapCall(() -> { + client.dropDatabase(databaseName, false, false); + return null; + })))); } catch (NoSuchObjectException e) { throw new SchemaNotFoundException(databaseName); @@ -790,6 +809,13 @@ public void dropDatabase(String databaseName) } } + @FunctionalInterface + public interface MetastoreCallable + { + V call(HiveMetastoreClient client, ThriftHiveMetastoreStats stats) + throws Exception; + } + @Override public void alterDatabase(String databaseName, Database database) { @@ -797,12 +823,12 @@ public void alterDatabase(String databaseName, Database database) retry() .stopOn(NoSuchObjectException.class, MetaException.class) .stopOnIllegalExceptions() - .run("alterDatabase", stats.getAlterDatabase().wrap(() -> { - try (HiveMetastoreClient client = clientProvider.createMetastoreClient()) { - client.alterDatabase(databaseName, database); - } - return null; - })); + .run("alterDatabase", stats.getAlterDatabase().wrap(() -> + wrapMetastoreCallable((HiveMetastoreClient client, ThriftHiveMetastoreStats stat) -> + stat.getAlterDatabase().wrapCall(() -> { + client.alterDatabase(databaseName, database); + return null; + })))); } catch (NoSuchObjectException e) { throw new SchemaNotFoundException(databaseName); @@ -822,12 +848,12 @@ public void createTable(Table table) retry() .stopOn(AlreadyExistsException.class, InvalidObjectException.class, MetaException.class, NoSuchObjectException.class) .stopOnIllegalExceptions() - .run("createTable", stats.getCreateTable().wrap(() -> { - try 
(HiveMetastoreClient client = clientProvider.createMetastoreClient()) { - client.createTable(table); - } - return null; - })); + .run("createTable", stats.getCreateTable().wrap(() -> + wrapMetastoreCallable((HiveMetastoreClient client, ThriftHiveMetastoreStats stat) -> + stat.getCreateTable().wrapCall(() -> { + client.createTable(table); + return null; + })))); } catch (AlreadyExistsException e) { throw new TableAlreadyExistsException(new SchemaTableName(table.getDbName(), table.getTableName())); @@ -850,12 +876,12 @@ public void dropTable(String databaseName, String tableName, boolean deleteData) retry() .stopOn(NoSuchObjectException.class) .stopOnIllegalExceptions() - .run("dropTable", stats.getDropTable().wrap(() -> { - try (HiveMetastoreClient client = clientProvider.createMetastoreClient()) { - client.dropTable(databaseName, tableName, deleteData); - } - return null; - })); + .run("dropTable", stats.getDropTable().wrap(() -> + wrapMetastoreCallable((HiveMetastoreClient client, ThriftHiveMetastoreStats stat) -> + stat.getDropTable().wrapCall(() -> { + client.dropTable(databaseName, tableName, deleteData); + return null; + })))); } catch (NoSuchObjectException e) { throw new TableNotFoundException(new SchemaTableName(databaseName, tableName)); @@ -875,16 +901,17 @@ public void alterTable(String databaseName, String tableName, Table table) retry() .stopOn(InvalidOperationException.class, MetaException.class) .stopOnIllegalExceptions() - .run("alterTable", stats.getAlterTable().wrap(() -> { - try (HiveMetastoreClient client = clientProvider.createMetastoreClient()) { - Optional
source = getTable(databaseName, tableName); - if (!source.isPresent()) { - throw new TableNotFoundException(new SchemaTableName(databaseName, tableName)); - } - client.alterTable(databaseName, tableName, table); - } - return null; - })); + .run("alterTable", stats.getAlterTable().wrap(() -> + wrapMetastoreCallable((HiveMetastoreClient client, ThriftHiveMetastoreStats stat) -> { + Optional
source = getTable(databaseName, tableName); + if (!source.isPresent()) { + throw new TableNotFoundException(new SchemaTableName(databaseName, tableName)); + } + return stat.getAlterTable().wrapCall(() -> { + client.alterTable(databaseName, tableName, table); + return null; + }); + }))); } catch (NoSuchObjectException e) { throw new TableNotFoundException(new SchemaTableName(databaseName, tableName)); @@ -904,11 +931,9 @@ public Optional> getPartitionNames(String databaseName, String tabl return retry() .stopOn(NoSuchObjectException.class) .stopOnIllegalExceptions() - .run("getPartitionNames", stats.getGetPartitionNames().wrap(() -> { - try (HiveMetastoreClient client = clientProvider.createMetastoreClient()) { - return Optional.of(client.getPartitionNames(databaseName, tableName)); - } - })); + .run("getPartitionNames", stats.getGetPartitionNames().wrap(() -> + wrapMetastoreCallable((HiveMetastoreClient client, ThriftHiveMetastoreStats stat) -> + Optional.of(stat.getGetPartitionsByNames().wrapCall(() -> client.getPartitionNames(databaseName, tableName)))))); } catch (NoSuchObjectException e) { return Optional.empty(); @@ -928,11 +953,8 @@ public Optional> getPartitionNamesByParts(String databaseName, Stri return retry() .stopOn(NoSuchObjectException.class) .stopOnIllegalExceptions() - .run("getPartitionNamesByParts", stats.getGetPartitionNamesPs().wrap(() -> { - try (HiveMetastoreClient client = clientProvider.createMetastoreClient()) { - return Optional.of(client.getPartitionNamesFiltered(databaseName, tableName, parts)); - } - })); + .run("getPartitionNamesByParts", stats.getGetPartitionNamesPs().wrap(() -> + wrapMetastoreCallable((HiveMetastoreClient client, ThriftHiveMetastoreStats stat) -> Optional.of(stat.getGetPartitionNamesPs().wrapCall(() -> client.getPartitionNamesFiltered(databaseName, tableName, parts)))))); } catch (NoSuchObjectException e) { return Optional.empty(); @@ -973,16 +995,16 @@ private void addPartitionsWithoutStatistics(String databaseName, 
String tableNam retry() .stopOn(AlreadyExistsException.class, InvalidObjectException.class, MetaException.class, NoSuchObjectException.class, PrestoException.class) .stopOnIllegalExceptions() - .run("addPartitions", stats.getAddPartitions().wrap(() -> { - try (HiveMetastoreClient client = clientProvider.createMetastoreClient()) { - int partitionsAdded = client.addPartitions(partitions); - if (partitionsAdded != partitions.size()) { - throw new PrestoException(HIVE_METASTORE_ERROR, - format("Hive metastore only added %s of %s partitions", partitionsAdded, partitions.size())); - } - return null; - } - })); + .run("addPartitions", stats.getAddPartitions().wrap(() -> + wrapMetastoreCallable((HiveMetastoreClient client, ThriftHiveMetastoreStats stat) -> + stat.getAddPartitions().wrapCall(() -> { + int partitionsAdded = client.addPartitions(partitions); + if (partitionsAdded != partitions.size()) { + throw new PrestoException(HIVE_METASTORE_ERROR, + format("Hive metastore only added %s of %s partitions", partitionsAdded, partitions.size())); + } + return null; + })))); } catch (AlreadyExistsException e) { throw new PrestoException(ALREADY_EXISTS, format("One or more partitions already exist for table '%s.%s'", databaseName, tableName), e); @@ -998,6 +1020,20 @@ private void addPartitionsWithoutStatistics(String databaseName, String tableNam } } + private V wrapMetastoreCallable(MetastoreCallable callable) + throws Exception + { + if (!isHmsImpersonationEnabled) { + HiveMetastoreClient client = clientProvider.createMetastoreClient(null); + return callable.call(client, metastoreStats.get(client.getAddress())); + } + try (HiveMetastoreClient client = clientProvider.createMetastoreClient(null)) { + String token = client.getMetastoreClient().get_delegation_token(UserGroupInformation.getCurrentUser().getShortUserName(), UserGroupInformation.getCurrentUser().getShortUserName()); + HiveMetastoreClient realClient = clientProvider.createMetastoreClient(token); + return 
callable.call(realClient, metastoreStats.get(realClient.getAddress())); + } + } + @Override public void dropPartition(String databaseName, String tableName, List parts, boolean deleteData) { @@ -1005,12 +1041,12 @@ public void dropPartition(String databaseName, String tableName, List pa retry() .stopOn(NoSuchObjectException.class, MetaException.class) .stopOnIllegalExceptions() - .run("dropPartition", stats.getDropPartition().wrap(() -> { - try (HiveMetastoreClient client = clientProvider.createMetastoreClient()) { - client.dropPartition(databaseName, tableName, parts, deleteData); - } - return null; - })); + .run("dropPartition", stats.getDropPartition().wrap(() -> + wrapMetastoreCallable((HiveMetastoreClient client, ThriftHiveMetastoreStats stat) -> + stat.getDropPartition().wrapCall(() -> { + client.dropPartition(databaseName, tableName, parts, deleteData); + return null; + })))); } catch (NoSuchObjectException e) { throw new PartitionNotFoundException(new SchemaTableName(databaseName, tableName), parts); @@ -1037,12 +1073,12 @@ private void alterPartitionWithoutStatistics(String databaseName, String tableNa retry() .stopOn(NoSuchObjectException.class, MetaException.class) .stopOnIllegalExceptions() - .run("alterPartition", stats.getAlterPartition().wrap(() -> { - try (HiveMetastoreClient client = clientProvider.createMetastoreClient()) { - client.alterPartition(databaseName, tableName, partition); - } - return null; - })); + .run("alterPartition", stats.getAlterPartition().wrap(() -> + wrapMetastoreCallable((HiveMetastoreClient client, ThriftHiveMetastoreStats stat) -> + stat.getAlterPartition().wrapCall(() -> { + client.alterPartition(databaseName, tableName, partition); + return null; + })))); } catch (NoSuchObjectException e) { throw new PartitionNotFoundException(new SchemaTableName(databaseName, tableName), partition.getValues()); @@ -1116,11 +1152,9 @@ public Optional getPartition(String databaseName, String tableName, L return retry() 
.stopOn(NoSuchObjectException.class) .stopOnIllegalExceptions() - .run("getPartition", stats.getGetPartition().wrap(() -> { - try (HiveMetastoreClient client = clientProvider.createMetastoreClient()) { - return Optional.of(client.getPartition(databaseName, tableName, partitionValues)); - } - })); + .run("getPartition", stats.getGetPartition().wrap(() -> + wrapMetastoreCallable((HiveMetastoreClient client, ThriftHiveMetastoreStats stat) -> + Optional.of(stat.getGetPartition().wrapCall(() -> client.getPartition(databaseName, tableName, partitionValues)))))); } catch (NoSuchObjectException e) { return Optional.empty(); @@ -1143,11 +1177,9 @@ public List getPartitionsByNames(String databaseName, String tableNam return retry() .stopOn(NoSuchObjectException.class) .stopOnIllegalExceptions() - .run("getPartitionsByNames", stats.getGetPartitionsByNames().wrap(() -> { - try (HiveMetastoreClient client = clientProvider.createMetastoreClient()) { - return client.getPartitionsByNames(databaseName, tableName, partitionNames); - } - })); + .run("getPartitionsByNames", stats.getGetPartitionsByNames().wrap(() -> + wrapMetastoreCallable((HiveMetastoreClient client, ThriftHiveMetastoreStats stat) -> + stat.getGetPartitionsByNames().wrapCall(() -> client.getPartitionsByNames(databaseName, tableName, partitionNames))))); } catch (NoSuchObjectException e) { // assume none of the partitions in the batch are available @@ -1172,37 +1204,34 @@ public void grantTablePrivileges(String databaseName, String tableName, PrestoPr try { retry() .stopOnIllegalExceptions() - .run("grantTablePrivileges", stats.getGrantTablePrivileges().wrap(() -> { - try (HiveMetastoreClient metastoreClient = clientProvider.createMetastoreClient()) { - Set existingPrivileges = listTablePrivileges(databaseName, tableName, grantee); - - Set privilegesToGrant = new HashSet<>(requestedPrivileges); - Iterator iterator = privilegesToGrant.iterator(); - while (iterator.hasNext()) { - HivePrivilegeInfo requestedPrivilege = 
getOnlyElement(parsePrivilege(iterator.next(), Optional.empty())); - - for (HivePrivilegeInfo existingPrivilege : existingPrivileges) { - if ((requestedPrivilege.isContainedIn(existingPrivilege))) { - iterator.remove(); - } - else if (existingPrivilege.isContainedIn(requestedPrivilege)) { - throw new PrestoException(NOT_SUPPORTED, format( - "Granting %s WITH GRANT OPTION is not supported while %s possesses %s", - requestedPrivilege.getHivePrivilege().name(), - grantee, - requestedPrivilege.getHivePrivilege().name())); + .run("grantTablePrivileges", stats.getGrantTablePrivileges().wrap(() -> + wrapMetastoreCallable((HiveMetastoreClient client, ThriftHiveMetastoreStats stat) -> { + Set existingPrivileges = listTablePrivileges(databaseName, tableName, grantee); + Set privilegesToGrant = new HashSet<>(requestedPrivileges); + Iterator iterator = privilegesToGrant.iterator(); + while (iterator.hasNext()) { + HivePrivilegeInfo requestedPrivilege = getOnlyElement(parsePrivilege(iterator.next(), Optional.empty())); + + for (HivePrivilegeInfo existingPrivilege : existingPrivileges) { + if ((requestedPrivilege.isContainedIn(existingPrivilege))) { + iterator.remove(); + } + else if (existingPrivilege.isContainedIn(requestedPrivilege)) { + throw new PrestoException(NOT_SUPPORTED, format( + "Granting %s WITH GRANT OPTION is not supported while %s possesses %s", + requestedPrivilege.getHivePrivilege().name(), + grantee, + requestedPrivilege.getHivePrivilege().name())); + } } } - } - if (privilegesToGrant.isEmpty()) { - return null; - } + if (privilegesToGrant.isEmpty()) { + return null; + } - metastoreClient.grantPrivileges(buildPrivilegeBag(databaseName, tableName, grantee, privilegesToGrant)); - } - return null; - })); + return stat.getGrantTablePrivileges().wrapCall(() -> client.grantPrivileges(buildPrivilegeBag(databaseName, tableName, grantee, privilegesToGrant))); + }))); } catch (TException e) { throw new PrestoException(HIVE_METASTORE_ERROR, e); @@ -1223,8 +1252,8 @@ 
public void revokeTablePrivileges(String databaseName, String tableName, PrestoP try { retry() .stopOnIllegalExceptions() - .run("revokeTablePrivileges", stats.getRevokeTablePrivileges().wrap(() -> { - try (HiveMetastoreClient metastoreClient = clientProvider.createMetastoreClient()) { + .run("revokeTablePrivileges", stats.getRevokeTablePrivileges().wrap(() -> + wrapMetastoreCallable((HiveMetastoreClient client, ThriftHiveMetastoreStats stat) -> { Set existingHivePrivileges = listTablePrivileges(databaseName, tableName, grantee).stream() .map(HivePrivilegeInfo::getHivePrivilege) .collect(toSet()); @@ -1237,10 +1266,8 @@ public void revokeTablePrivileges(String databaseName, String tableName, PrestoP return null; } - metastoreClient.revokePrivileges(buildPrivilegeBag(databaseName, tableName, grantee, privilegesToRevoke)); - } - return null; - })); + return stat.getRevokeTablePrivileges().wrapCall(() -> client.revokePrivileges(buildPrivilegeBag(databaseName, tableName, grantee, privilegesToRevoke))); + }))); } catch (TException e) { throw new PrestoException(HIVE_METASTORE_ERROR, e); @@ -1256,34 +1283,33 @@ public Set listTablePrivileges(String databaseName, String ta try { return retry() .stopOnIllegalExceptions() - .run("getListPrivileges", stats.getListPrivileges().wrap(() -> { - try (HiveMetastoreClient client = clientProvider.createMetastoreClient()) { - Table table = client.getTable(databaseName, tableName); - ImmutableSet.Builder privileges = ImmutableSet.builder(); - List hiveObjectPrivilegeList; - // principal can be null when we want to list all privileges for admins - if (principal == null) { - hiveObjectPrivilegeList = client.listPrivileges( - null, - null, - new HiveObjectRef(TABLE, databaseName, tableName, null, null)); - } - else { - if (principal.getType() == USER && table.getOwner().equals(principal.getName())) { - privileges.add(new HivePrivilegeInfo(OWNERSHIP, true, principal, principal)); + .run("getListPrivileges", 
stats.getListPrivileges().wrap(() -> + wrapMetastoreCallable((HiveMetastoreClient client, ThriftHiveMetastoreStats stat) -> { + Table table = client.getTable(databaseName, tableName); + ImmutableSet.Builder privileges = ImmutableSet.builder(); + List hiveObjectPrivilegeList; + // principal can be null when we want to list all privileges for admins + if (principal == null) { + hiveObjectPrivilegeList = client.listPrivileges( + null, + null, + new HiveObjectRef(TABLE, databaseName, tableName, null, null)); } - hiveObjectPrivilegeList = client.listPrivileges( - principal.getName(), - fromPrestoPrincipalType(principal.getType()), - new HiveObjectRef(TABLE, databaseName, tableName, null, null)); - } - for (HiveObjectPrivilege hiveObjectPrivilege : hiveObjectPrivilegeList) { - PrestoPrincipal grantee = new PrestoPrincipal(fromMetastoreApiPrincipalType(hiveObjectPrivilege.getPrincipalType()), hiveObjectPrivilege.getPrincipalName()); - privileges.addAll(parsePrivilege(hiveObjectPrivilege.getGrantInfo(), Optional.of(grantee))); - } - return privileges.build(); - } - })); + else { + if (principal.getType() == USER && table.getOwner().equals(principal.getName())) { + privileges.add(new HivePrivilegeInfo(OWNERSHIP, true, principal, principal)); + } + hiveObjectPrivilegeList = client.listPrivileges( + principal.getName(), + fromPrestoPrincipalType(principal.getType()), + new HiveObjectRef(TABLE, databaseName, tableName, null, null)); + } + for (HiveObjectPrivilege hiveObjectPrivilege : hiveObjectPrivilegeList) { + PrestoPrincipal grantee = new PrestoPrincipal(fromMetastoreApiPrincipalType(hiveObjectPrivilege.getPrincipalType()), hiveObjectPrivilege.getPrincipalName()); + privileges.addAll(parsePrivilege(hiveObjectPrivilege.getGrantInfo(), Optional.of(grantee))); + } + return privileges.build(); + }))); } catch (TException e) { throw new PrestoException(HIVE_METASTORE_ERROR, e); diff --git 
a/presto-hive-metastore/src/main/java/com/facebook/presto/hive/metastore/thrift/ThriftHiveMetastoreClient.java b/presto-hive-metastore/src/main/java/com/facebook/presto/hive/metastore/thrift/ThriftHiveMetastoreClient.java index 59021cf2d1edd..28028989d4332 100644 --- a/presto-hive-metastore/src/main/java/com/facebook/presto/hive/metastore/thrift/ThriftHiveMetastoreClient.java +++ b/presto-hive-metastore/src/main/java/com/facebook/presto/hive/metastore/thrift/ThriftHiveMetastoreClient.java @@ -14,6 +14,7 @@ package com.facebook.presto.hive.metastore.thrift; import com.google.common.collect.ImmutableList; +import com.google.common.net.HostAndPort; import org.apache.hadoop.hive.metastore.api.ColumnStatistics; import org.apache.hadoop.hive.metastore.api.ColumnStatisticsDesc; import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj; @@ -52,15 +53,18 @@ public class ThriftHiveMetastoreClient { private final TTransport transport; private final ThriftHiveMetastore.Client client; + private final HostAndPort address; - public ThriftHiveMetastoreClient(TTransport transport) + public ThriftHiveMetastoreClient(HostAndPort address, TTransport transport) { + this.address = address; this.transport = requireNonNull(transport, "transport is null"); this.client = new ThriftHiveMetastore.Client(new TBinaryProtocol(transport)); } - public ThriftHiveMetastoreClient(TProtocol protocol) + public ThriftHiveMetastoreClient(HostAndPort address, TProtocol protocol) { + this.address = address; this.transport = protocol.getTransport(); this.client = new ThriftHiveMetastore.Client(protocol); } @@ -71,6 +75,18 @@ public void close() transport.close(); } + @Override + public HostAndPort getAddress() + { + return address; + } + + @Override + public ThriftHiveMetastore.Client getMetastoreClient() + { + return client; + } + @Override public List getAllDatabases() throws TException diff --git 
a/presto-hive-metastore/src/main/java/com/facebook/presto/hive/metastore/thrift/ThriftHiveMetastoreStats.java b/presto-hive-metastore/src/main/java/com/facebook/presto/hive/metastore/thrift/ThriftHiveMetastoreStats.java index fa5f5cebd6192..a95f9290972ff 100644 --- a/presto-hive-metastore/src/main/java/com/facebook/presto/hive/metastore/thrift/ThriftHiveMetastoreStats.java +++ b/presto-hive-metastore/src/main/java/com/facebook/presto/hive/metastore/thrift/ThriftHiveMetastoreStats.java @@ -26,6 +26,8 @@ public class ThriftHiveMetastoreStats private final HiveMetastoreApiStats getFields = new HiveMetastoreApiStats(); private final HiveMetastoreApiStats getTableColumnStatistics = new HiveMetastoreApiStats(); private final HiveMetastoreApiStats getPartitionColumnStatistics = new HiveMetastoreApiStats(); + private final HiveMetastoreApiStats updateTableColumnStatistics = new HiveMetastoreApiStats(); + private final HiveMetastoreApiStats updatePartitionColumnStatistics = new HiveMetastoreApiStats(); private final HiveMetastoreApiStats getPartitionNames = new HiveMetastoreApiStats(); private final HiveMetastoreApiStats getPartitionNamesPs = new HiveMetastoreApiStats(); private final HiveMetastoreApiStats getPartition = new HiveMetastoreApiStats(); @@ -105,6 +107,20 @@ public HiveMetastoreApiStats getGetPartitionColumnStatistics() return getPartitionColumnStatistics; } + @Managed + @Nested + public HiveMetastoreApiStats getUpdateTableColumnStatistics() + { + return updateTableColumnStatistics; + } + + @Managed + @Nested + public HiveMetastoreApiStats getUpdatePartitionColumnStatistics() + { + return updatePartitionColumnStatistics; + } + @Managed @Nested public HiveMetastoreApiStats getGetPartitionNames() diff --git a/presto-hive-metastore/src/main/java/com/facebook/presto/hive/metastore/thrift/ThriftMetastoreUtil.java b/presto-hive-metastore/src/main/java/com/facebook/presto/hive/metastore/thrift/ThriftMetastoreUtil.java index 6eab0b1be2863..be4c99a4d6620 100644 --- 
a/presto-hive-metastore/src/main/java/com/facebook/presto/hive/metastore/thrift/ThriftMetastoreUtil.java +++ b/presto-hive-metastore/src/main/java/com/facebook/presto/hive/metastore/thrift/ThriftMetastoreUtil.java @@ -236,18 +236,18 @@ protected RoleGrant computeNext() }); } - public static boolean isRoleApplicable(SemiTransactionalHiveMetastore metastore, PrestoPrincipal principal, String role) + public static boolean isRoleApplicable(SemiTransactionalHiveMetastore metastore, ConnectorIdentity identity, PrestoPrincipal principal, String role) { if (principal.getType() == ROLE && principal.getName().equals(role)) { return true; } - return listApplicableRoles(metastore, principal) + return listApplicableRoles(metastore, identity, principal) .anyMatch(role::equals); } - public static Stream listApplicableRoles(SemiTransactionalHiveMetastore metastore, PrestoPrincipal principal) + public static Stream listApplicableRoles(SemiTransactionalHiveMetastore metastore, ConnectorIdentity identity, PrestoPrincipal principal) { - return listApplicableRoles(principal, metastore::listRoleGrants) + return listApplicableRoles(principal, (PrestoPrincipal p) -> metastore.listRoleGrants(identity, p)) .map(RoleGrant::getRoleName); } @@ -255,28 +255,28 @@ public static Stream listEnabledPrincipals(SemiTransactionalHiv { return Stream.concat( Stream.of(new PrestoPrincipal(USER, identity.getUser())), - listEnabledRoles(identity, metastore::listRoleGrants) + listEnabledRoles(identity, (PrestoPrincipal p) -> metastore.listRoleGrants(identity, p)) .map(role -> new PrestoPrincipal(ROLE, role))); } public static Stream listEnabledTablePrivileges(SemiTransactionalHiveMetastore metastore, String databaseName, String tableName, ConnectorIdentity identity) { - return listTablePrivileges(metastore, databaseName, tableName, listEnabledPrincipals(metastore, identity)); + return listTablePrivileges(identity, metastore, databaseName, tableName, listEnabledPrincipals(metastore, identity)); } - public 
static Stream listApplicableTablePrivileges(SemiTransactionalHiveMetastore metastore, String databaseName, String tableName, String user) + public static Stream listApplicableTablePrivileges(SemiTransactionalHiveMetastore metastore, ConnectorIdentity identity, String databaseName, String tableName, String user) { PrestoPrincipal userPrincipal = new PrestoPrincipal(USER, user); Stream principals = Stream.concat( Stream.of(userPrincipal), - listApplicableRoles(metastore, userPrincipal) + listApplicableRoles(metastore, identity, userPrincipal) .map(role -> new PrestoPrincipal(ROLE, role))); - return listTablePrivileges(metastore, databaseName, tableName, principals); + return listTablePrivileges(identity, metastore, databaseName, tableName, principals); } - private static Stream listTablePrivileges(SemiTransactionalHiveMetastore metastore, String databaseName, String tableName, Stream principals) + private static Stream listTablePrivileges(ConnectorIdentity identity, SemiTransactionalHiveMetastore metastore, String databaseName, String tableName, Stream principals) { - return principals.flatMap(principal -> metastore.listTablePrivileges(databaseName, tableName, principal).stream()); + return principals.flatMap(principal -> metastore.listTablePrivileges(identity, databaseName, tableName, principal).stream()); } public static boolean isRoleEnabled(ConnectorIdentity identity, Function> listRoleGrants, String role) diff --git a/presto-hive-metastore/src/main/java/com/facebook/presto/hive/metastore/thrift/Transport.java b/presto-hive-metastore/src/main/java/com/facebook/presto/hive/metastore/thrift/Transport.java index f23c245ba85b1..f861ccb035329 100644 --- a/presto-hive-metastore/src/main/java/com/facebook/presto/hive/metastore/thrift/Transport.java +++ b/presto-hive-metastore/src/main/java/com/facebook/presto/hive/metastore/thrift/Transport.java @@ -36,12 +36,19 @@ public static TTransport create( Optional sslContext, Optional socksProxy, int timeoutMillis, - 
HiveMetastoreAuthentication authentication) + HiveMetastoreAuthentication authentication, + String tokenString) throws TTransportException { try { TTransport rawTransport = createRaw(address, sslContext, socksProxy, timeoutMillis); - TTransport authenticatedTransport = authentication.authenticate(rawTransport, address.getHost()); + TTransport authenticatedTransport; + if (tokenString == null) { + authenticatedTransport = authentication.authenticate(rawTransport, address.getHost()); + } + else { + authenticatedTransport = authentication.authenticateWithToken(rawTransport, tokenString); + } if (!authenticatedTransport.isOpen()) { authenticatedTransport.open(); } diff --git a/presto-hive-metastore/src/test/java/com/facebook/presto/hive/metastore/TestCachingHiveMetastore.java b/presto-hive-metastore/src/test/java/com/facebook/presto/hive/metastore/TestCachingHiveMetastore.java index ea056ae445b54..f899da36a4c15 100644 --- a/presto-hive-metastore/src/test/java/com/facebook/presto/hive/metastore/TestCachingHiveMetastore.java +++ b/presto-hive-metastore/src/test/java/com/facebook/presto/hive/metastore/TestCachingHiveMetastore.java @@ -13,6 +13,7 @@ */ package com.facebook.presto.hive.metastore; +import com.facebook.presto.hive.MetastoreClientConfig; import com.facebook.presto.hive.metastore.thrift.BridgingHiveMetastore; import com.facebook.presto.hive.metastore.thrift.HiveCluster; import com.facebook.presto.hive.metastore.thrift.HiveMetastoreClient; @@ -22,11 +23,15 @@ import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; import com.google.common.collect.Iterables; +import com.google.common.net.HostAndPort; import com.google.common.util.concurrent.ListeningExecutorService; import io.airlift.units.Duration; import org.testng.annotations.BeforeMethod; import org.testng.annotations.Test; +import org.weakref.jmx.MBeanExporter; +import org.weakref.jmx.testing.TestingMBeanServer; +import java.util.List; import java.util.Map; import 
java.util.Optional; import java.util.concurrent.TimeUnit; @@ -51,6 +56,7 @@ public class TestCachingHiveMetastore private MockHiveMetastoreClient mockClient; private CachingHiveMetastore metastore; private ThriftHiveMetastoreStats stats; + private List metastoreStats; @BeforeMethod public void setUp() @@ -58,7 +64,7 @@ public void setUp() mockClient = new MockHiveMetastoreClient(); MockHiveCluster mockHiveCluster = new MockHiveCluster(mockClient); ListeningExecutorService executor = listeningDecorator(newCachedThreadPool(daemonThreadsNamed("test-%s"))); - ThriftHiveMetastore thriftHiveMetastore = new ThriftHiveMetastore(mockHiveCluster); + ThriftHiveMetastore thriftHiveMetastore = new ThriftHiveMetastore(mockHiveCluster, new MetastoreClientConfig(), new MBeanExporter(new TestingMBeanServer())); metastore = new CachingHiveMetastore( new BridgingHiveMetastore(thriftHiveMetastore), executor, @@ -66,6 +72,7 @@ public void setUp() new Duration(1, TimeUnit.MINUTES), 1000); stats = thriftHiveMetastore.getStats(); + metastoreStats = thriftHiveMetastore.getAllStats(); } @Test @@ -124,6 +131,11 @@ public void testInvalidDbGetTable() assertEquals(stats.getGetTable().getThriftExceptions().getTotalCount(), 0); assertEquals(stats.getGetTable().getTotalFailures().getTotalCount(), 0); + assertNotNull(stats.getGetTable().getTime()); + + assertEquals(metastoreStats.get(0).getGetTable().getThriftExceptions().getTotalCount(), 0); + assertEquals(metastoreStats.get(0).getGetTable().getTotalFailures().getTotalCount(), 0); + assertNotNull(metastoreStats.get(0).getGetTable().getTime()); } @Test @@ -267,7 +279,13 @@ private MockHiveCluster(HiveMetastoreClient client) } @Override - public HiveMetastoreClient createMetastoreClient() + public List getAddresses() + { + return ImmutableList.of(HostAndPort.fromHost("localhost")); + } + + @Override + public HiveMetastoreClient createMetastoreClient(String token) { return client; } diff --git 
a/presto-hive-metastore/src/test/java/com/facebook/presto/hive/metastore/TestMetastoreClientConfig.java b/presto-hive-metastore/src/test/java/com/facebook/presto/hive/metastore/TestMetastoreClientConfig.java index 92db54c36d355..22b47e3cd4c26 100644 --- a/presto-hive-metastore/src/test/java/com/facebook/presto/hive/metastore/TestMetastoreClientConfig.java +++ b/presto-hive-metastore/src/test/java/com/facebook/presto/hive/metastore/TestMetastoreClientConfig.java @@ -40,7 +40,9 @@ public void testDefaults() .setMaxMetastoreRefreshThreads(100) .setRecordingPath(null) .setRecordingDuration(new Duration(0, TimeUnit.MINUTES)) - .setReplay(false)); + .setReplay(false) + .setMetastoreImpersonationEnabled(false) + .setMetastoreDefaultImpersonationUser("")); } @Test @@ -59,6 +61,8 @@ public void testExplicitPropertyMappings() .put("hive.metastore-recording-path", "/foo/bar") .put("hive.metastore-recoding-duration", "42s") .put("hive.replay-metastore-recording", "true") + .put("hive.metastore.impersonation.enabled", "true") + .put("hive.metastore.impersonation.user", "test") .build(); MetastoreClientConfig expected = new MetastoreClientConfig() @@ -73,7 +77,9 @@ public void testExplicitPropertyMappings() .setMaxMetastoreRefreshThreads(2500) .setRecordingPath("/foo/bar") .setRecordingDuration(new Duration(42, TimeUnit.SECONDS)) - .setReplay(true); + .setReplay(true) + .setMetastoreImpersonationEnabled(true) + .setMetastoreDefaultImpersonationUser("test"); ConfigAssertions.assertFullMapping(properties, expected); } diff --git a/presto-hive-metastore/src/test/java/com/facebook/presto/hive/metastore/thrift/MockHiveMetastoreClient.java b/presto-hive-metastore/src/test/java/com/facebook/presto/hive/metastore/thrift/MockHiveMetastoreClient.java index cf618a10d8f5d..ed97795bdc7b5 100644 --- a/presto-hive-metastore/src/test/java/com/facebook/presto/hive/metastore/thrift/MockHiveMetastoreClient.java +++ 
b/presto-hive-metastore/src/test/java/com/facebook/presto/hive/metastore/thrift/MockHiveMetastoreClient.java @@ -17,6 +17,7 @@ import com.google.common.collect.ImmutableMap; import com.google.common.collect.ImmutableSet; import com.google.common.collect.Lists; +import com.google.common.net.HostAndPort; import org.apache.hadoop.hive.metastore.TableType; import org.apache.hadoop.hive.metastore.Warehouse; import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj; @@ -34,6 +35,7 @@ import org.apache.hadoop.hive.metastore.api.SerDeInfo; import org.apache.hadoop.hive.metastore.api.StorageDescriptor; import org.apache.hadoop.hive.metastore.api.Table; +import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Client; import org.apache.thrift.TException; import java.util.List; @@ -84,6 +86,18 @@ public List getAllDatabases() return ImmutableList.of(TEST_DATABASE); } + @Override + public Client getMetastoreClient() + { + return null; + } + + @Override + public HostAndPort getAddress() + { + return HostAndPort.fromHost("localhost"); + } + @Override public List getAllTables(String dbName) { diff --git a/presto-hive-metastore/src/test/java/com/facebook/presto/hive/metastore/thrift/MockHiveMetastoreClientFactory.java b/presto-hive-metastore/src/test/java/com/facebook/presto/hive/metastore/thrift/MockHiveMetastoreClientFactory.java index 216ff776e9e3e..36a7cd91df4f5 100644 --- a/presto-hive-metastore/src/test/java/com/facebook/presto/hive/metastore/thrift/MockHiveMetastoreClientFactory.java +++ b/presto-hive-metastore/src/test/java/com/facebook/presto/hive/metastore/thrift/MockHiveMetastoreClientFactory.java @@ -37,7 +37,7 @@ public MockHiveMetastoreClientFactory(Optional socksProxy, Duration } @Override - public HiveMetastoreClient create(HostAndPort address) + public HiveMetastoreClient create(HostAndPort address, String token) throws TTransportException { checkState(!clients.isEmpty(), "mock not given enough clients"); diff --git 
a/presto-hive-metastore/src/test/java/com/facebook/presto/hive/metastore/thrift/TestStaticHiveCluster.java b/presto-hive-metastore/src/test/java/com/facebook/presto/hive/metastore/thrift/TestStaticHiveCluster.java index da5f947fdd787..eb8dd9a01b7bc 100644 --- a/presto-hive-metastore/src/test/java/com/facebook/presto/hive/metastore/thrift/TestStaticHiveCluster.java +++ b/presto-hive-metastore/src/test/java/com/facebook/presto/hive/metastore/thrift/TestStaticHiveCluster.java @@ -51,7 +51,7 @@ public void testDefaultHiveMetastore() throws TException { HiveCluster cluster = createHiveCluster(CONFIG_WITH_FALLBACK, singletonList(DEFAULT_CLIENT)); - assertEquals(cluster.createMetastoreClient(), DEFAULT_CLIENT); + assertEquals(cluster.createMetastoreClient(null), DEFAULT_CLIENT); } @Test @@ -59,7 +59,7 @@ public void testFallbackHiveMetastore() throws TException { HiveCluster cluster = createHiveCluster(CONFIG_WITH_FALLBACK, asList(null, null, FALLBACK_CLIENT)); - assertEquals(cluster.createMetastoreClient(), FALLBACK_CLIENT); + assertEquals(cluster.createMetastoreClient(null), FALLBACK_CLIENT); } @Test @@ -81,7 +81,7 @@ public void testFallbackHiveMetastoreWithHiveUser() throws TException { HiveCluster cluster = createHiveCluster(CONFIG_WITH_FALLBACK_WITH_USER, asList(null, null, FALLBACK_CLIENT)); - assertEquals(cluster.createMetastoreClient(), FALLBACK_CLIENT); + assertEquals(cluster.createMetastoreClient(null), FALLBACK_CLIENT); } @Test @@ -94,7 +94,7 @@ public void testMetastoreFailedWithoutFallbackWithHiveUser() private static void assertCreateClientFails(HiveCluster cluster, String message) { try { - cluster.createMetastoreClient(); + cluster.createMetastoreClient(null); fail("expected exception"); } catch (TException e) { diff --git a/presto-hive-metastore/src/test/java/com/facebook/presto/hive/metastore/thrift/TestStaticMetastoreConfig.java b/presto-hive-metastore/src/test/java/com/facebook/presto/hive/metastore/thrift/TestStaticMetastoreConfig.java index 
f76c2b3c611fb..f493c38ae816b 100644 --- a/presto-hive-metastore/src/test/java/com/facebook/presto/hive/metastore/thrift/TestStaticMetastoreConfig.java +++ b/presto-hive-metastore/src/test/java/com/facebook/presto/hive/metastore/thrift/TestStaticMetastoreConfig.java @@ -32,7 +32,8 @@ public void testDefaults() { assertRecordedDefaults(recordDefaults(StaticMetastoreConfig.class) .setMetastoreUris(null) - .setMetastoreUsername(null)); + .setMetastoreUsername(null) + .setMultipleMetastoreEnabled(false)); } @Test @@ -41,11 +42,13 @@ public void testExplicitPropertyMappingsSingleMetastore() Map properties = new ImmutableMap.Builder() .put("hive.metastore.uri", "thrift://localhost:9083") .put("hive.metastore.username", "presto") + .put("hive.metastore.multiple-instance.enabled", "true") .build(); StaticMetastoreConfig expected = new StaticMetastoreConfig() .setMetastoreUris("thrift://localhost:9083") - .setMetastoreUsername("presto"); + .setMetastoreUsername("presto") + .setMultipleMetastoreEnabled(true); assertFullMapping(properties, expected); assertEquals(expected.getMetastoreUris(), ImmutableList.of(URI.create("thrift://localhost:9083"))); @@ -58,11 +61,13 @@ public void testExplicitPropertyMappingsMultipleMetastores() Map properties = new ImmutableMap.Builder() .put("hive.metastore.uri", "thrift://localhost:9083,thrift://192.0.2.3:8932") .put("hive.metastore.username", "presto") + .put("hive.metastore.multiple-instance.enabled", "true") .build(); StaticMetastoreConfig expected = new StaticMetastoreConfig() .setMetastoreUris("thrift://localhost:9083,thrift://192.0.2.3:8932") - .setMetastoreUsername("presto"); + .setMetastoreUsername("presto") + .setMultipleMetastoreEnabled(true); assertFullMapping(properties, expected); assertEquals(expected.getMetastoreUris(), ImmutableList.of( diff --git a/presto-hive-metastore/src/test/java/com/facebook/presto/hive/metastore/thrift/TestingHiveCluster.java 
b/presto-hive-metastore/src/test/java/com/facebook/presto/hive/metastore/thrift/TestingHiveCluster.java index 570f66c9cb1e3..2f2a9acd06243 100644 --- a/presto-hive-metastore/src/test/java/com/facebook/presto/hive/metastore/thrift/TestingHiveCluster.java +++ b/presto-hive-metastore/src/test/java/com/facebook/presto/hive/metastore/thrift/TestingHiveCluster.java @@ -15,9 +15,11 @@ import com.facebook.presto.hive.MetastoreClientConfig; import com.facebook.presto.hive.authentication.NoHiveMetastoreAuthentication; +import com.google.common.collect.ImmutableList; import com.google.common.net.HostAndPort; import org.apache.thrift.TException; +import java.util.List; import java.util.Objects; import static java.util.Objects.requireNonNull; @@ -35,10 +37,16 @@ public TestingHiveCluster(MetastoreClientConfig metastoreClientConfig, String ho } @Override - public HiveMetastoreClient createMetastoreClient() + public List getAddresses() + { + return ImmutableList.of(address); + } + + @Override + public HiveMetastoreClient createMetastoreClient(String token) throws TException { - return new HiveMetastoreClientFactory(metastoreClientConfig, new NoHiveMetastoreAuthentication()).create(address); + return new HiveMetastoreClientFactory(metastoreClientConfig, new NoHiveMetastoreAuthentication()).create(address, token); } @Override diff --git a/presto-hive/pom.xml b/presto-hive/pom.xml index c1347d54f8032..0f525395b8718 100644 --- a/presto-hive/pom.xml +++ b/presto-hive/pom.xml @@ -244,6 +244,11 @@ runtime + + commons-codec + commons-codec + + javax.inject javax.inject diff --git a/presto-hive/src/main/java/com/facebook/presto/hive/HiveMetadata.java b/presto-hive/src/main/java/com/facebook/presto/hive/HiveMetadata.java index 1800fcc61c866..1e096c83a50b3 100644 --- a/presto-hive/src/main/java/com/facebook/presto/hive/HiveMetadata.java +++ b/presto-hive/src/main/java/com/facebook/presto/hive/HiveMetadata.java @@ -73,6 +73,7 @@ import com.facebook.presto.spi.relation.RowExpression; import 
com.facebook.presto.spi.relation.RowExpressionService; import com.facebook.presto.spi.relation.SpecialFormExpression; +import com.facebook.presto.spi.security.ConnectorIdentity; import com.facebook.presto.spi.security.GrantInfo; import com.facebook.presto.spi.security.PrestoPrincipal; import com.facebook.presto.spi.security.Privilege; @@ -373,14 +374,14 @@ public SemiTransactionalHiveMetastore getMetastore() @Override public List listSchemaNames(ConnectorSession session) { - return metastore.getAllDatabases(); + return metastore.getAllDatabases(session.getIdentity()); } @Override public HiveTableHandle getTableHandle(ConnectorSession session, SchemaTableName tableName) { requireNonNull(tableName, "tableName is null"); - Optional
table = metastore.getTable(tableName.getSchemaName(), tableName.getTableName()); + Optional
table = metastore.getTable(session.getIdentity(), tableName.getSchemaName(), tableName.getTableName()); if (!table.isPresent()) { return null; } @@ -405,7 +406,7 @@ public ConnectorTableHandle getTableHandleForStatisticsCollection(ConnectorSessi return null; } Optional>> partitionValuesList = getPartitionList(analyzeProperties); - ConnectorTableMetadata tableMetadata = getTableMetadata(handle.getSchemaTableName()); + ConnectorTableMetadata tableMetadata = getTableMetadata(session.getIdentity(), handle.getSchemaTableName()); handle = handle.withAnalyzePartitionValues(partitionValuesList); List partitionedBy = getPartitionedBy(tableMetadata.getProperties()); @@ -423,14 +424,14 @@ public Optional getSystemTable(ConnectorSession session, SchemaTabl return getPartitionsSystemTable(session, tableName, SystemTableHandler.PARTITIONS.getSourceTableName(tableName)); } if (SystemTableHandler.PROPERTIES.matches(tableName)) { - return getPropertiesSystemTable(tableName, SystemTableHandler.PROPERTIES.getSourceTableName(tableName)); + return getPropertiesSystemTable(session, tableName, SystemTableHandler.PROPERTIES.getSourceTableName(tableName)); } return Optional.empty(); } - private Optional getPropertiesSystemTable(SchemaTableName tableName, SchemaTableName sourceTableName) + private Optional getPropertiesSystemTable(ConnectorSession session, SchemaTableName tableName, SchemaTableName sourceTableName) { - Optional
table = metastore.getTable(sourceTableName.getSchemaName(), sourceTableName.getTableName()); + Optional
table = metastore.getTable(session.getIdentity(), sourceTableName.getSchemaName(), sourceTableName.getTableName()); if (!table.isPresent() || table.get().getTableType().equals(VIRTUAL_VIEW)) { throw new TableNotFoundException(tableName); } @@ -454,7 +455,7 @@ private Optional getPartitionsSystemTable(ConnectorSession session, return Optional.empty(); } - List partitionColumns = getPartitionColumns(sourceTableName); + List partitionColumns = getPartitionColumns(session.getIdentity(), sourceTableName); if (partitionColumns.isEmpty()) { return Optional.empty(); } @@ -496,9 +497,9 @@ private Optional getPartitionsSystemTable(ConnectorSession session, })); } - private List getPartitionColumns(SchemaTableName tableName) + private List getPartitionColumns(ConnectorIdentity identity, SchemaTableName tableName) { - Table sourceTable = metastore.getTable(tableName.getSchemaName(), tableName.getTableName()).get(); + Table sourceTable = metastore.getTable(identity, tableName.getSchemaName(), tableName.getTableName()).get(); return getPartitionKeyColumnHandles(sourceTable); } @@ -507,12 +508,12 @@ public ConnectorTableMetadata getTableMetadata(ConnectorSession session, Connect { requireNonNull(tableHandle, "tableHandle is null"); SchemaTableName tableName = schemaTableName(tableHandle); - return getTableMetadata(tableName); + return getTableMetadata(session.getIdentity(), tableName); } - private ConnectorTableMetadata getTableMetadata(SchemaTableName tableName) + private ConnectorTableMetadata getTableMetadata(ConnectorIdentity identity, SchemaTableName tableName) { - Optional
table = metastore.getTable(tableName.getSchemaName(), tableName.getTableName()); + Optional
table = metastore.getTable(identity, tableName.getSchemaName(), tableName.getTableName()); if (!table.isPresent() || table.get().getTableType().equals(VIRTUAL_VIEW)) { throw new TableNotFoundException(tableName); } @@ -607,7 +608,7 @@ public List listTables(ConnectorSession session, String schemaN { ImmutableList.Builder tableNames = ImmutableList.builder(); for (String schemaName : listSchemas(session, schemaNameOrNull)) { - for (String tableName : metastore.getAllTables(schemaName).orElse(emptyList())) { + for (String tableName : metastore.getAllTables(session.getIdentity(), schemaName).orElse(emptyList())) { tableNames.add(new SchemaTableName(schemaName, tableName)); } } @@ -626,7 +627,7 @@ private List listSchemas(ConnectorSession session, String schemaNameOrNu public Map getColumnHandles(ConnectorSession session, ConnectorTableHandle tableHandle) { SchemaTableName tableName = schemaTableName(tableHandle); - Optional
table = metastore.getTable(tableName.getSchemaName(), tableName.getTableName()); + Optional
table = metastore.getTable(session.getIdentity(), tableName.getSchemaName(), tableName.getTableName()); if (!table.isPresent()) { throw new TableNotFoundException(tableName); } @@ -645,7 +646,7 @@ public Map> listTableColumns(ConnectorSess ImmutableMap.Builder> columns = ImmutableMap.builder(); for (SchemaTableName tableName : listTables(session, prefix)) { try { - columns.put(tableName, getTableMetadata(tableName).getColumns()); + columns.put(tableName, getTableMetadata(session.getIdentity(), tableName).getColumns()); } catch (HiveViewNotSupportedException e) { // view is not supported @@ -764,7 +765,7 @@ public void createSchema(ConnectorSession session, String schemaName, Map table = metastore.getTable(schemaName, tableName); + Optional
table = metastore.getTable(identity, schemaName, tableName); if (!table.isPresent()) { throw new TableNotFoundException(new SchemaTableName(schemaName, tableName)); @@ -1153,7 +1154,7 @@ private void failIfAvroSchemaIsSet(HiveTableHandle handle) public void renameTable(ConnectorSession session, ConnectorTableHandle tableHandle, SchemaTableName newTableName) { HiveTableHandle handle = (HiveTableHandle) tableHandle; - metastore.renameTable(handle.getSchemaName(), handle.getTableName(), newTableName.getSchemaName(), newTableName.getTableName()); + metastore.renameTable(session.getIdentity(), handle.getSchemaName(), handle.getTableName(), newTableName.getSchemaName(), newTableName.getTableName()); } @Override @@ -1162,7 +1163,7 @@ public void dropTable(ConnectorSession session, ConnectorTableHandle tableHandle HiveTableHandle handle = (HiveTableHandle) tableHandle; SchemaTableName tableName = schemaTableName(tableHandle); - Optional
target = metastore.getTable(handle.getSchemaName(), handle.getTableName()); + Optional
target = metastore.getTable(session.getIdentity(), handle.getSchemaName(), handle.getTableName()); if (!target.isPresent()) { throw new TableNotFoundException(tableName); } @@ -1176,7 +1177,7 @@ public ConnectorTableHandle beginStatisticsCollection(ConnectorSession session, HiveTableHandle handle = (HiveTableHandle) tableHandle; SchemaTableName tableName = handle.getSchemaTableName(); - metastore.getTable(tableName.getSchemaName(), tableName.getTableName()) + metastore.getTable(session.getIdentity(), tableName.getSchemaName(), tableName.getTableName()) .orElseThrow(() -> new TableNotFoundException(tableName)); return handle; } @@ -1186,7 +1187,7 @@ public void finishStatisticsCollection(ConnectorSession session, ConnectorTableH { HiveTableHandle handle = (HiveTableHandle) tableHandle; SchemaTableName tableName = handle.getSchemaTableName(); - Table table = metastore.getTable(tableName.getSchemaName(), tableName.getTableName()) + Table table = metastore.getTable(session.getIdentity(), tableName.getSchemaName(), tableName.getTableName()) .orElseThrow(() -> new TableNotFoundException(handle.getSchemaTableName())); List partitionColumns = table.getPartitionColumns(); @@ -1202,7 +1203,7 @@ public void finishStatisticsCollection(ConnectorSession session, ConnectorTableH if (partitionColumns.isEmpty()) { // commit analyze to unpartitioned table - metastore.setTableStatistics(table, createPartitionStatistics(session, columnTypes, computedStatisticsMap.get(ImmutableList.of()))); + metastore.setTableStatistics(session.getIdentity(), table, createPartitionStatistics(session, columnTypes, computedStatisticsMap.get(ImmutableList.of()))); } else { List> partitionValuesList; @@ -1210,7 +1211,7 @@ public void finishStatisticsCollection(ConnectorSession session, ConnectorTableH partitionValuesList = handle.getAnalyzePartitionValues().get(); } else { - partitionValuesList = metastore.getPartitionNames(handle.getSchemaName(), handle.getTableName()) + partitionValuesList = 
metastore.getPartitionNames(session.getIdentity(), handle.getSchemaName(), handle.getTableName()) .orElseThrow(() -> new TableNotFoundException(((HiveTableHandle) tableHandle).getSchemaTableName())) .stream() .map(MetastoreUtil::toPartitionValues) @@ -1221,7 +1222,7 @@ public void finishStatisticsCollection(ConnectorSession session, ConnectorTableH Map> columnStatisticTypes = hiveColumnHandles.stream() .filter(columnHandle -> !partitionColumnNames.contains(columnHandle.getName())) .filter(column -> !column.isHidden()) - .collect(toImmutableMap(HiveColumnHandle::getName, column -> ImmutableSet.copyOf(metastore.getSupportedColumnStatistics(typeManager.getType(column.getTypeSignature()))))); + .collect(toImmutableMap(HiveColumnHandle::getName, column -> ImmutableSet.copyOf(metastore.getSupportedColumnStatistics(session.getIdentity(), typeManager.getType(column.getTypeSignature()))))); Supplier emptyPartitionStatistics = Suppliers.memoize(() -> createEmptyPartitionStatistics(columnTypes, columnStatisticTypes)); int usedComputedStatistics = 0; @@ -1236,7 +1237,7 @@ public void finishStatisticsCollection(ConnectorSession session, ConnectorTableH } } verify(usedComputedStatistics == computedStatistics.size(), "All computed statistics must be used"); - metastore.setPartitionStatistics(table, partitionStatistics.build()); + metastore.setPartitionStatistics(session.getIdentity(), table, partitionStatistics.build()); } } @@ -1284,7 +1285,7 @@ public HiveOutputTableHandle beginCreateTable(ConnectorSession session, Connecto tableName, columnHandles, session.getQueryId(), - metastore.generatePageSinkMetadata(schemaTableName), + metastore.generatePageSinkMetadata(session.getIdentity(), schemaTableName), locationHandle, tableStorageFormat, partitionStorageFormat, @@ -1496,7 +1497,7 @@ public HiveInsertTableHandle beginInsert(ConnectorSession session, ConnectorTabl verifyJvmTimeZone(); SchemaTableName tableName = schemaTableName(tableHandle); - Optional
table = metastore.getTable(tableName.getSchemaName(), tableName.getTableName()); + Optional
table = metastore.getTable(session.getIdentity(), tableName.getSchemaName(), tableName.getTableName()); if (!table.isPresent()) { throw new TableNotFoundException(tableName); } @@ -1536,7 +1537,7 @@ public HiveInsertTableHandle beginInsert(ConnectorSession session, ConnectorTabl tableName.getTableName(), handles, session.getQueryId(), - metastore.generatePageSinkMetadata(tableName), + metastore.generatePageSinkMetadata(session.getIdentity(), tableName), locationHandle, table.get().getStorage().getBucketProperty(), decodePreferredOrderingColumnsFromStorage(table.get().getStorage()), @@ -1578,7 +1579,7 @@ public Optional finishInsert(ConnectorSession session, HiveStorageFormat tableStorageFormat = handle.getTableStorageFormat(); partitionUpdates = PartitionUpdate.mergePartitionUpdates(partitionUpdates); - Optional
table = metastore.getTable(handle.getSchemaName(), handle.getTableName()); + Optional
table = metastore.getTable(session.getIdentity(), handle.getSchemaName(), handle.getTableName()); if (!table.isPresent()) { throw new TableNotFoundException(new SchemaTableName(handle.getSchemaName(), handle.getTableName())); } @@ -1792,13 +1793,13 @@ public void createView(ConnectorSession session, ConnectorTableMetadata viewMeta Table table = tableBuilder.build(); PrincipalPrivileges principalPrivileges = buildInitialPrivilegeSet(session.getUser()); - Optional
existing = metastore.getTable(viewName.getSchemaName(), viewName.getTableName()); + Optional
existing = metastore.getTable(session.getIdentity(), viewName.getSchemaName(), viewName.getTableName()); if (existing.isPresent()) { if (!replace || !MetastoreUtil.isPrestoView(existing.get())) { throw new ViewAlreadyExistsException(viewName); } - metastore.replaceView(viewName.getSchemaName(), viewName.getTableName(), table, principalPrivileges); + metastore.replaceView(session.getIdentity(), viewName.getSchemaName(), viewName.getTableName(), table, principalPrivileges); return; } @@ -1831,7 +1832,7 @@ public List listViews(ConnectorSession session, String schemaNa { ImmutableList.Builder tableNames = ImmutableList.builder(); for (String schemaName : listSchemas(session, schemaNameOrNull)) { - for (String tableName : metastore.getAllViews(schemaName).orElse(emptyList())) { + for (String tableName : metastore.getAllViews(session.getIdentity(), schemaName).orElse(emptyList())) { tableNames.add(new SchemaTableName(schemaName, tableName)); } } @@ -1851,7 +1852,7 @@ public Map getViews(ConnectorSession s } for (SchemaTableName schemaTableName : tableNames) { - Optional
table = metastore.getTable(schemaTableName.getSchemaName(), schemaTableName.getTableName()); + Optional
table = metastore.getTable(session.getIdentity(), schemaTableName.getSchemaName(), schemaTableName.getTableName()); if (table.isPresent() && MetastoreUtil.isPrestoView(table.get())) { views.put(schemaTableName, new ConnectorViewDefinition( schemaTableName, @@ -1881,7 +1882,7 @@ public OptionalLong metadataDelete(ConnectorSession session, ConnectorTableHandl HiveTableHandle handle = (HiveTableHandle) tableHandle; HiveTableLayoutHandle layoutHandle = (HiveTableLayoutHandle) tableLayoutHandle; - Optional
table = metastore.getTable(handle.getSchemaName(), handle.getTableName()); + Optional
table = metastore.getTable(session.getIdentity(), handle.getSchemaName(), handle.getTableName()); if (!table.isPresent()) { throw new TableNotFoundException(handle.getSchemaTableName()); } @@ -2075,7 +2076,7 @@ public ConnectorTableLayout getTableLayout(ConnectorSession session, ConnectorTa Optional tablePartitioning = Optional.empty(); SchemaTableName tableName = hiveLayoutHandle.getSchemaTableName(); - Table table = metastore.getTable(tableName.getSchemaName(), tableName.getTableName()) + Table table = metastore.getTable(session.getIdentity(), tableName.getSchemaName(), tableName.getTableName()) .orElseThrow(() -> new TableNotFoundException(tableName)); // never ignore table bucketing for temporary tables as those are created such explicitly by the engine request boolean bucketExecutionEnabled = table.getTableType().equals(TEMPORARY_TABLE) || isBucketExecutionEnabled(session); @@ -2288,7 +2289,7 @@ public Optional getInsertLayout(ConnectorSession sessio { HiveTableHandle hiveTableHandle = (HiveTableHandle) tableHandle; SchemaTableName tableName = hiveTableHandle.getSchemaTableName(); - Table table = metastore.getTable(tableName.getSchemaName(), tableName.getTableName()) + Table table = metastore.getTable(session.getIdentity(), tableName.getSchemaName(), tableName.getTableName()) .orElseThrow(() -> new TableNotFoundException(tableName)); Optional hiveBucketHandle = getHiveBucketHandle(table); @@ -2318,7 +2319,7 @@ public Optional getPreferredShuffleLayoutForInsert(Conn { HiveTableHandle hiveTableHandle = (HiveTableHandle) tableHandle; SchemaTableName tableName = hiveTableHandle.getSchemaTableName(); - Table table = metastore.getTable(tableName.getSchemaName(), tableName.getTableName()) + Table table = metastore.getTable(session.getIdentity(), tableName.getSchemaName(), tableName.getTableName()) .orElseThrow(() -> new TableNotFoundException(tableName)); Optional hiveBucketHandle = getHiveBucketHandle(table); @@ -2414,22 +2415,22 @@ public TableStatisticsMetadata 
getStatisticsCollectionMetadataForWrite(Connector return TableStatisticsMetadata.empty(); } List partitionedBy = firstNonNull(getPartitionedBy(tableMetadata.getProperties()), ImmutableList.of()); - return getStatisticsCollectionMetadata(tableMetadata.getColumns(), partitionedBy, false); + return getStatisticsCollectionMetadata(session.getIdentity(), tableMetadata.getColumns(), partitionedBy, false); } @Override public TableStatisticsMetadata getStatisticsCollectionMetadata(ConnectorSession session, ConnectorTableMetadata tableMetadata) { List partitionedBy = firstNonNull(getPartitionedBy(tableMetadata.getProperties()), ImmutableList.of()); - return getStatisticsCollectionMetadata(tableMetadata.getColumns(), partitionedBy, true); + return getStatisticsCollectionMetadata(session.getIdentity(), tableMetadata.getColumns(), partitionedBy, true); } - private TableStatisticsMetadata getStatisticsCollectionMetadata(List columns, List partitionedBy, boolean includeRowCount) + private TableStatisticsMetadata getStatisticsCollectionMetadata(ConnectorIdentity identity, List columns, List partitionedBy, boolean includeRowCount) { Set columnStatistics = columns.stream() .filter(column -> !partitionedBy.contains(column.getName())) .filter(column -> !column.isHidden()) - .map(this::getColumnStatisticMetadata) + .map(column -> this.getColumnStatisticMetadata(identity, column)) .flatMap(List::stream) .collect(toImmutableSet()); @@ -2437,9 +2438,9 @@ private TableStatisticsMetadata getStatisticsCollectionMetadata(List getColumnStatisticMetadata(ColumnMetadata columnMetadata) + private List getColumnStatisticMetadata(ConnectorIdentity identity, ColumnMetadata columnMetadata) { - return getColumnStatisticMetadata(columnMetadata.getName(), metastore.getSupportedColumnStatistics(columnMetadata.getType())); + return getColumnStatisticMetadata(columnMetadata.getName(), metastore.getSupportedColumnStatistics(identity, columnMetadata.getType())); } private List 
getColumnStatisticMetadata(String columnName, Set statisticTypes) @@ -2455,51 +2456,51 @@ public void createRole(ConnectorSession session, String role, Optional listRoles(ConnectorSession session) { - return ImmutableSet.copyOf(metastore.listRoles()); + return ImmutableSet.copyOf(metastore.listRoles(session.getIdentity())); } @Override public Set listRoleGrants(ConnectorSession session, PrestoPrincipal principal) { - return ImmutableSet.copyOf(metastore.listRoleGrants(principal)); + return ImmutableSet.copyOf(metastore.listRoleGrants(session.getIdentity(), principal)); } @Override public void grantRoles(ConnectorSession session, Set roles, Set grantees, boolean withAdminOption, Optional grantor) { - metastore.grantRoles(roles, grantees, withAdminOption, grantor.orElse(new PrestoPrincipal(USER, session.getUser()))); + metastore.grantRoles(session.getIdentity(), roles, grantees, withAdminOption, grantor.orElse(new PrestoPrincipal(USER, session.getUser()))); } @Override public void revokeRoles(ConnectorSession session, Set roles, Set grantees, boolean adminOptionFor, Optional grantor) { - metastore.revokeRoles(roles, grantees, adminOptionFor, grantor.orElse(new PrestoPrincipal(USER, session.getUser()))); + metastore.revokeRoles(session.getIdentity(), roles, grantees, adminOptionFor, grantor.orElse(new PrestoPrincipal(USER, session.getUser()))); } @Override public Set listApplicableRoles(ConnectorSession session, PrestoPrincipal principal) { - return ThriftMetastoreUtil.listApplicableRoles(principal, metastore::listRoleGrants) + return ThriftMetastoreUtil.listApplicableRoles(principal, (PrestoPrincipal p) -> metastore.listRoleGrants(session.getIdentity(), p)) .collect(toImmutableSet()); } @Override public Set listEnabledRoles(ConnectorSession session) { - return ThriftMetastoreUtil.listEnabledRoles(session.getIdentity(), metastore::listRoleGrants) + return ThriftMetastoreUtil.listEnabledRoles(session.getIdentity(), (PrestoPrincipal p) -> 
metastore.listRoleGrants(session.getIdentity(), p)) .collect(toImmutableSet()); } @@ -2513,7 +2514,7 @@ public void grantTablePrivileges(ConnectorSession session, SchemaTableName schem .map(privilege -> new HivePrivilegeInfo(toHivePrivilege(privilege), grantOption, new PrestoPrincipal(USER, session.getUser()), new PrestoPrincipal(USER, session.getUser()))) .collect(toSet()); - metastore.grantTablePrivileges(schemaName, tableName, grantee, hivePrivilegeInfos); + metastore.grantTablePrivileges(session.getIdentity(), schemaName, tableName, grantee, hivePrivilegeInfos); } @Override @@ -2526,7 +2527,7 @@ public void revokeTablePrivileges(ConnectorSession session, SchemaTableName sche .map(privilege -> new HivePrivilegeInfo(toHivePrivilege(privilege), grantOption, new PrestoPrincipal(USER, session.getUser()), new PrestoPrincipal(USER, session.getUser()))) .collect(toSet()); - metastore.revokeTablePrivileges(schemaName, tableName, grantee, hivePrivilegeInfos); + metastore.revokeTablePrivileges(session.getIdentity(), schemaName, tableName, grantee, hivePrivilegeInfos); } @Override @@ -2538,11 +2539,11 @@ public List listTablePrivileges(ConnectorSession session, SchemaTable ImmutableList.Builder result = ImmutableList.builder(); for (SchemaTableName tableName : listTables(session, schemaTablePrefix)) { if (isAdminRoleSet) { - result.addAll(buildGrants(tableName, null)); + result.addAll(buildGrants(session.getIdentity(), tableName, null)); } else { for (PrestoPrincipal grantee : principals) { - result.addAll(buildGrants(tableName, grantee)); + result.addAll(buildGrants(session.getIdentity(), tableName, grantee)); } } } @@ -2563,10 +2564,10 @@ public CompletableFuture commitPageSinkAsync(ConnectorSession session, Con return toCompletableFuture(stagingFileCommitter.commitFiles(session, handle.getSchemaName(), handle.getTableName(), getPartitionUpdates(fragments))); } - private List buildGrants(SchemaTableName tableName, PrestoPrincipal principal) + private List 
buildGrants(ConnectorIdentity identity, SchemaTableName tableName, PrestoPrincipal principal) { ImmutableList.Builder result = ImmutableList.builder(); - Set hivePrivileges = metastore.listTablePrivileges(tableName.getSchemaName(), tableName.getTableName(), principal); + Set hivePrivileges = metastore.listTablePrivileges(identity, tableName.getSchemaName(), tableName.getTableName(), principal); for (HivePrivilegeInfo hivePrivilege : hivePrivileges) { Set prestoPrivileges = hivePrivilege.toPrivilegeInfo(); for (PrivilegeInfo prestoPrivilege : prestoPrivileges) { diff --git a/presto-hive/src/main/java/com/facebook/presto/hive/HiveMetadataFactory.java b/presto-hive/src/main/java/com/facebook/presto/hive/HiveMetadataFactory.java index 5b209cac4ceb9..414a65c4b56fd 100644 --- a/presto-hive/src/main/java/com/facebook/presto/hive/HiveMetadataFactory.java +++ b/presto-hive/src/main/java/com/facebook/presto/hive/HiveMetadataFactory.java @@ -15,6 +15,7 @@ import com.facebook.airlift.json.JsonCodec; import com.facebook.airlift.log.Logger; +import com.facebook.presto.hive.authentication.HiveMetastoreAuthentication; import com.facebook.presto.hive.metastore.CachingHiveMetastore; import com.facebook.presto.hive.metastore.ExtendedHiveMetastore; import com.facebook.presto.hive.metastore.SemiTransactionalHiveMetastore; @@ -45,9 +46,11 @@ public class HiveMetadataFactory private final boolean createsOfNonManagedTablesEnabled; private final int maxPartitionBatchSize; private final long perTransactionCacheMaximumSize; + private final String hmsImpersonationDefaultUser; private final ExtendedHiveMetastore metastore; private final HdfsEnvironment hdfsEnvironment; private final HivePartitionManager partitionManager; + private final HiveMetastoreAuthentication metastoreAuthentication; private final DateTimeZone timeZone; private final TypeManager typeManager; private final LocationService locationService; @@ -71,6 +74,7 @@ public HiveMetadataFactory( MetastoreClientConfig 
metastoreClientConfig, ExtendedHiveMetastore metastore, HdfsEnvironment hdfsEnvironment, + HiveMetastoreAuthentication metastoreAuthentication, HivePartitionManager partitionManager, @ForFileRename ListeningExecutorService fileRenameExecutor, TypeManager typeManager, @@ -90,6 +94,7 @@ public HiveMetadataFactory( this( metastore, hdfsEnvironment, + metastoreAuthentication, partitionManager, hiveClientConfig.getDateTimeZone(), hiveClientConfig.getAllowCorruptWritesForTesting(), @@ -99,6 +104,7 @@ public HiveMetadataFactory( hiveClientConfig.getCreatesOfNonManagedTablesEnabled(), hiveClientConfig.getMaxPartitionBatchSize(), metastoreClientConfig.getPerTransactionMetastoreCacheMaximumSize(), + metastoreClientConfig.getMetastoreDefaultImpersonationUser(), typeManager, locationService, functionResolution, @@ -118,6 +124,7 @@ public HiveMetadataFactory( public HiveMetadataFactory( ExtendedHiveMetastore metastore, HdfsEnvironment hdfsEnvironment, + HiveMetastoreAuthentication metastoreAuthentication, HivePartitionManager partitionManager, DateTimeZone timeZone, boolean allowCorruptWritesForTesting, @@ -127,6 +134,7 @@ public HiveMetadataFactory( boolean createsOfNonManagedTablesEnabled, int maxPartitionBatchSize, long perTransactionCacheMaximumSize, + String hmsImpersonationDefaultUser, TypeManager typeManager, LocationService locationService, StandardFunctionResolution functionResolution, @@ -149,9 +157,10 @@ public HiveMetadataFactory( this.createsOfNonManagedTablesEnabled = createsOfNonManagedTablesEnabled; this.maxPartitionBatchSize = maxPartitionBatchSize; this.perTransactionCacheMaximumSize = perTransactionCacheMaximumSize; - + this.hmsImpersonationDefaultUser = requireNonNull(hmsImpersonationDefaultUser, "hmsImpersonationDefaultUser is null"); this.metastore = requireNonNull(metastore, "metastore is null"); this.hdfsEnvironment = requireNonNull(hdfsEnvironment, "hdfsEnvironment is null"); + this.metastoreAuthentication = requireNonNull(metastoreAuthentication, 
"metastoreAuthentication is null"); this.partitionManager = requireNonNull(partitionManager, "partitionManager is null"); this.timeZone = requireNonNull(timeZone, "timeZone is null"); this.typeManager = requireNonNull(typeManager, "typeManager is null"); @@ -184,6 +193,7 @@ public HiveMetadata get() hdfsEnvironment, CachingHiveMetastore.memoizeMetastore(this.metastore, perTransactionCacheMaximumSize), // per-transaction cache fileRenameExecutor, + metastoreAuthentication, skipDeletionForAlter, skipTargetCleanupOnRollback); @@ -206,7 +216,7 @@ public HiveMetadata get() partitionUpdateCodec, typeTranslator, prestoVersion, - new MetastoreHiveStatisticsProvider(metastore), + new MetastoreHiveStatisticsProvider(metastore, hmsImpersonationDefaultUser), stagingFileCommitter, zeroRowFileCreator, partitionObjectBuilder); diff --git a/presto-hive/src/main/java/com/facebook/presto/hive/HivePartitionManager.java b/presto-hive/src/main/java/com/facebook/presto/hive/HivePartitionManager.java index 84e5836265d2a..b1d7d27b7cdd1 100644 --- a/presto-hive/src/main/java/com/facebook/presto/hive/HivePartitionManager.java +++ b/presto-hive/src/main/java/com/facebook/presto/hive/HivePartitionManager.java @@ -27,6 +27,7 @@ import com.facebook.presto.spi.predicate.Domain; import com.facebook.presto.spi.predicate.NullableValue; import com.facebook.presto.spi.predicate.TupleDomain; +import com.facebook.presto.spi.security.ConnectorIdentity; import com.facebook.presto.spi.type.CharType; import com.facebook.presto.spi.type.Type; import com.facebook.presto.spi.type.TypeManager; @@ -113,7 +114,7 @@ public Iterable getPartitionsIterator( TupleDomain effectivePredicateColumnHandles = constraint.getSummary(); SchemaTableName tableName = hiveTableHandle.getSchemaTableName(); - Table table = getTable(metastore, tableName, isOfflineDataDebugModeEnabled(session)); + Table table = getTable(session.getIdentity(), metastore, tableName, isOfflineDataDebugModeEnabled(session)); List partitionColumns = 
getPartitionKeyColumnHandles(table); @@ -131,7 +132,7 @@ public Iterable getPartitionsIterator( } else { return () -> { - List filteredPartitionNames = getFilteredPartitionNames(metastore, tableName, effectivePredicate); + List filteredPartitionNames = getFilteredPartitionNames(session.getIdentity(), metastore, tableName, effectivePredicate); return filteredPartitionNames.stream() // Apply extra filters which could not be done by getFilteredPartitionNames .map(partitionName -> parseValuesAndFilterPartition(tableName, partitionName, partitionColumns, partitionTypes, constraint)) @@ -186,7 +187,7 @@ public HivePartitionResult getPartitions(SemiTransactionalHiveMetastore metastor TupleDomain effectivePredicate = constraint.getSummary(); SchemaTableName tableName = hiveTableHandle.getSchemaTableName(); - Table table = getTable(metastore, tableName, isOfflineDataDebugModeEnabled(session)); + Table table = getTable(session.getIdentity(), metastore, tableName, isOfflineDataDebugModeEnabled(session)); List partitionColumns = getPartitionKeyColumnHandles(table); @@ -285,7 +286,7 @@ public HivePartitionResult getPartitions(SemiTransactionalHiveMetastore metastor HiveTableHandle hiveTableHandle = (HiveTableHandle) tableHandle; SchemaTableName tableName = hiveTableHandle.getSchemaTableName(); - Table table = getTable(metastore, tableName, isOfflineDataDebugModeEnabled(session)); + Table table = getTable(session.getIdentity(), metastore, tableName, isOfflineDataDebugModeEnabled(session)); List partitionColumns = getPartitionKeyColumnHandles(table); List partitionColumnTypes = partitionColumns.stream() @@ -336,9 +337,9 @@ private Optional parseValuesAndFilterPartition( return Optional.of(partition); } - private Table getTable(SemiTransactionalHiveMetastore metastore, SchemaTableName tableName, boolean offlineDataDebugModeEnabled) + private Table getTable(ConnectorIdentity identity, SemiTransactionalHiveMetastore metastore, SchemaTableName tableName, boolean 
offlineDataDebugModeEnabled) { - Optional
target = metastore.getTable(tableName.getSchemaName(), tableName.getTableName()); + Optional
target = metastore.getTable(identity, tableName.getSchemaName(), tableName.getTableName()); if (!target.isPresent()) { throw new TableNotFoundException(tableName); } @@ -351,14 +352,14 @@ private Table getTable(SemiTransactionalHiveMetastore metastore, SchemaTableName return table; } - private List getFilteredPartitionNames(SemiTransactionalHiveMetastore metastore, SchemaTableName tableName, Map partitionPredicates) + private List getFilteredPartitionNames(ConnectorIdentity identity, SemiTransactionalHiveMetastore metastore, SchemaTableName tableName, Map partitionPredicates) { if (partitionPredicates.isEmpty()) { return ImmutableList.of(); } // fetch the partition names - return metastore.getPartitionNamesByFilter(tableName.getSchemaName(), tableName.getTableName(), partitionPredicates) + return metastore.getPartitionNamesByFilter(identity, tableName.getSchemaName(), tableName.getTableName(), partitionPredicates) .orElseThrow(() -> new TableNotFoundException(tableName)); } diff --git a/presto-hive/src/main/java/com/facebook/presto/hive/HiveSplitManager.java b/presto-hive/src/main/java/com/facebook/presto/hive/HiveSplitManager.java index bc0e86c54e5b6..47da4a5a6aad1 100644 --- a/presto-hive/src/main/java/com/facebook/presto/hive/HiveSplitManager.java +++ b/presto-hive/src/main/java/com/facebook/presto/hive/HiveSplitManager.java @@ -171,7 +171,7 @@ public ConnectorSplitSource getSplits( throw new PrestoException(HIVE_TRANSACTION_NOT_FOUND, format("Transaction not found: %s", transaction)); } SemiTransactionalHiveMetastore metastore = metadata.getMetastore(); - Table table = metastore.getTable(tableName.getSchemaName(), tableName.getTableName()) + Table table = metastore.getTable(session.getIdentity(), tableName.getSchemaName(), tableName.getTableName()) .orElseThrow(() -> new TableNotFoundException(tableName)); if (!isOfflineDataDebugModeEnabled(session)) { @@ -315,6 +315,7 @@ private Iterable getPartitionMetadata( Iterable> partitionNameBatches = 
partitionExponentially(hivePartitions, minPartitionBatchSize, maxPartitionBatchSize); Iterable> partitionBatches = transform(partitionNameBatches, partitionBatch -> { Map> batch = metastore.getPartitionsByNames( + session.getIdentity(), tableName.getSchemaName(), tableName.getTableName(), Lists.transform(partitionBatch, HivePartition::getPartitionId)); diff --git a/presto-hive/src/main/java/com/facebook/presto/hive/HiveWriteUtils.java b/presto-hive/src/main/java/com/facebook/presto/hive/HiveWriteUtils.java index cae39a0dae2ed..fb140e15fd2d8 100644 --- a/presto-hive/src/main/java/com/facebook/presto/hive/HiveWriteUtils.java +++ b/presto-hive/src/main/java/com/facebook/presto/hive/HiveWriteUtils.java @@ -27,6 +27,7 @@ import com.facebook.presto.spi.SchemaNotFoundException; import com.facebook.presto.spi.SchemaTableName; import com.facebook.presto.spi.block.Block; +import com.facebook.presto.spi.security.ConnectorIdentity; import com.facebook.presto.spi.type.BigintType; import com.facebook.presto.spi.type.BooleanType; import com.facebook.presto.spi.type.CharType; @@ -351,7 +352,7 @@ private static void checkWritable( public static Path getTableDefaultLocation(HdfsContext context, SemiTransactionalHiveMetastore metastore, HdfsEnvironment hdfsEnvironment, String schemaName, String tableName) { - Optional location = getDatabase(metastore, schemaName).getLocation(); + Optional location = getDatabase(context.getIdentity(), metastore, schemaName).getLocation(); if (!location.isPresent() || location.get().isEmpty()) { throw new PrestoException(HIVE_DATABASE_LOCATION_ERROR, format("Database '%s' location is not set", schemaName)); } @@ -369,9 +370,9 @@ public static Path getTableDefaultLocation(HdfsContext context, SemiTransactiona return new Path(databasePath, tableName); } - private static Database getDatabase(SemiTransactionalHiveMetastore metastore, String database) + private static Database getDatabase(ConnectorIdentity identity, SemiTransactionalHiveMetastore metastore, 
String database) { - return metastore.getDatabase(database).orElseThrow(() -> new SchemaNotFoundException(database)); + return metastore.getDatabase(identity, database).orElseThrow(() -> new SchemaNotFoundException(database)); } public static boolean isS3FileSystem(HdfsContext context, HdfsEnvironment hdfsEnvironment, Path path) @@ -418,11 +419,6 @@ public static Path createTemporaryPath(ConnectorSession session, HdfsContext con String temporaryPrefix = getTemporaryStagingDirectoryPath(session) .replace("${USER}", context.getIdentity().getUser()); - // use relative temporary directory on ViewFS - if (isViewFileSystem(context, hdfsEnvironment, targetPath)) { - temporaryPrefix = ".hive-staging"; - } - // create a temporary directory on the same filesystem Path temporaryRoot = new Path(targetPath, temporaryPrefix); Path temporaryPath = new Path(temporaryRoot, randomUUID().toString()); diff --git a/presto-hive/src/main/java/com/facebook/presto/hive/authentication/KerberosHiveMetastoreAuthentication.java b/presto-hive/src/main/java/com/facebook/presto/hive/authentication/KerberosHiveMetastoreAuthentication.java index 793a836b203e4..8e88ed21df07b 100644 --- a/presto-hive/src/main/java/com/facebook/presto/hive/authentication/KerberosHiveMetastoreAuthentication.java +++ b/presto-hive/src/main/java/com/facebook/presto/hive/authentication/KerberosHiveMetastoreAuthentication.java @@ -15,22 +15,39 @@ import com.facebook.presto.hive.ForHiveMetastore; import com.facebook.presto.hive.HiveClientConfig; +import com.facebook.presto.hive.MetastoreClientConfig; import com.google.common.collect.ImmutableMap; +import org.apache.commons.codec.binary.Base64; +import org.apache.hadoop.hive.thrift.DelegationTokenIdentifier; import org.apache.hadoop.hive.thrift.client.TUGIAssumingTransport; import org.apache.hadoop.security.SaslRpcServer; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.security.token.Token; +import 
org.apache.hadoop.security.token.TokenIdentifier; import org.apache.thrift.transport.TSaslClientTransport; import org.apache.thrift.transport.TTransport; import javax.inject.Inject; -import javax.security.sasl.Sasl; +import javax.security.auth.callback.Callback; +import javax.security.auth.callback.CallbackHandler; +import javax.security.auth.callback.NameCallback; +import javax.security.auth.callback.PasswordCallback; +import javax.security.auth.callback.UnsupportedCallbackException; +import javax.security.sasl.RealmCallback; +import javax.security.sasl.RealmChoiceCallback; import java.io.IOException; import java.io.UncheckedIOException; import java.util.Map; +import static com.facebook.presto.hive.authentication.UserGroupInformationUtils.executeActionInDoAs; import static com.google.common.base.Preconditions.checkState; import static java.util.Objects.requireNonNull; +import static javax.security.sasl.Sasl.QOP; +import static javax.security.sasl.Sasl.SERVER_AUTH; import static org.apache.hadoop.security.SaslRpcServer.AuthMethod.KERBEROS; +import static org.apache.hadoop.security.SaslRpcServer.AuthMethod.TOKEN; +import static org.apache.hadoop.security.SaslRpcServer.SASL_DEFAULT_REALM; import static org.apache.hadoop.security.SecurityUtil.getServerPrincipal; public class KerberosHiveMetastoreAuthentication @@ -39,21 +56,96 @@ public class KerberosHiveMetastoreAuthentication private final String hiveMetastoreServicePrincipal; private final HadoopAuthentication authentication; private final boolean hdfsWireEncryptionEnabled; + private final boolean impersonationEnabled; + private static final Map saslProperties = ImmutableMap.of(QOP, "auth", SERVER_AUTH, "true"); @Inject public KerberosHiveMetastoreAuthentication( MetastoreKerberosConfig config, @ForHiveMetastore HadoopAuthentication authentication, - HiveClientConfig hiveClientConfig) + HiveClientConfig hiveClientConfig, + MetastoreClientConfig metastoreClientConfig) { - 
this(config.getHiveMetastoreServicePrincipal(), authentication, hiveClientConfig.isHdfsWireEncryptionEnabled()); + this(config.getHiveMetastoreServicePrincipal(), authentication, hiveClientConfig.isHdfsWireEncryptionEnabled(), metastoreClientConfig.isMetastoreImpersonationEnabled()); } - public KerberosHiveMetastoreAuthentication(String hiveMetastoreServicePrincipal, HadoopAuthentication authentication, boolean hdfsWireEncryptionEnabled) + public KerberosHiveMetastoreAuthentication(String hiveMetastoreServicePrincipal, HadoopAuthentication authentication, boolean hdfsWireEncryptionEnabled, boolean impersonationEnabled) { this.hiveMetastoreServicePrincipal = requireNonNull(hiveMetastoreServicePrincipal, "hiveMetastoreServicePrincipal is null"); this.authentication = requireNonNull(authentication, "authentication is null"); this.hdfsWireEncryptionEnabled = hdfsWireEncryptionEnabled; + this.impersonationEnabled = impersonationEnabled; + } + + @Override + public TTransport authenticateWithToken(TTransport rawTransport, String tokenString) + { + try { + Token token = new Token(); + token.decodeFromUrlString(tokenString); + + TTransport saslTransport = new TSaslClientTransport( + TOKEN.getMechanismName(), + null, + null, + SASL_DEFAULT_REALM, + saslProperties, + new SaslClientCallbackHandler(token), + rawTransport); + return new TUGIAssumingTransport(saslTransport, UserGroupInformation.getCurrentUser()); + } + catch (IOException ex) { + throw new UncheckedIOException(ex); + } + } + + private static class SaslClientCallbackHandler + implements CallbackHandler + { + private final String userName; + private final char[] userPassword; + + public SaslClientCallbackHandler(Token token) + { + this.userName = encodeIdentifier(token.getIdentifier()); + this.userPassword = encodePassword(token.getPassword()); + } + + @Override + public void handle(Callback[] callbacks) + throws UnsupportedCallbackException + { + for (Callback callback : callbacks) { + if (callback instanceof 
RealmChoiceCallback) { + continue; + } + else if (callback instanceof NameCallback) { + NameCallback nameCallback = (NameCallback) callback; + nameCallback.setName(userName); + } + else if (callback instanceof PasswordCallback) { + PasswordCallback passwordCallback = (PasswordCallback) callback; + passwordCallback.setPassword(userPassword); + } + else if (callback instanceof RealmCallback) { + RealmCallback realmCallback = (RealmCallback) callback; + realmCallback.setText(realmCallback.getDefaultText()); + } + else { + throw new UnsupportedCallbackException(callback, "Unrecognized SASL client callback"); + } + } + } + + static String encodeIdentifier(byte[] identifier) + { + return new String(Base64.encodeBase64(identifier)); + } + + static char[] encodePassword(byte[] password) + { + return new String(Base64.encodeBase64(password)).toCharArray(); + } } @Override @@ -66,8 +158,8 @@ public TTransport authenticate(TTransport rawTransport, String hiveMetastoreHost "Kerberos principal name does NOT have the expected hostname part: %s", serverPrincipal); Map saslProps = ImmutableMap.of( - Sasl.QOP, hdfsWireEncryptionEnabled ? "auth-conf" : "auth", - Sasl.SERVER_AUTH, "true"); + QOP, hdfsWireEncryptionEnabled ? 
"auth-conf" : "auth", + SERVER_AUTH, "true"); TTransport saslTransport = new TSaslClientTransport( KERBEROS.getMechanismName(), @@ -84,4 +176,19 @@ public TTransport authenticate(TTransport rawTransport, String hiveMetastoreHost throw new UncheckedIOException(e); } } + + @Override + public R doAs(String user, GenericExceptionAction action) + throws E + { + if (impersonationEnabled) { + return executeActionInDoAs(createProxyUser(user), action); + } + return executeActionInDoAs(authentication.getUserGroupInformation(), action); + } + + private UserGroupInformation createProxyUser(String user) + { + return UserGroupInformation.createProxyUser(user, authentication.getUserGroupInformation()); + } } diff --git a/presto-hive/src/main/java/com/facebook/presto/hive/authentication/UserGroupInformationUtils.java b/presto-hive/src/main/java/com/facebook/presto/hive/authentication/UserGroupInformationUtils.java index 5656fbc736683..3c8f91dc44ed6 100644 --- a/presto-hive/src/main/java/com/facebook/presto/hive/authentication/UserGroupInformationUtils.java +++ b/presto-hive/src/main/java/com/facebook/presto/hive/authentication/UserGroupInformationUtils.java @@ -17,11 +17,11 @@ import java.security.PrivilegedAction; -final class UserGroupInformationUtils +public final class UserGroupInformationUtils { private UserGroupInformationUtils() {} - static R executeActionInDoAs(UserGroupInformation userGroupInformation, GenericExceptionAction action) + public static R executeActionInDoAs(UserGroupInformation userGroupInformation, GenericExceptionAction action) throws E { return userGroupInformation.doAs((PrivilegedAction>) () -> { diff --git a/presto-hive/src/main/java/com/facebook/presto/hive/security/LegacyAccessControl.java b/presto-hive/src/main/java/com/facebook/presto/hive/security/LegacyAccessControl.java index 0a36da6ea86aa..eab03b0e7d36e 100644 --- a/presto-hive/src/main/java/com/facebook/presto/hive/security/LegacyAccessControl.java +++ 
b/presto-hive/src/main/java/com/facebook/presto/hive/security/LegacyAccessControl.java @@ -100,7 +100,7 @@ public void checkCanDropTable(ConnectorTransactionHandle transaction, ConnectorI } TransactionalMetadata metadata = hiveTransactionManager.get(transaction); - Optional
target = metadata.getMetastore().getTable(tableName.getSchemaName(), tableName.getTableName()); + Optional
target = metadata.getMetastore().getTable(identity, tableName.getSchemaName(), tableName.getTableName()); if (!target.isPresent()) { denyDropTable(tableName.toString(), "Table not found"); diff --git a/presto-hive/src/main/java/com/facebook/presto/hive/security/SqlStandardAccessControl.java b/presto-hive/src/main/java/com/facebook/presto/hive/security/SqlStandardAccessControl.java index b622bf006a755..a9a8605b34a4e 100644 --- a/presto-hive/src/main/java/com/facebook/presto/hive/security/SqlStandardAccessControl.java +++ b/presto-hive/src/main/java/com/facebook/presto/hive/security/SqlStandardAccessControl.java @@ -319,7 +319,7 @@ public void checkCanRevokeRoles(ConnectorTransactionHandle transactionHandle, Co public void checkCanSetRole(ConnectorTransactionHandle transaction, ConnectorIdentity identity, AccessControlContext context, String role, String catalogName) { SemiTransactionalHiveMetastore metastore = getMetastore(transaction); - if (!isRoleApplicable(metastore, new PrestoPrincipal(USER, identity.getUser()), role)) { + if (!isRoleApplicable(metastore, identity, new PrestoPrincipal(USER, identity.getUser()), role)) { denySetRole(role); } } @@ -345,7 +345,7 @@ public void checkCanShowRoleGrants(ConnectorTransactionHandle transactionHandle, private boolean isAdmin(ConnectorTransactionHandle transaction, ConnectorIdentity identity) { SemiTransactionalHiveMetastore metastore = getMetastore(transaction); - return isRoleEnabled(identity, metastore::listRoleGrants, ADMIN_ROLE_NAME); + return isRoleEnabled(identity, (PrestoPrincipal p) -> metastore.listRoleGrants(identity, p), ADMIN_ROLE_NAME); } private boolean isDatabaseOwner(ConnectorTransactionHandle transaction, ConnectorIdentity identity, String databaseName) @@ -360,7 +360,7 @@ private boolean isDatabaseOwner(ConnectorTransactionHandle transaction, Connecto } SemiTransactionalHiveMetastore metastore = getMetastore(transaction); - Optional databaseMetadata = metastore.getDatabase(databaseName); + Optional 
databaseMetadata = metastore.getDatabase(identity, databaseName); if (!databaseMetadata.isPresent()) { return false; } @@ -371,7 +371,7 @@ private boolean isDatabaseOwner(ConnectorTransactionHandle transaction, Connecto if (database.getOwnerType() == USER && identity.getUser().equals(database.getOwnerName())) { return true; } - if (database.getOwnerType() == ROLE && isRoleEnabled(identity, metastore::listRoleGrants, database.getOwnerName())) { + if (database.getOwnerType() == ROLE && isRoleEnabled(identity, (PrestoPrincipal p) -> metastore.listRoleGrants(identity, p), database.getOwnerName())) { return true; } return false; @@ -416,6 +416,7 @@ private boolean hasGrantOptionForPrivilege(ConnectorTransactionHandle transactio SemiTransactionalHiveMetastore metastore = getMetastore(transaction); return listApplicableTablePrivileges( metastore, + identity, tableName.getSchemaName(), tableName.getTableName(), identity.getUser()) @@ -429,7 +430,8 @@ private boolean hasAdminOptionForRoles(ConnectorTransactionHandle transaction, C } SemiTransactionalHiveMetastore metastore = getMetastore(transaction); - Set rolesWithGrantOption = listApplicableRoles(new PrestoPrincipal(USER, identity.getUser()), metastore::listRoleGrants) + Set rolesWithGrantOption = listApplicableRoles(new PrestoPrincipal(USER, identity.getUser()), (PrestoPrincipal p) -> metastore.listRoleGrants(identity, p)) + .filter(RoleGrant::isGrantable) .map(RoleGrant::getRoleName) .collect(toSet()); diff --git a/presto-hive/src/main/java/com/facebook/presto/hive/statistics/MetastoreHiveStatisticsProvider.java b/presto-hive/src/main/java/com/facebook/presto/hive/statistics/MetastoreHiveStatisticsProvider.java index 389dee571f951..fc5a8c2e8579e 100644 --- a/presto-hive/src/main/java/com/facebook/presto/hive/statistics/MetastoreHiveStatisticsProvider.java +++ b/presto-hive/src/main/java/com/facebook/presto/hive/statistics/MetastoreHiveStatisticsProvider.java @@ -30,6 +30,7 @@ import 
com.facebook.presto.spi.PrestoException; import com.facebook.presto.spi.SchemaTableName; import com.facebook.presto.spi.predicate.NullableValue; +import com.facebook.presto.spi.security.ConnectorIdentity; import com.facebook.presto.spi.statistics.ColumnStatistics; import com.facebook.presto.spi.statistics.DoubleRange; import com.facebook.presto.spi.statistics.Estimate; @@ -96,10 +97,11 @@ public class MetastoreHiveStatisticsProvider private final PartitionsStatisticsProvider statisticsProvider; - public MetastoreHiveStatisticsProvider(SemiTransactionalHiveMetastore metastore) + public MetastoreHiveStatisticsProvider(SemiTransactionalHiveMetastore metastore, String hmsImpersonationDefaultUser) { requireNonNull(metastore, "metastore is null"); - this.statisticsProvider = (table, hivePartitions) -> getPartitionsStatistics(metastore, table, hivePartitions); + requireNonNull(hmsImpersonationDefaultUser, "hmsImpersonationDefaultUser is null"); + this.statisticsProvider = (table, hivePartitions) -> getPartitionsStatistics(hmsImpersonationDefaultUser, metastore, table, hivePartitions); } @VisibleForTesting @@ -108,20 +110,21 @@ public MetastoreHiveStatisticsProvider(SemiTransactionalHiveMetastore metastore) this.statisticsProvider = requireNonNull(statisticsProvider, "statisticsProvider is null"); } - private static Map getPartitionsStatistics(SemiTransactionalHiveMetastore metastore, SchemaTableName table, List hivePartitions) + private static Map getPartitionsStatistics(String hmsImpersonationDefaultUser, SemiTransactionalHiveMetastore metastore, SchemaTableName table, List hivePartitions) { if (hivePartitions.isEmpty()) { return ImmutableMap.of(); } boolean unpartitioned = hivePartitions.stream().anyMatch(partition -> partition.getPartitionId().equals(UNPARTITIONED_ID)); + ConnectorIdentity identity = new ConnectorIdentity(hmsImpersonationDefaultUser, Optional.empty(), Optional.empty()); if (unpartitioned) { checkArgument(hivePartitions.size() == 1, "expected only one 
hive partition"); - return ImmutableMap.of(UNPARTITIONED_ID, metastore.getTableStatistics(table.getSchemaName(), table.getTableName())); + return ImmutableMap.of(UNPARTITIONED_ID, metastore.getTableStatistics(identity, table.getSchemaName(), table.getTableName())); } Set partitionNames = hivePartitions.stream() .map(HivePartition::getPartitionId) .collect(toImmutableSet()); - return metastore.getPartitionStatistics(table.getSchemaName(), table.getTableName(), partitionNames); + return metastore.getPartitionStatistics(identity, table.getSchemaName(), table.getTableName(), partitionNames); } @Override diff --git a/presto-hive/src/test/java/com/facebook/presto/hive/AbstractTestHiveClient.java b/presto-hive/src/test/java/com/facebook/presto/hive/AbstractTestHiveClient.java index 7ce97f6c28adc..94a289b00ddce 100644 --- a/presto-hive/src/test/java/com/facebook/presto/hive/AbstractTestHiveClient.java +++ b/presto-hive/src/test/java/com/facebook/presto/hive/AbstractTestHiveClient.java @@ -19,7 +19,9 @@ import com.facebook.presto.GroupByHashPageIndexerFactory; import com.facebook.presto.hive.HdfsEnvironment.HdfsContext; import com.facebook.presto.hive.LocationService.WriteInfo; +import com.facebook.presto.hive.authentication.HiveMetastoreAuthentication; import com.facebook.presto.hive.authentication.NoHdfsAuthentication; +import com.facebook.presto.hive.authentication.NoHiveMetastoreAuthentication; import com.facebook.presto.hive.metastore.CachingHiveMetastore; import com.facebook.presto.hive.metastore.Column; import com.facebook.presto.hive.metastore.ExtendedHiveMetastore; @@ -131,6 +133,8 @@ import org.testng.annotations.AfterClass; import org.testng.annotations.BeforeClass; import org.testng.annotations.Test; +import org.weakref.jmx.MBeanExporter; +import org.weakref.jmx.testing.TestingMBeanServer; import java.io.IOException; import java.math.BigDecimal; @@ -202,6 +206,7 @@ import static com.facebook.presto.hive.HiveTableProperties.STORAGE_FORMAT_PROPERTY; import static 
com.facebook.presto.hive.HiveTestUtils.FILTER_STATS_CALCULATOR_SERVICE; import static com.facebook.presto.hive.HiveTestUtils.FUNCTION_RESOLUTION; +import static com.facebook.presto.hive.HiveTestUtils.IMPERSONATIONUSER; import static com.facebook.presto.hive.HiveTestUtils.METADATA; import static com.facebook.presto.hive.HiveTestUtils.PAGE_SORTER; import static com.facebook.presto.hive.HiveTestUtils.ROW_EXPRESSION_SERVICE; @@ -707,6 +712,7 @@ private static RowExpression toRowExpression(String sql) protected DateTimeZone timeZone; protected HdfsEnvironment hdfsEnvironment; + protected HiveMetastoreAuthentication metastoreAuthentication; protected LocationService locationService; protected HiveMetadataFactory metadataFactory; @@ -890,7 +896,7 @@ protected final void setup(String host, int port, String databaseName, String ti HiveCluster hiveCluster = new TestingHiveCluster(metastoreClientConfig, host, port); ExtendedHiveMetastore metastore = new CachingHiveMetastore( - new BridgingHiveMetastore(new ThriftHiveMetastore(hiveCluster)), + new BridgingHiveMetastore(new ThriftHiveMetastore(hiveCluster, metastoreClientConfig, new MBeanExporter(new TestingMBeanServer()))), executor, Duration.valueOf("1m"), Duration.valueOf("15s"), @@ -909,10 +915,12 @@ protected final void setup(String databaseName, HiveClientConfig hiveClientConfi metastoreClient = hiveMetastore; HdfsConfiguration hdfsConfiguration = new HiveHdfsConfiguration(new HdfsConfigurationInitializer(hiveClientConfig, metastoreClientConfig), ImmutableSet.of()); hdfsEnvironment = new HdfsEnvironment(hdfsConfiguration, metastoreClientConfig, new NoHdfsAuthentication()); + metastoreAuthentication = new NoHiveMetastoreAuthentication(); locationService = new HiveLocationService(hdfsEnvironment); metadataFactory = new HiveMetadataFactory( metastoreClient, hdfsEnvironment, + metastoreAuthentication, hivePartitionManager, timeZone, true, @@ -922,6 +930,7 @@ protected final void setup(String databaseName, HiveClientConfig 
hiveClientConfi true, getHiveClientConfig().getMaxPartitionBatchSize(), getHiveClientConfig().getMaxPartitionsPerScan(), + IMPERSONATIONUSER, TYPE_MANAGER, locationService, FUNCTION_RESOLUTION, @@ -1341,7 +1350,7 @@ protected void doTestMismatchSchemaTable( try (Transaction transaction = newTransaction()) { ConnectorSession session = newSession(); PrincipalPrivileges principalPrivileges = testingPrincipalPrivilege(session); - Table oldTable = transaction.getMetastore().getTable(schemaName, tableName).get(); + Table oldTable = transaction.getMetastore().getTable(session.getIdentity(), schemaName, tableName).get(); HiveTypeTranslator hiveTypeTranslator = new HiveTypeTranslator(); List dataColumns = tableAfter.stream() .filter(columnMetadata -> !columnMetadata.getName().equals("ds")) @@ -1349,9 +1358,7 @@ protected void doTestMismatchSchemaTable( .collect(toList()); Table.Builder newTable = Table.builder(oldTable) .setDataColumns(dataColumns); - - transaction.getMetastore().replaceView(schemaName, tableName, newTable.build(), principalPrivileges); - + transaction.getMetastore().replaceView(session.getIdentity(), schemaName, tableName, newTable.build(), principalPrivileges); transaction.commit(); } @@ -2520,7 +2527,7 @@ private void assertEmptyFile(HiveStorageFormat format) List columnHandles = filterNonHiddenColumnHandles(metadata.getColumnHandles(session, tableHandle).values()); Table table = transaction.getMetastore() - .getTable(tableName.getSchemaName(), tableName.getTableName()) + .getTable(session.getIdentity(), tableName.getSchemaName(), tableName.getTableName()) .orElseThrow(AssertionError::new); // verify directory is empty @@ -2682,7 +2689,7 @@ public void testTableCreationIgnoreExisting() Table table = createSimpleTable(schemaTableName, columns, session, targetPath, "q1"); transaction.getMetastore() .createTable(session, table, privileges, Optional.empty(), false, EMPTY_TABLE_STATISTICS); - Optional
tableHandle = transaction.getMetastore().getTable(schemaName, tableName); + Optional
tableHandle = transaction.getMetastore().getTable(session.getIdentity(), schemaName, tableName); assertTrue(tableHandle.isPresent()); transaction.commit(); } @@ -3560,7 +3567,7 @@ protected String partitionTargetPath(SchemaTableName schemaTableName, String par ConnectorSession session = newSession(); SemiTransactionalHiveMetastore metastore = transaction.getMetastore(); LocationService locationService = getLocationService(); - Table table = metastore.getTable(schemaTableName.getSchemaName(), schemaTableName.getTableName()).get(); + Table table = metastore.getTable(session.getIdentity(), schemaTableName.getSchemaName(), schemaTableName.getTableName()).get(); LocationHandle handle = locationService.forExistingTable(metastore, session, table, false); return locationService.getPartitionWriteInfo(handle, Optional.empty(), partitionName).getTargetPath().toString(); } @@ -3734,7 +3741,7 @@ protected void doCreateTable(SchemaTableName tableName, HiveStorageFormat storag assertEquals(table.getParameters().get(PRESTO_QUERY_ID_NAME), queryId); // verify basic statistics - HiveBasicStatistics statistics = getBasicStatisticsForTable(transaction, tableName); + HiveBasicStatistics statistics = getBasicStatisticsForTable(session.getIdentity(), transaction, tableName); assertEquals(statistics.getRowCount().getAsLong(), CREATE_TABLE_DATA.getRowCount()); assertEquals(statistics.getFileCount().getAsLong(), 1L); assertGreaterThan(statistics.getInMemoryDataSizeInBytes().getAsLong(), 0L); @@ -3782,7 +3789,7 @@ protected void doCreateEmptyTable(SchemaTableName tableName, HiveStorageFormat s assertEquals(filterNonHiddenColumnMetadata(tableMetadata.getColumns()), expectedColumns); // verify table format - Table table = transaction.getMetastore().getTable(tableName.getSchemaName(), tableName.getTableName()).get(); + Table table = transaction.getMetastore().getTable(session.getIdentity(), tableName.getSchemaName(), tableName.getTableName()).get(); 
assertEquals(table.getStorage().getStorageFormat().getInputFormat(), storageFormat.getInputFormat()); // verify the node version and query ID @@ -3796,14 +3803,14 @@ protected void doCreateEmptyTable(SchemaTableName tableName, HiveStorageFormat s // verify basic statistics if (partitionedBy.isEmpty()) { - assertEmptyTableStatistics(tableName, transaction); + assertEmptyTableStatistics(session.getIdentity(), tableName, transaction); } } } - protected void assertEmptyTableStatistics(SchemaTableName tableName, Transaction transaction) + protected void assertEmptyTableStatistics(ConnectorIdentity identity, SchemaTableName tableName, Transaction transaction) { - HiveBasicStatistics statistics = getBasicStatisticsForTable(transaction, tableName); + HiveBasicStatistics statistics = getBasicStatisticsForTable(identity, transaction, tableName); assertEquals(statistics.getRowCount().getAsLong(), 0L); assertEquals(statistics.getFileCount().getAsLong(), 0L); assertEquals(statistics.getInMemoryDataSizeInBytes().getAsLong(), 0L); @@ -3837,7 +3844,7 @@ private void doInsert(HiveStorageFormat storageFormat, SchemaTableName tableName assertEqualsIgnoreOrder(result.getMaterializedRows(), resultBuilder.build().getMaterializedRows()); // statistics - HiveBasicStatistics tableStatistics = getBasicStatisticsForTable(transaction, tableName); + HiveBasicStatistics tableStatistics = getBasicStatisticsForTable(session.getIdentity(), transaction, tableName); assertEquals(tableStatistics.getRowCount().getAsLong(), CREATE_TABLE_DATA.getRowCount() * (i + 1)); assertEquals(tableStatistics.getFileCount().getAsLong(), i + 1L); assertGreaterThan(tableStatistics.getInMemoryDataSizeInBytes().getAsLong(), 0L); @@ -3848,7 +3855,8 @@ private void doInsert(HiveStorageFormat storageFormat, SchemaTableName tableName // test rollback Set existingFiles; try (Transaction transaction = newTransaction()) { - existingFiles = listAllDataFiles(transaction, tableName.getSchemaName(), tableName.getTableName()); + 
ConnectorSession session = newSession(); + existingFiles = listAllDataFiles(session.getIdentity(), transaction, tableName.getSchemaName(), tableName.getTableName()); assertFalse(existingFiles.isEmpty()); } @@ -3872,12 +3880,12 @@ private void doInsert(HiveStorageFormat storageFormat, SchemaTableName tableName metadata.finishInsert(session, insertTableHandle, fragments, ImmutableList.of()); // statistics, visible from within transaction - HiveBasicStatistics tableStatistics = getBasicStatisticsForTable(transaction, tableName); + HiveBasicStatistics tableStatistics = getBasicStatisticsForTable(session.getIdentity(), transaction, tableName); assertEquals(tableStatistics.getRowCount().getAsLong(), CREATE_TABLE_DATA.getRowCount() * 5L); try (Transaction otherTransaction = newTransaction()) { // statistics, not visible from outside transaction - HiveBasicStatistics otherTableStatistics = getBasicStatisticsForTable(otherTransaction, tableName); + HiveBasicStatistics otherTableStatistics = getBasicStatisticsForTable(session.getIdentity(), otherTransaction, tableName); assertEquals(otherTableStatistics.getRowCount().getAsLong(), CREATE_TABLE_DATA.getRowCount() * 3L); } @@ -3913,12 +3921,13 @@ private void doInsert(HiveStorageFormat storageFormat, SchemaTableName tableName assertEqualsIgnoreOrder(result.getMaterializedRows(), resultBuilder.build().getMaterializedRows()); // verify we did not modify the table directory - assertEquals(listAllDataFiles(transaction, tableName.getSchemaName(), tableName.getTableName()), existingFiles); + assertEquals(listAllDataFiles(session.getIdentity(), transaction, tableName.getSchemaName(), tableName.getTableName()), existingFiles); } // verify statistics unchanged try (Transaction transaction = newTransaction()) { - HiveBasicStatistics statistics = getBasicStatisticsForTable(transaction, tableName); + ConnectorSession session = newSession(); + HiveBasicStatistics statistics = getBasicStatisticsForTable(session.getIdentity(), transaction, 
tableName); assertEquals(statistics.getRowCount().getAsLong(), CREATE_TABLE_DATA.getRowCount() * 3L); assertEquals(statistics.getFileCount().getAsLong(), 3L); } @@ -3970,21 +3979,21 @@ protected Optional getTempFilePathRoot(ConnectorOutputTableHandle outputTa .getTempPath(); } - protected Set listAllDataFiles(Transaction transaction, String schemaName, String tableName) + protected Set listAllDataFiles(ConnectorIdentity identity, Transaction transaction, String schemaName, String tableName) throws IOException { HdfsContext context = new HdfsContext(newSession(), schemaName, tableName); Set existingFiles = new HashSet<>(); - for (String location : listAllDataPaths(transaction.getMetastore(), schemaName, tableName)) { + for (String location : listAllDataPaths(identity, transaction.getMetastore(), schemaName, tableName)) { existingFiles.addAll(listAllDataFiles(context, new Path(location))); } return existingFiles; } - public static List listAllDataPaths(SemiTransactionalHiveMetastore metastore, String schemaName, String tableName) + public static List listAllDataPaths(ConnectorIdentity identity, SemiTransactionalHiveMetastore metastore, String schemaName, String tableName) { ImmutableList.Builder locations = ImmutableList.builder(); - Table table = metastore.getTable(schemaName, tableName).get(); + Table table = metastore.getTable(identity, schemaName, tableName).get(); if (table.getStorage().getLocation() != null) { // For partitioned table, there should be nothing directly under this directory. 
// But including this location in the set makes the directory content assert more @@ -3992,9 +4001,9 @@ public static List listAllDataPaths(SemiTransactionalHiveMetastore metas locations.add(table.getStorage().getLocation()); } - Optional> partitionNames = metastore.getPartitionNames(schemaName, tableName); + Optional> partitionNames = metastore.getPartitionNames(identity, schemaName, tableName); if (partitionNames.isPresent()) { - metastore.getPartitionsByNames(schemaName, tableName, partitionNames.get()).values().stream() + metastore.getPartitionsByNames(identity, schemaName, tableName, partitionNames.get()).values().stream() .map(Optional::get) .map(partition -> partition.getStorage().getLocation()) .filter(location -> !location.startsWith(table.getStorage().getLocation())) @@ -4036,8 +4045,9 @@ private void doInsertIntoNewPartition(HiveStorageFormat storageFormat, SchemaTab Set existingFiles; try (Transaction transaction = newTransaction()) { + ConnectorSession session = newSession(); // verify partitions were created - List partitionNames = transaction.getMetastore().getPartitionNames(tableName.getSchemaName(), tableName.getTableName()) + List partitionNames = transaction.getMetastore().getPartitionNames(session.getIdentity(), tableName.getSchemaName(), tableName.getTableName()) .orElseThrow(() -> new AssertionError("Table does not exist: " + tableName)); assertEqualsIgnoreOrder(partitionNames, CREATE_TABLE_PARTITIONED_DATA.getMaterializedRows().stream() .map(row -> "ds=" + row.getField(CREATE_TABLE_PARTITIONED_DATA.getTypes().size() - 1)) @@ -4053,7 +4063,6 @@ private void doInsertIntoNewPartition(HiveStorageFormat storageFormat, SchemaTab } // load the new table - ConnectorSession session = newSession(); ConnectorMetadata metadata = transaction.getMetadata(); ConnectorTableHandle tableHandle = getTableHandle(metadata, tableName); List columnHandles = filterNonHiddenColumnHandles(metadata.getColumnHandles(session, tableHandle).values()); @@ -4063,12 +4072,12 
@@ private void doInsertIntoNewPartition(HiveStorageFormat storageFormat, SchemaTab assertEqualsIgnoreOrder(result.getMaterializedRows(), CREATE_TABLE_PARTITIONED_DATA.getMaterializedRows()); // test rollback - existingFiles = listAllDataFiles(transaction, tableName.getSchemaName(), tableName.getTableName()); + existingFiles = listAllDataFiles(session.getIdentity(), transaction, tableName.getSchemaName(), tableName.getTableName()); assertFalse(existingFiles.isEmpty()); // test statistics for (String partitionName : partitionNames) { - HiveBasicStatistics partitionStatistics = getBasicStatisticsForPartition(transaction, tableName, partitionName); + HiveBasicStatistics partitionStatistics = getBasicStatisticsForPartition(session.getIdentity(), transaction, tableName, partitionName); assertEquals(partitionStatistics.getRowCount().getAsLong(), 1L); assertEquals(partitionStatistics.getFileCount().getAsLong(), 1L); assertGreaterThan(partitionStatistics.getInMemoryDataSizeInBytes().getAsLong(), 0L); @@ -4117,7 +4126,7 @@ private void doInsertIntoNewPartition(HiveStorageFormat storageFormat, SchemaTab assertEqualsIgnoreOrder(result.getMaterializedRows(), CREATE_TABLE_PARTITIONED_DATA.getMaterializedRows()); // verify we did not modify the table directory - assertEquals(listAllDataFiles(transaction, tableName.getSchemaName(), tableName.getTableName()), existingFiles); + assertEquals(listAllDataFiles(session.getIdentity(), transaction, tableName.getSchemaName(), tableName.getTableName()), existingFiles); // verify temp directory is empty HdfsContext context = new HdfsContext(session, tableName.getSchemaName(), tableName.getTableName()); @@ -4163,7 +4172,7 @@ private void doInsertIntoExistingPartition(HiveStorageFormat storageFormat, Sche ConnectorTableHandle tableHandle = getTableHandle(metadata, tableName); // verify partitions were created - List partitionNames = transaction.getMetastore().getPartitionNames(tableName.getSchemaName(), tableName.getTableName()) + List 
partitionNames = transaction.getMetastore().getPartitionNames(session.getIdentity(), tableName.getSchemaName(), tableName.getTableName()) .orElseThrow(() -> new AssertionError("Table does not exist: " + tableName)); assertEqualsIgnoreOrder(partitionNames, CREATE_TABLE_PARTITIONED_DATA.getMaterializedRows().stream() .map(row -> "ds=" + row.getField(CREATE_TABLE_PARTITIONED_DATA.getTypes().size() - 1)) @@ -4179,7 +4188,7 @@ private void doInsertIntoExistingPartition(HiveStorageFormat storageFormat, Sche // test statistics for (String partitionName : partitionNames) { - HiveBasicStatistics statistics = getBasicStatisticsForPartition(transaction, tableName, partitionName); + HiveBasicStatistics statistics = getBasicStatisticsForPartition(session.getIdentity(), transaction, tableName, partitionName); assertEquals(statistics.getRowCount().getAsLong(), i + 1L); assertEquals(statistics.getFileCount().getAsLong(), i + 1L); assertGreaterThan(statistics.getInMemoryDataSizeInBytes().getAsLong(), 0L); @@ -4195,7 +4204,7 @@ private void doInsertIntoExistingPartition(HiveStorageFormat storageFormat, Sche ConnectorMetadata metadata = transaction.getMetadata(); ConnectorSession session = newSession(); - existingFiles = listAllDataFiles(transaction, tableName.getSchemaName(), tableName.getTableName()); + existingFiles = listAllDataFiles(session.getIdentity(), transaction, tableName.getSchemaName(), tableName.getTableName()); assertFalse(existingFiles.isEmpty()); ConnectorTableHandle tableHandle = getTableHandle(metadata, tableName); @@ -4222,10 +4231,10 @@ private void doInsertIntoExistingPartition(HiveStorageFormat storageFormat, Sche } // verify statistics are visible from within of the current transaction - List partitionNames = transaction.getMetastore().getPartitionNames(tableName.getSchemaName(), tableName.getTableName()) + List partitionNames = transaction.getMetastore().getPartitionNames(session.getIdentity(), tableName.getSchemaName(), tableName.getTableName()) 
.orElseThrow(() -> new AssertionError("Table does not exist: " + tableName)); for (String partitionName : partitionNames) { - HiveBasicStatistics partitionStatistics = getBasicStatisticsForPartition(transaction, tableName, partitionName); + HiveBasicStatistics partitionStatistics = getBasicStatisticsForPartition(session.getIdentity(), transaction, tableName, partitionName); assertEquals(partitionStatistics.getRowCount().getAsLong(), 5L); } @@ -4244,17 +4253,17 @@ private void doInsertIntoExistingPartition(HiveStorageFormat storageFormat, Sche assertEqualsIgnoreOrder(result.getMaterializedRows(), resultBuilder.build().getMaterializedRows()); // verify we did not modify the table directory - assertEquals(listAllDataFiles(transaction, tableName.getSchemaName(), tableName.getTableName()), existingFiles); + assertEquals(listAllDataFiles(session.getIdentity(), transaction, tableName.getSchemaName(), tableName.getTableName()), existingFiles); // verify temp directory is empty HdfsContext context = new HdfsContext(session, tableName.getSchemaName(), tableName.getTableName()); assertTrue(listAllDataFiles(context, stagingPathRoot).isEmpty()); // verify statistics have been rolled back - List partitionNames = transaction.getMetastore().getPartitionNames(tableName.getSchemaName(), tableName.getTableName()) + List partitionNames = transaction.getMetastore().getPartitionNames(session.getIdentity(), tableName.getSchemaName(), tableName.getTableName()) .orElseThrow(() -> new AssertionError("Table does not exist: " + tableName)); for (String partitionName : partitionNames) { - HiveBasicStatistics partitionStatistics = getBasicStatisticsForPartition(transaction, tableName, partitionName); + HiveBasicStatistics partitionStatistics = getBasicStatisticsForPartition(session.getIdentity(), transaction, tableName, partitionName); assertEquals(partitionStatistics.getRowCount().getAsLong(), 3L); } } @@ -4279,13 +4288,13 @@ private void 
doInsertIntoExistingPartitionEmptyStatistics(HiveStorageFormat stor eraseStatistics(tableName); insertData(tableName, CREATE_TABLE_PARTITIONED_DATA); - + ConnectorSession session = newSession(); try (Transaction transaction = newTransaction()) { - List partitionNames = transaction.getMetastore().getPartitionNames(tableName.getSchemaName(), tableName.getTableName()) + List partitionNames = transaction.getMetastore().getPartitionNames(session.getIdentity(), tableName.getSchemaName(), tableName.getTableName()) .orElseThrow(() -> new AssertionError("Table does not exist: " + tableName)); for (String partitionName : partitionNames) { - HiveBasicStatistics statistics = getBasicStatisticsForPartition(transaction, tableName, partitionName); + HiveBasicStatistics statistics = getBasicStatisticsForPartition(session.getIdentity(), transaction, tableName, partitionName); assertThat(statistics.getRowCount()).isNotPresent(); assertThat(statistics.getInMemoryDataSizeInBytes()).isNotPresent(); // fileCount and rawSize statistics are computed on the fly by the metastore, thus cannot be erased @@ -4293,19 +4302,19 @@ private void doInsertIntoExistingPartitionEmptyStatistics(HiveStorageFormat stor } } - private static HiveBasicStatistics getBasicStatisticsForTable(Transaction transaction, SchemaTableName table) + private static HiveBasicStatistics getBasicStatisticsForTable(ConnectorIdentity identity, Transaction transaction, SchemaTableName table) { return transaction .getMetastore() - .getTableStatistics(table.getSchemaName(), table.getTableName()) + .getTableStatistics(identity, table.getSchemaName(), table.getTableName()) .getBasicStatistics(); } - private static HiveBasicStatistics getBasicStatisticsForPartition(Transaction transaction, SchemaTableName table, String partitionName) + private static HiveBasicStatistics getBasicStatisticsForPartition(ConnectorIdentity identity, Transaction transaction, SchemaTableName table, String partitionName) { return transaction 
.getMetastore() - .getPartitionStatistics(table.getSchemaName(), table.getTableName(), ImmutableSet.of(partitionName)) + .getPartitionStatistics(identity, table.getSchemaName(), table.getTableName(), ImmutableSet.of(partitionName)) .get(partitionName) .getBasicStatistics(); } @@ -4395,14 +4404,14 @@ private void doTestMetadataDelete(HiveStorageFormat storageFormat, SchemaTableNa ConnectorMetadata metadata = transaction.getMetadata(); // verify partitions were created - List partitionNames = transaction.getMetastore().getPartitionNames(tableName.getSchemaName(), tableName.getTableName()) + List partitionNames = transaction.getMetastore().getPartitionNames(session.getIdentity(), tableName.getSchemaName(), tableName.getTableName()) .orElseThrow(() -> new AssertionError("Table does not exist: " + tableName)); assertEqualsIgnoreOrder(partitionNames, CREATE_TABLE_PARTITIONED_DATA.getMaterializedRows().stream() .map(row -> "ds=" + row.getField(CREATE_TABLE_PARTITIONED_DATA.getTypes().size() - 1)) .collect(toList())); // verify table directory is not empty - Set filesAfterInsert = listAllDataFiles(transaction, tableName.getSchemaName(), tableName.getTableName()); + Set filesAfterInsert = listAllDataFiles(session.getIdentity(), transaction, tableName.getSchemaName(), tableName.getTableName()); assertFalse(filesAfterInsert.isEmpty()); // verify the data @@ -4476,7 +4485,7 @@ private void doTestMetadataDelete(HiveStorageFormat storageFormat, SchemaTableNa assertEqualsIgnoreOrder(actualAfterDelete2.getMaterializedRows(), ImmutableList.of()); // verify table directory is empty - Set filesAfterDelete = listAllDataFiles(transaction, tableName.getSchemaName(), tableName.getTableName()); + Set filesAfterDelete = listAllDataFiles(session.getIdentity(), transaction, tableName.getSchemaName(), tableName.getTableName()); assertTrue(filesAfterDelete.isEmpty()); } } @@ -5090,7 +5099,8 @@ protected Table createEmptyTable(SchemaTableName schemaTableName, HiveStorageFor 
assertEquals(targetDirectoryList, ImmutableList.of()); try (Transaction transaction = newTransaction()) { - return transaction.getMetastore().getTable(schemaTableName.getSchemaName(), schemaTableName.getTableName()).get(); + ConnectorSession session = newSession(); + return transaction.getMetastore().getTable(session.getIdentity(), schemaTableName.getSchemaName(), schemaTableName.getTableName()).get(); } } @@ -5102,13 +5112,12 @@ private void alterBucketProperty(SchemaTableName schemaTableName, Optional table = transaction.getMetastore().getTable(schemaName, tableName); + Optional
table = transaction.getMetastore().getTable(session.getIdentity(), schemaName, tableName); Table.Builder tableBuilder = Table.builder(table.get()); tableBuilder.getStorageBuilder().setBucketProperty(bucketProperty); PrincipalPrivileges principalPrivileges = testingPrincipalPrivilege(tableOwner, session.getUser()); // hack: replaceView can be used as replaceTable despite its name - transaction.getMetastore().replaceView(schemaName, tableName, tableBuilder.build(), principalPrivileges); + transaction.getMetastore().replaceView(session.getIdentity(), schemaName, tableName, tableBuilder.build(), principalPrivileges); transaction.commit(); } @@ -5322,9 +5331,10 @@ private void doTestTransactionDeleteInsert( } try (Transaction transaction = newTransaction()) { + ConnectorSession session = newSession(); // verify partitions List partitionNames = transaction.getMetastore() - .getPartitionNames(tableName.getSchemaName(), tableName.getTableName()) + .getPartitionNames(session.getIdentity(), tableName.getSchemaName(), tableName.getTableName()) .orElseThrow(() -> new AssertionError("Table does not exist: " + tableName)); assertEqualsIgnoreOrder( partitionNames, @@ -5334,7 +5344,6 @@ private void doTestTransactionDeleteInsert( .collect(toList())); // load the new table - ConnectorSession session = newSession(); ConnectorMetadata metadata = transaction.getMetadata(); ConnectorTableHandle tableHandle = getTableHandle(metadata, tableName); List columnHandles = filterNonHiddenColumnHandles(metadata.getColumnHandles(session, tableHandle).values()); diff --git a/presto-hive/src/test/java/com/facebook/presto/hive/AbstractTestHiveFileSystem.java b/presto-hive/src/test/java/com/facebook/presto/hive/AbstractTestHiveFileSystem.java index 7e6e81a50449b..bdfc1f77a6302 100644 --- a/presto-hive/src/test/java/com/facebook/presto/hive/AbstractTestHiveFileSystem.java +++ b/presto-hive/src/test/java/com/facebook/presto/hive/AbstractTestHiveFileSystem.java @@ -20,7 +20,9 @@ import 
com.facebook.presto.hive.AbstractTestHiveClient.HiveTransaction; import com.facebook.presto.hive.AbstractTestHiveClient.Transaction; import com.facebook.presto.hive.HdfsEnvironment.HdfsContext; +import com.facebook.presto.hive.authentication.HiveMetastoreAuthentication; import com.facebook.presto.hive.authentication.NoHdfsAuthentication; +import com.facebook.presto.hive.authentication.NoHiveMetastoreAuthentication; import com.facebook.presto.hive.metastore.CachingHiveMetastore; import com.facebook.presto.hive.metastore.Database; import com.facebook.presto.hive.metastore.ExtendedHiveMetastore; @@ -70,6 +72,8 @@ import org.testng.annotations.AfterClass; import org.testng.annotations.BeforeClass; import org.testng.annotations.Test; +import org.weakref.jmx.MBeanExporter; +import org.weakref.jmx.testing.TestingMBeanServer; import java.io.IOException; import java.io.UncheckedIOException; @@ -124,6 +128,7 @@ public abstract class AbstractTestHiveFileSystem protected SchemaTableName temporaryCreateTable; protected HdfsEnvironment hdfsEnvironment; + protected HiveMetastoreAuthentication metastoreAuthentication; protected LocationService locationService; protected TestingHiveMetastore metastoreClient; protected HiveMetadataFactory metadataFactory; @@ -177,11 +182,12 @@ protected void setup(String host, int port, String databaseName, BiFunction partitionUpdateCodec = JsonCodec.jsonCodec(PartitionUpdate.class); metadataFactory = new HiveMetadataFactory( @@ -189,6 +195,7 @@ protected void setup(String host, int port, String databaseName, BiFunction tablesMap = new HashMap<>(); private final Map> partitionsMap = new HashMap<>(); - private TestingSemiTransactionalHiveMetastore(HdfsEnvironment hdfsEnvironment, ExtendedHiveMetastore delegate, ListeningExecutorService renameExecutor, boolean skipDeletionForAlter, boolean skipTargetCleanupOnRollback) + private TestingSemiTransactionalHiveMetastore(HdfsEnvironment hdfsEnvironment, ExtendedHiveMetastore delegate, 
HiveMetastoreAuthentication authentication, ListeningExecutorService renameExecutor, boolean skipDeletionForAlter, boolean skipTargetCleanupOnRollback) { - super(hdfsEnvironment, delegate, renameExecutor, skipDeletionForAlter, skipTargetCleanupOnRollback); + super(hdfsEnvironment, delegate, renameExecutor, authentication, skipDeletionForAlter, skipTargetCleanupOnRollback); } public static TestingSemiTransactionalHiveMetastore create() @@ -74,11 +79,12 @@ public static TestingSemiTransactionalHiveMetastore create() HdfsConfiguration hdfsConfiguration = new HiveHdfsConfiguration(new HdfsConfigurationInitializer(config, metastoreClientConfig), ImmutableSet.of()); HdfsEnvironment hdfsEnvironment = new HdfsEnvironment(hdfsConfiguration, metastoreClientConfig, new NoHdfsAuthentication()); HiveCluster hiveCluster = new TestingHiveCluster(metastoreClientConfig, HOST, PORT); - ExtendedHiveMetastore delegate = new BridgingHiveMetastore(new ThriftHiveMetastore(hiveCluster)); + HiveMetastoreAuthentication metastoreAuthentication = new NoHiveMetastoreAuthentication(); + ExtendedHiveMetastore delegate = new BridgingHiveMetastore(new ThriftHiveMetastore(hiveCluster, metastoreClientConfig, new MBeanExporter(new TestingMBeanServer()))); ExecutorService executor = newCachedThreadPool(daemonThreadsNamed("hive-%s")); ListeningExecutorService renameExecutor = listeningDecorator(executor); - return new TestingSemiTransactionalHiveMetastore(hdfsEnvironment, delegate, renameExecutor, false, false); + return new TestingSemiTransactionalHiveMetastore(hdfsEnvironment, delegate, metastoreAuthentication, renameExecutor, false, false); } public void addTable(String database, String tableName, Table table, List partitions) @@ -89,85 +95,85 @@ public void addTable(String database, String tableName, Table table, List getAllDatabases() + public synchronized List getAllDatabases(ConnectorIdentity identity) { throw new UnsupportedOperationException("method not implemented"); } @Override - public 
synchronized Optional getDatabase(String databaseName) + public synchronized Optional getDatabase(ConnectorIdentity identity, String databaseName) { throw new UnsupportedOperationException("method not implemented"); } @Override - public synchronized Optional> getAllTables(String databaseName) + public synchronized Optional> getAllTables(ConnectorIdentity identity, String databaseName) { throw new UnsupportedOperationException("method not implemented"); } @Override - public synchronized Optional
getTable(String databaseName, String tableName) + public synchronized Optional
getTable(ConnectorIdentity identity, String databaseName, String tableName) { return Optional.ofNullable(tablesMap.get(new HiveTableName(databaseName, tableName))); } @Override - public synchronized Set getSupportedColumnStatistics(Type type) + public synchronized Set getSupportedColumnStatistics(ConnectorIdentity identity, Type type) { throw new UnsupportedOperationException("method not implemented"); } @Override - public synchronized PartitionStatistics getTableStatistics(String databaseName, String tableName) + public synchronized PartitionStatistics getTableStatistics(ConnectorIdentity identity, String databaseName, String tableName) { throw new UnsupportedOperationException("method not implemented"); } @Override - public synchronized Map getPartitionStatistics(String databaseName, String tableName, Set partitionNames) + public synchronized Map getPartitionStatistics(ConnectorIdentity identity, String databaseName, String tableName, Set partitionNames) { throw new UnsupportedOperationException("method not implemented"); } @Override - public synchronized HivePageSinkMetadata generatePageSinkMetadata(SchemaTableName schemaTableName) + public synchronized HivePageSinkMetadata generatePageSinkMetadata(ConnectorIdentity identity, SchemaTableName schemaTableName) { throw new UnsupportedOperationException("method not implemented"); } @Override - public synchronized Optional> getAllViews(String databaseName) + public synchronized Optional> getAllViews(ConnectorIdentity identity, String databaseName) { throw new UnsupportedOperationException("method not implemented"); } @Override - public synchronized void createDatabase(Database database) + public synchronized void createDatabase(ConnectorIdentity identity, Database database) { throw new UnsupportedOperationException("method not implemented"); } @Override - public synchronized void dropDatabase(String schemaName) + public synchronized void dropDatabase(ConnectorIdentity identity, String schemaName) { throw new 
UnsupportedOperationException("method not implemented"); } @Override - public synchronized void renameDatabase(String source, String target) + public synchronized void renameDatabase(ConnectorIdentity identity, String source, String target) { throw new UnsupportedOperationException("method not implemented"); } @Override - public synchronized void setTableStatistics(Table table, PartitionStatistics tableStatistics) + public synchronized void setTableStatistics(ConnectorIdentity identity, Table table, PartitionStatistics tableStatistics) { throw new UnsupportedOperationException("method not implemented"); } @Override - public synchronized void setPartitionStatistics(Table table, Map, PartitionStatistics> partitionStatisticsMap) + public synchronized void setPartitionStatistics(ConnectorIdentity identity, Table table, Map, PartitionStatistics> partitionStatisticsMap) { throw new UnsupportedOperationException("method not implemented"); } @@ -185,31 +191,31 @@ public synchronized void dropTable(ConnectorSession session, String databaseName } @Override - public synchronized void replaceView(String databaseName, String tableName, Table table, PrincipalPrivileges principalPrivileges) + public synchronized void replaceView(ConnectorIdentity identity, String databaseName, String tableName, Table table, PrincipalPrivileges principalPrivileges) { throw new UnsupportedOperationException("method not implemented"); } @Override - public synchronized void renameTable(String databaseName, String tableName, String newDatabaseName, String newTableName) + public synchronized void renameTable(ConnectorIdentity identity, String databaseName, String tableName, String newDatabaseName, String newTableName) { throw new UnsupportedOperationException("method not implemented"); } @Override - public synchronized void addColumn(String databaseName, String tableName, String columnName, HiveType columnType, String columnComment) + public synchronized void addColumn(ConnectorIdentity identity, 
String databaseName, String tableName, String columnName, HiveType columnType, String columnComment) { throw new UnsupportedOperationException("method not implemented"); } @Override - public synchronized void renameColumn(String databaseName, String tableName, String oldColumnName, String newColumnName) + public synchronized void renameColumn(ConnectorIdentity identity, String databaseName, String tableName, String oldColumnName, String newColumnName) { throw new UnsupportedOperationException("method not implemented"); } @Override - public synchronized void dropColumn(String databaseName, String tableName, String columnName) + public synchronized void dropColumn(ConnectorIdentity identity, String databaseName, String tableName, String columnName) { throw new UnsupportedOperationException("method not implemented"); } @@ -227,25 +233,25 @@ public synchronized void truncateUnpartitionedTable(ConnectorSession session, St } @Override - public synchronized Optional> getPartitionNames(String databaseName, String tableName) + public synchronized Optional> getPartitionNames(ConnectorIdentity identity, String databaseName, String tableName) { throw new UnsupportedOperationException("method not implemented"); } @Override - public synchronized Optional> getPartitionNamesByFilter(String databaseName, String tableName, Map effectivePredicate) + public synchronized Optional> getPartitionNamesByFilter(ConnectorIdentity identity, String databaseName, String tableName, Map effectivePredicate) { return Optional.ofNullable(partitionsMap.get(new HiveTableName(databaseName, tableName))); } @Override - public synchronized Optional getPartition(String databaseName, String tableName, List partitionValues) + public synchronized Optional getPartition(ConnectorIdentity identity, String databaseName, String tableName, List partitionValues) { throw new UnsupportedOperationException("method not implemented"); } @Override - public synchronized Map> getPartitionsByNames(String databaseName, String 
tableName, List partitionNames) + public synchronized Map> getPartitionsByNames(ConnectorIdentity identity, String databaseName, String tableName, List partitionNames) { throw new UnsupportedOperationException("method not implemented"); } @@ -269,55 +275,55 @@ public synchronized void finishInsertIntoExistingPartition(ConnectorSession sess } @Override - public synchronized void createRole(String role, String grantor) + public synchronized void createRole(ConnectorIdentity identity, String role, String grantor) { throw new UnsupportedOperationException("method not implemented"); } @Override - public synchronized void dropRole(String role) + public synchronized void dropRole(ConnectorIdentity identity, String role) { throw new UnsupportedOperationException("method not implemented"); } @Override - public synchronized Set listRoles() + public synchronized Set listRoles(ConnectorIdentity identity) { throw new UnsupportedOperationException("method not implemented"); } @Override - public synchronized void grantRoles(Set roles, Set grantees, boolean withAdminOption, PrestoPrincipal grantor) + public synchronized void grantRoles(ConnectorIdentity identity, Set roles, Set grantees, boolean withAdminOption, PrestoPrincipal grantor) { throw new UnsupportedOperationException("method not implemented"); } @Override - public synchronized void revokeRoles(Set roles, Set grantees, boolean adminOptionFor, PrestoPrincipal grantor) + public synchronized void revokeRoles(ConnectorIdentity identity, Set roles, Set grantees, boolean adminOptionFor, PrestoPrincipal grantor) { throw new UnsupportedOperationException("method not implemented"); } @Override - public synchronized Set listRoleGrants(PrestoPrincipal principal) + public synchronized Set listRoleGrants(ConnectorIdentity identity, PrestoPrincipal principal) { throw new UnsupportedOperationException("method not implemented"); } @Override - public synchronized Set listTablePrivileges(String databaseName, String tableName, 
PrestoPrincipal principal) + public synchronized Set listTablePrivileges(ConnectorIdentity identity, String databaseName, String tableName, PrestoPrincipal principal) { throw new UnsupportedOperationException("method not implemented"); } @Override - public synchronized void grantTablePrivileges(String databaseName, String tableName, PrestoPrincipal grantee, Set privileges) + public synchronized void grantTablePrivileges(ConnectorIdentity identity, String databaseName, String tableName, PrestoPrincipal grantee, Set privileges) { throw new UnsupportedOperationException("method not implemented"); } @Override - public synchronized void revokeTablePrivileges(String databaseName, String tableName, PrestoPrincipal grantee, Set privileges) + public synchronized void revokeTablePrivileges(ConnectorIdentity identity, String databaseName, String tableName, PrestoPrincipal grantee, Set privileges) { throw new UnsupportedOperationException("method not implemented"); }