From cd5d8413f2e491ff80827ca97d941c80a3a909a4 Mon Sep 17 00:00:00 2001 From: Zhongting Hu Date: Thu, 21 Mar 2019 10:57:02 -0700 Subject: [PATCH] Support Hive Metastore access with impersonation This PR enabled the option of impersonation access of the Hive Metastore impersonation. It also includes the changes to enable multiple HMS instances load-balancing, and reporting the stats for each individual HMS instance. --- .../main/sphinx/connector/hive-security.rst | 1 + .../src/main/sphinx/connector/hive.rst | 1 + .../plugin/geospatial/TestSpatialJoins.java | 3 +- .../presto/hive/MetastoreClientConfig.java | 15 + .../HiveMetastoreAuthentication.java | 4 +- .../NoHiveMetastoreAuthentication.java | 4 +- .../hive/metastore/CachingHiveMetastore.java | 472 ++++++++----- .../hive/metastore/ExtendedHiveMetastore.java | 72 +- .../HivePageSinkMetadataProvider.java | 6 +- .../hive/metastore/MetastoreContext.java | 70 ++ .../presto/hive/metastore/MetastoreUtil.java | 4 +- .../metastore/RecordingHiveMetastore.java | 144 ++-- .../SemiTransactionalHiveMetastore.java | 278 ++++---- .../alluxio/AlluxioHiveMetastore.java | 90 +-- .../metastore/file/FileHiveMetastore.java | 178 ++--- .../metastore/glue/GlueHiveMetastore.java | 103 +-- .../thrift/BridgingHiveMetastore.java | 167 ++--- .../hive/metastore/thrift/HiveCluster.java | 4 +- .../hive/metastore/thrift/HiveMetastore.java | 77 +- .../thrift/HiveMetastoreApiStats.java | 6 + .../metastore/thrift/HiveMetastoreClient.java | 3 + .../thrift/HiveMetastoreClientFactory.java | 4 +- .../metastore/thrift/StaticHiveCluster.java | 16 +- .../thrift/StaticMetastoreConfig.java | 14 + .../metastore/thrift/ThriftHiveMetastore.java | 658 +++++++++--------- .../thrift/ThriftHiveMetastoreClient.java | 7 + .../thrift/ThriftHiveMetastoreStats.java | 16 + .../thrift/ThriftMetastoreModule.java | 2 + .../metastore/thrift/ThriftMetastoreUtil.java | 23 +- .../hive/metastore/thrift/Transport.java | 5 +- .../presto/hive/MockHiveMetastore.java | 5 +- 
.../metastore/TestCachingHiveMetastore.java | 102 +-- .../metastore/TestMetastoreClientConfig.java | 7 +- .../metastore/TestRecordingHiveMetastore.java | 63 +- .../metastore/UnimplementedHiveMetastore.java | 72 +- .../thrift/InMemoryHiveMetastore.java | 85 +-- .../thrift/MockHiveMetastoreClient.java | 9 + .../MockHiveMetastoreClientFactory.java | 2 +- .../thrift/TestStaticHiveCluster.java | 8 +- .../thrift/TestStaticMetastoreConfig.java | 11 +- .../metastore/thrift/TestingHiveCluster.java | 5 +- .../hive/CreateEmptyPartitionProcedure.java | 3 +- .../facebook/presto/hive/HiveMetadata.java | 149 ++-- .../presto/hive/HiveMetadataFactory.java | 7 +- .../presto/hive/HivePageSinkProvider.java | 5 +- .../presto/hive/HivePartitionManager.java | 17 +- .../presto/hive/HiveSplitManager.java | 6 +- .../facebook/presto/hive/HiveWriteUtils.java | 8 +- .../hive/SyncPartitionMetadataProcedure.java | 5 +- .../KerberosHiveMetastoreAuthentication.java | 100 ++- .../UserGroupInformationUtils.java | 4 +- .../presto/hive/rule/HiveFilterPushdown.java | 3 +- .../hive/security/LegacyAccessControl.java | 3 +- .../security/SqlStandardAccessControl.java | 13 +- .../MetastoreHiveStatisticsProvider.java | 14 +- .../presto/hive/AbstractTestHiveClient.java | 206 +++--- .../hive/AbstractTestHiveClientLocal.java | 5 +- .../hive/AbstractTestHiveFileSystem.java | 40 +- .../presto/hive/HiveBenchmarkQueryRunner.java | 3 +- .../facebook/presto/hive/HiveQueryRunner.java | 13 +- .../hive/TestHiveClientFileMetastore.java | 14 +- ...eMetadataFileFormatEncryptionSettings.java | 10 +- .../presto/hive/TestHivePageSink.java | 3 +- .../presto/hive/TestHiveSplitManager.java | 8 +- ...TestingSemiTransactionalHiveMetastore.java | 69 +- .../glue/TestHiveClientGlueMetastore.java | 3 +- .../TestMetastoreHiveStatisticsProvider.java | 10 +- .../presto/spark/PrestoSparkQueryRunner.java | 7 +- .../spark/TestPrestoSparkQueryRunner.java | 3 +- 69 files changed, 2005 insertions(+), 1542 deletions(-) create mode 100644 
presto-hive-metastore/src/main/java/com/facebook/presto/hive/metastore/MetastoreContext.java diff --git a/presto-docs/src/main/sphinx/connector/hive-security.rst b/presto-docs/src/main/sphinx/connector/hive-security.rst index f9d949b55dad0..fa4340a5f850b 100644 --- a/presto-docs/src/main/sphinx/connector/hive-security.rst +++ b/presto-docs/src/main/sphinx/connector/hive-security.rst @@ -131,6 +131,7 @@ Property Name Description to the Hive metastore service. ``hive.metastore.client.keytab`` Hive metastore client keytab location. +``hive.metastore-impersonation-enabled`` Enable metastore end-user impersonation. ================================================== ============================================================ ``hive.metastore.authentication.type`` diff --git a/presto-docs/src/main/sphinx/connector/hive.rst b/presto-docs/src/main/sphinx/connector/hive.rst index fdd66599efbb1..78b856590360d 100644 --- a/presto-docs/src/main/sphinx/connector/hive.rst +++ b/presto-docs/src/main/sphinx/connector/hive.rst @@ -184,6 +184,7 @@ Property Name Description ``hive.s3select-pushdown.max-connections`` Maximum number of simultaneously open connections to S3 for 500 S3SelectPushdown. 
+``hive.metastore.load-balancing-enabled`` Enable load balancing between multiple Metastore instances ================================================== ============================================================ ============ Metastore Configuration Properties diff --git a/presto-geospatial/src/test/java/com/facebook/presto/plugin/geospatial/TestSpatialJoins.java b/presto-geospatial/src/test/java/com/facebook/presto/plugin/geospatial/TestSpatialJoins.java index 7675268f1e583..f337927b42474 100644 --- a/presto-geospatial/src/test/java/com/facebook/presto/plugin/geospatial/TestSpatialJoins.java +++ b/presto-geospatial/src/test/java/com/facebook/presto/plugin/geospatial/TestSpatialJoins.java @@ -23,6 +23,7 @@ import com.facebook.presto.hive.MetastoreClientConfig; import com.facebook.presto.hive.authentication.NoHdfsAuthentication; import com.facebook.presto.hive.metastore.Database; +import com.facebook.presto.hive.metastore.MetastoreContext; import com.facebook.presto.hive.metastore.file.FileHiveMetastore; import com.facebook.presto.spi.security.PrincipalType; import com.facebook.presto.tests.AbstractTestQueryFramework; @@ -104,7 +105,7 @@ private static DistributedQueryRunner createQueryRunner() HdfsEnvironment hdfsEnvironment = new HdfsEnvironment(hdfsConfiguration, metastoreClientConfig, new NoHdfsAuthentication()); FileHiveMetastore metastore = new FileHiveMetastore(hdfsEnvironment, baseDir.toURI().toString(), "test"); - metastore.createDatabase(Database.builder() + metastore.createDatabase(new MetastoreContext("test_user"), Database.builder() .setDatabaseName("default") .setOwnerName("public") .setOwnerType(PrincipalType.ROLE) diff --git a/presto-hive-metastore/src/main/java/com/facebook/presto/hive/MetastoreClientConfig.java b/presto-hive-metastore/src/main/java/com/facebook/presto/hive/MetastoreClientConfig.java index d85a521a21f4b..58e03163b25c3 100644 --- a/presto-hive-metastore/src/main/java/com/facebook/presto/hive/MetastoreClientConfig.java +++ 
b/presto-hive-metastore/src/main/java/com/facebook/presto/hive/MetastoreClientConfig.java @@ -14,6 +14,7 @@ package com.facebook.presto.hive; import com.facebook.airlift.configuration.Config; +import com.facebook.airlift.configuration.ConfigDescription; import com.facebook.presto.hive.metastore.CachingHiveMetastore.MetastoreCacheScope; import com.google.common.net.HostAndPort; import io.airlift.units.Duration; @@ -44,6 +45,7 @@ public class MetastoreClientConfig private Duration recordingDuration = new Duration(0, MINUTES); private boolean partitionVersioningEnabled; private MetastoreCacheScope metastoreCacheScope = MetastoreCacheScope.ALL; + private boolean metastoreImpersonationEnabled; public HostAndPort getMetastoreSocksProxy() { @@ -222,4 +224,17 @@ public MetastoreClientConfig setMetastoreCacheScope(MetastoreCacheScope metastor this.metastoreCacheScope = metastoreCacheScope; return this; } + + public boolean isMetastoreImpersonationEnabled() + { + return metastoreImpersonationEnabled; + } + + @Config("hive.metastore-impersonation-enabled") + @ConfigDescription("Should Presto user be impersonated when communicating with Hive Metastore") + public MetastoreClientConfig setMetastoreImpersonationEnabled(boolean metastoreImpersonationEnabled) + { + this.metastoreImpersonationEnabled = metastoreImpersonationEnabled; + return this; + } } diff --git a/presto-hive-metastore/src/main/java/com/facebook/presto/hive/authentication/HiveMetastoreAuthentication.java b/presto-hive-metastore/src/main/java/com/facebook/presto/hive/authentication/HiveMetastoreAuthentication.java index 67c2ba90297c1..87b3c7817ed67 100644 --- a/presto-hive-metastore/src/main/java/com/facebook/presto/hive/authentication/HiveMetastoreAuthentication.java +++ b/presto-hive-metastore/src/main/java/com/facebook/presto/hive/authentication/HiveMetastoreAuthentication.java @@ -15,7 +15,9 @@ import org.apache.thrift.transport.TTransport; +import java.util.Optional; + public interface 
HiveMetastoreAuthentication { - TTransport authenticate(TTransport rawTransport, String hiveMetastoreHost); + TTransport authenticate(TTransport rawTransport, String hiveMetastoreHost, Optional tokenString); } diff --git a/presto-hive-metastore/src/main/java/com/facebook/presto/hive/authentication/NoHiveMetastoreAuthentication.java b/presto-hive-metastore/src/main/java/com/facebook/presto/hive/authentication/NoHiveMetastoreAuthentication.java index 6d9cf99d0e698..5d34b7ed6f793 100644 --- a/presto-hive-metastore/src/main/java/com/facebook/presto/hive/authentication/NoHiveMetastoreAuthentication.java +++ b/presto-hive-metastore/src/main/java/com/facebook/presto/hive/authentication/NoHiveMetastoreAuthentication.java @@ -15,11 +15,13 @@ import org.apache.thrift.transport.TTransport; +import java.util.Optional; + public class NoHiveMetastoreAuthentication implements HiveMetastoreAuthentication { @Override - public TTransport authenticate(TTransport rawTransport, String hiveMetastoreHost) + public TTransport authenticate(TTransport rawTransport, String hiveMetastoreHost, Optional tokenString) { return rawTransport; } diff --git a/presto-hive-metastore/src/main/java/com/facebook/presto/hive/metastore/CachingHiveMetastore.java b/presto-hive-metastore/src/main/java/com/facebook/presto/hive/metastore/CachingHiveMetastore.java index 707601db485ba..97909d774e108 100644 --- a/presto-hive-metastore/src/main/java/com/facebook/presto/hive/metastore/CachingHiveMetastore.java +++ b/presto-hive-metastore/src/main/java/com/facebook/presto/hive/metastore/CachingHiveMetastore.java @@ -40,6 +40,7 @@ import java.util.List; import java.util.Map; import java.util.Map.Entry; +import java.util.Objects; import java.util.Optional; import java.util.OptionalLong; import java.util.Set; @@ -52,6 +53,7 @@ import static com.facebook.presto.hive.metastore.HivePartitionName.hivePartitionName; import static com.facebook.presto.hive.metastore.HiveTableName.hiveTableName; import static 
com.facebook.presto.hive.metastore.PartitionFilter.partitionFilter; +import static com.google.common.base.MoreObjects.toStringHelper; import static com.google.common.base.Preconditions.checkArgument; import static com.google.common.base.Throwables.throwIfInstanceOf; import static com.google.common.base.Throwables.throwIfUnchecked; @@ -73,27 +75,30 @@ public class CachingHiveMetastore implements ExtendedHiveMetastore { + private static final String NO_IMPERSONATION_USER = "no-impersonation-caching-user"; + public enum MetastoreCacheScope { ALL, PARTITION } protected final ExtendedHiveMetastore delegate; - private final LoadingCache> databaseCache; - private final LoadingCache> databaseNamesCache; - private final LoadingCache> tableCache; - private final LoadingCache>> tableNamesCache; - private final LoadingCache tableStatisticsCache; - private final LoadingCache partitionStatisticsCache; - private final LoadingCache>> viewNamesCache; - private final LoadingCache> partitionCache; - private final LoadingCache> partitionFilterCache; - private final LoadingCache>> partitionNamesCache; - private final LoadingCache> tablePrivilegesCache; - private final LoadingCache> rolesCache; - private final LoadingCache> roleGrantsCache; + private final LoadingCache, Optional> databaseCache; + private final LoadingCache, List> databaseNamesCache; + private final LoadingCache, Optional> tableCache; + private final LoadingCache, Optional>> tableNamesCache; + private final LoadingCache, PartitionStatistics> tableStatisticsCache; + private final LoadingCache, PartitionStatistics> partitionStatisticsCache; + private final LoadingCache, Optional>> viewNamesCache; + private final LoadingCache, Optional> partitionCache; + private final LoadingCache, List> partitionFilterCache; + private final LoadingCache, Optional>> partitionNamesCache; + private final LoadingCache, Set> tablePrivilegesCache; + private final LoadingCache, Set> rolesCache; + private final LoadingCache, Set> roleGrantsCache; 
private final boolean partitionVersioningEnabled; + private final boolean metastoreImpersonationEnabled; @Inject public CachingHiveMetastore( @@ -104,6 +109,7 @@ public CachingHiveMetastore( this( delegate, executor, + metastoreClientConfig.isMetastoreImpersonationEnabled(), metastoreClientConfig.getMetastoreCacheTtl(), metastoreClientConfig.getMetastoreRefreshInterval(), metastoreClientConfig.getMetastoreCacheMaximumSize(), @@ -114,6 +120,7 @@ public CachingHiveMetastore( public CachingHiveMetastore( ExtendedHiveMetastore delegate, ExecutorService executor, + boolean metastoreImpersonationEnabled, Duration cacheTtl, Duration refreshInterval, long maximumSize, @@ -123,6 +130,7 @@ public CachingHiveMetastore( this( delegate, executor, + metastoreImpersonationEnabled, OptionalLong.of(cacheTtl.toMillis()), refreshInterval.toMillis() >= cacheTtl.toMillis() ? OptionalLong.empty() : OptionalLong.of(refreshInterval.toMillis()), maximumSize, @@ -130,11 +138,12 @@ public CachingHiveMetastore( metastoreCacheScope); } - public static CachingHiveMetastore memoizeMetastore(ExtendedHiveMetastore delegate, long maximumSize) + public static CachingHiveMetastore memoizeMetastore(ExtendedHiveMetastore delegate, boolean isMetastoreImpersonationEnabled, long maximumSize) { return new CachingHiveMetastore( delegate, newDirectExecutorService(), + isMetastoreImpersonationEnabled, OptionalLong.empty(), OptionalLong.empty(), maximumSize, @@ -145,6 +154,7 @@ public static CachingHiveMetastore memoizeMetastore(ExtendedHiveMetastore delega private CachingHiveMetastore( ExtendedHiveMetastore delegate, ExecutorService executor, + boolean metastoreImpersonationEnabled, OptionalLong expiresAfterWriteMillis, OptionalLong refreshMills, long maximumSize, @@ -153,6 +163,7 @@ private CachingHiveMetastore( { this.delegate = requireNonNull(delegate, "delegate is null"); requireNonNull(executor, "executor is null"); + this.metastoreImpersonationEnabled = metastoreImpersonationEnabled; 
this.partitionVersioningEnabled = partitionVersioningEnabled; OptionalLong cacheExpiresAfterWriteMillis; @@ -196,26 +207,26 @@ private CachingHiveMetastore( .build(asyncReloading(CacheLoader.from(this::loadAllTables), executor)); tableStatisticsCache = newCacheBuilder(cacheExpiresAfterWriteMillis, cacheRefreshMills, cacheMaxSize) - .build(asyncReloading(new CacheLoader() + .build(asyncReloading(new CacheLoader, PartitionStatistics>() { @Override - public PartitionStatistics load(HiveTableName key) + public PartitionStatistics load(KeyAndContext key) { return loadTableColumnStatistics(key); } }, executor)); partitionStatisticsCache = newCacheBuilder(partitionCacheExpiresAfterWriteMillis, partitionCacheRefreshMills, partitionCacheMaxSize) - .build(asyncReloading(new CacheLoader() + .build(asyncReloading(new CacheLoader, PartitionStatistics>() { @Override - public PartitionStatistics load(HivePartitionName key) + public PartitionStatistics load(KeyAndContext key) { return loadPartitionColumnStatistics(key); } @Override - public Map loadAll(Iterable keys) + public Map, PartitionStatistics> loadAll(Iterable> keys) { return loadPartitionColumnStatistics(keys); } @@ -234,26 +245,26 @@ public Map loadAll(Iterable>() + .build(asyncReloading(new CacheLoader, Optional>() { @Override - public Optional load(HivePartitionName partitionName) + public Optional load(KeyAndContext partitionName) { return loadPartitionByName(partitionName); } @Override - public Map> loadAll(Iterable partitionNames) + public Map, Optional> loadAll(Iterable> partitionNames) { return loadPartitionsByNames(partitionNames); } }, executor)); tablePrivilegesCache = newCacheBuilder(cacheExpiresAfterWriteMillis, cacheRefreshMills, cacheMaxSize) - .build(asyncReloading(CacheLoader.from(key -> loadTablePrivileges(key.getDatabase(), key.getTable(), key.getPrincipal())), executor)); + .build(asyncReloading(CacheLoader.from(this::loadTablePrivileges), executor)); rolesCache = 
newCacheBuilder(cacheExpiresAfterWriteMillis, cacheRefreshMills, cacheMaxSize) - .build(asyncReloading(CacheLoader.from(() -> loadRoles()), executor)); + .build(asyncReloading(CacheLoader.from(this::loadAllRoles), executor)); roleGrantsCache = newCacheBuilder(cacheExpiresAfterWriteMillis, cacheRefreshMills, cacheMaxSize) .build(asyncReloading(CacheLoader.from(this::loadRoleGrants), executor)); @@ -300,149 +311,154 @@ private static Map getAll(LoadingCache cache, Iterable key } @Override - public Optional getDatabase(String databaseName) + public Optional getDatabase(MetastoreContext metastoreContext, String databaseName) { - return get(databaseCache, databaseName); + return get(databaseCache, getCachingKey(metastoreContext, databaseName)); } - private Optional loadDatabase(String databaseName) + private Optional loadDatabase(KeyAndContext databaseName) { - return delegate.getDatabase(databaseName); + return delegate.getDatabase(databaseName.getContext(), databaseName.getKey()); } @Override - public List getAllDatabases() + public List getAllDatabases(MetastoreContext metastoreContext) { - return get(databaseNamesCache, ""); + return get(databaseNamesCache, getCachingKey(metastoreContext, "")); } - private List loadAllDatabases() + private List loadAllDatabases(KeyAndContext key) { - return delegate.getAllDatabases(); + return delegate.getAllDatabases(key.getContext()); } @Override - public Optional
getTable(String databaseName, String tableName) + public Optional
getTable(MetastoreContext metastoreContext, String databaseName, String tableName) { - return get(tableCache, hiveTableName(databaseName, tableName)); + return get(tableCache, getCachingKey(metastoreContext, hiveTableName(databaseName, tableName))); } @Override - public Set getSupportedColumnStatistics(Type type) + public Set getSupportedColumnStatistics(MetastoreContext metastoreContext, Type type) { - return delegate.getSupportedColumnStatistics(type); + return delegate.getSupportedColumnStatistics(metastoreContext, type); } - private Optional
loadTable(HiveTableName hiveTableName) + private Optional
loadTable(KeyAndContext hiveTableName) { - return delegate.getTable(hiveTableName.getDatabaseName(), hiveTableName.getTableName()); + return delegate.getTable(hiveTableName.getContext(), hiveTableName.getKey().getDatabaseName(), hiveTableName.getKey().getTableName()); } @Override - public PartitionStatistics getTableStatistics(String databaseName, String tableName) + public PartitionStatistics getTableStatistics(MetastoreContext metastoreContext, String databaseName, String tableName) { - return get(tableStatisticsCache, hiveTableName(databaseName, tableName)); + return get(tableStatisticsCache, getCachingKey(metastoreContext, hiveTableName(databaseName, tableName))); } - private PartitionStatistics loadTableColumnStatistics(HiveTableName hiveTableName) + private PartitionStatistics loadTableColumnStatistics(KeyAndContext hiveTableName) { - return delegate.getTableStatistics(hiveTableName.getDatabaseName(), hiveTableName.getTableName()); + return delegate.getTableStatistics(hiveTableName.getContext(), hiveTableName.getKey().getDatabaseName(), hiveTableName.getKey().getTableName()); } @Override - public Map getPartitionStatistics(String databaseName, String tableName, Set partitionNames) + public Map getPartitionStatistics(MetastoreContext metastoreContext, String databaseName, String tableName, Set partitionNames) { - List partitions = partitionNames.stream() - .map(partitionName -> HivePartitionName.hivePartitionName(databaseName, tableName, partitionName)) + List> partitions = partitionNames.stream() + .map(partitionName -> getCachingKey(metastoreContext, HivePartitionName.hivePartitionName(databaseName, tableName, partitionName))) .collect(toImmutableList()); - Map statistics = getAll(partitionStatisticsCache, partitions); + Map, PartitionStatistics> statistics = getAll(partitionStatisticsCache, partitions); return statistics.entrySet() .stream() - .collect(toImmutableMap(entry -> entry.getKey().getPartitionName().get(), Entry::getValue)); + 
.collect(toImmutableMap(entry -> entry.getKey().getKey().getPartitionName().get(), Entry::getValue)); } - private PartitionStatistics loadPartitionColumnStatistics(HivePartitionName partition) + private PartitionStatistics loadPartitionColumnStatistics(KeyAndContext partition) { - String partitionName = partition.getPartitionName().get(); + String partitionName = partition.getKey().getPartitionName().get(); Map partitionStatistics = delegate.getPartitionStatistics( - partition.getHiveTableName().getDatabaseName(), - partition.getHiveTableName().getTableName(), + partition.getContext(), + partition.getKey().getHiveTableName().getDatabaseName(), + partition.getKey().getHiveTableName().getTableName(), ImmutableSet.of(partitionName)); if (!partitionStatistics.containsKey(partitionName)) { - throw new PrestoException(HIVE_PARTITION_DROPPED_DURING_QUERY, "Statistics result does not contain entry for partition: " + partition.getPartitionName()); + throw new PrestoException(HIVE_PARTITION_DROPPED_DURING_QUERY, "Statistics result does not contain entry for partition: " + partition.getKey().getPartitionName()); } return partitionStatistics.get(partitionName); } - private Map loadPartitionColumnStatistics(Iterable keys) + private Map, PartitionStatistics> loadPartitionColumnStatistics(Iterable> keys) { - SetMultimap tablePartitions = stream(keys) - .collect(toImmutableSetMultimap(HivePartitionName::getHiveTableName, key -> key)); - ImmutableMap.Builder result = ImmutableMap.builder(); + SetMultimap, KeyAndContext> tablePartitions = stream(keys) + .collect(toImmutableSetMultimap(nameKey -> new KeyAndContext<>(nameKey.getContext(), nameKey.getKey().getHiveTableName()), nameKey -> nameKey)); + ImmutableMap.Builder, PartitionStatistics> result = ImmutableMap.builder(); tablePartitions.keySet().forEach(table -> { Set partitionNames = tablePartitions.get(table).stream() - .map(partitionName -> partitionName.getPartitionName().get()) + .map(partitionName -> 
partitionName.getKey().getPartitionName().get()) .collect(toImmutableSet()); - Map partitionStatistics = delegate.getPartitionStatistics(table.getDatabaseName(), table.getTableName(), partitionNames); + Map partitionStatistics = delegate.getPartitionStatistics(table.getContext(), table.getKey().getDatabaseName(), table.getKey().getTableName(), partitionNames); for (String partitionName : partitionNames) { if (!partitionStatistics.containsKey(partitionName)) { throw new PrestoException(HIVE_PARTITION_DROPPED_DURING_QUERY, "Statistics result does not contain entry for partition: " + partitionName); } - result.put(HivePartitionName.hivePartitionName(table, partitionName), partitionStatistics.get(partitionName)); + result.put(new KeyAndContext<>(table.getContext(), HivePartitionName.hivePartitionName(table.getKey(), partitionName)), partitionStatistics.get(partitionName)); } }); return result.build(); } @Override - public void updateTableStatistics(String databaseName, String tableName, Function update) + public void updateTableStatistics(MetastoreContext metastoreContext, String databaseName, String tableName, Function update) { try { - delegate.updateTableStatistics(databaseName, tableName, update); + delegate.updateTableStatistics(metastoreContext, databaseName, tableName, update); } finally { - tableStatisticsCache.invalidate(hiveTableName(databaseName, tableName)); + tableStatisticsCache.asMap().keySet().stream() + .filter(hiveTableNameKey -> hiveTableNameKey.getKey().equals(hiveTableName(databaseName, tableName))) + .forEach(tableStatisticsCache::invalidate); } } @Override - public void updatePartitionStatistics(String databaseName, String tableName, String partitionName, Function update) + public void updatePartitionStatistics(MetastoreContext metastoreContext, String databaseName, String tableName, String partitionName, Function update) { try { - delegate.updatePartitionStatistics(databaseName, tableName, partitionName, update); + 
delegate.updatePartitionStatistics(metastoreContext, databaseName, tableName, partitionName, update); } finally { - partitionStatisticsCache.invalidate(HivePartitionName.hivePartitionName(databaseName, tableName, partitionName)); + partitionStatisticsCache.asMap().keySet().stream() + .filter(partitionFilterKey -> partitionFilterKey.getKey().equals(hivePartitionName(databaseName, tableName, partitionName))) + .forEach(partitionStatisticsCache::invalidate); } } @Override - public Optional> getAllTables(String databaseName) + public Optional> getAllTables(MetastoreContext metastoreContext, String databaseName) { - return get(tableNamesCache, databaseName); + return get(tableNamesCache, getCachingKey(metastoreContext, databaseName)); } - private Optional> loadAllTables(String databaseName) + private Optional> loadAllTables(KeyAndContext databaseNameKey) { - return delegate.getAllTables(databaseName); + return delegate.getAllTables(databaseNameKey.getContext(), databaseNameKey.getKey()); } @Override - public Optional> getAllViews(String databaseName) + public Optional> getAllViews(MetastoreContext metastoreContext, String databaseName) { - return get(viewNamesCache, databaseName); + return get(viewNamesCache, getCachingKey(metastoreContext, databaseName)); } - private Optional> loadAllViews(String databaseName) + private Optional> loadAllViews(KeyAndContext databaseNameKey) { - return delegate.getAllViews(databaseName); + return delegate.getAllViews(databaseNameKey.getContext(), databaseNameKey.getKey()); } @Override - public void createDatabase(Database database) + public void createDatabase(MetastoreContext metastoreContext, Database database) { try { - delegate.createDatabase(database); + delegate.createDatabase(metastoreContext, database); } finally { invalidateDatabase(database.getDatabaseName()); @@ -450,10 +466,10 @@ public void createDatabase(Database database) } @Override - public void dropDatabase(String databaseName) + public void 
dropDatabase(MetastoreContext metastoreContext, String databaseName) { try { - delegate.dropDatabase(databaseName); + delegate.dropDatabase(metastoreContext, databaseName); } finally { invalidateDatabase(databaseName); @@ -461,10 +477,10 @@ public void dropDatabase(String databaseName) } @Override - public void renameDatabase(String databaseName, String newDatabaseName) + public void renameDatabase(MetastoreContext metastoreContext, String databaseName, String newDatabaseName) { try { - delegate.renameDatabase(databaseName, newDatabaseName); + delegate.renameDatabase(metastoreContext, databaseName, newDatabaseName); } finally { invalidateDatabase(databaseName); @@ -474,15 +490,17 @@ public void renameDatabase(String databaseName, String newDatabaseName) protected void invalidateDatabase(String databaseName) { - databaseCache.invalidate(databaseName); + databaseCache.asMap().keySet().stream() + .filter(databaseKey -> databaseKey.getKey().equals(databaseName)) + .forEach(databaseCache::invalidate); databaseNamesCache.invalidateAll(); } @Override - public void createTable(Table table, PrincipalPrivileges principalPrivileges) + public void createTable(MetastoreContext metastoreContext, Table table, PrincipalPrivileges principalPrivileges) { try { - delegate.createTable(table, principalPrivileges); + delegate.createTable(metastoreContext, table, principalPrivileges); } finally { invalidateTable(table.getDatabaseName(), table.getTableName()); @@ -490,10 +508,10 @@ public void createTable(Table table, PrincipalPrivileges principalPrivileges) } @Override - public void dropTable(String databaseName, String tableName, boolean deleteData) + public void dropTable(MetastoreContext metastoreContext, String databaseName, String tableName, boolean deleteData) { try { - delegate.dropTable(databaseName, tableName, deleteData); + delegate.dropTable(metastoreContext, databaseName, tableName, deleteData); } finally { invalidateTable(databaseName, tableName); @@ -501,10 +519,10 @@ 
public void dropTable(String databaseName, String tableName, boolean deleteData) } @Override - public void replaceTable(String databaseName, String tableName, Table newTable, PrincipalPrivileges principalPrivileges) + public void replaceTable(MetastoreContext metastoreContext, String databaseName, String tableName, Table newTable, PrincipalPrivileges principalPrivileges) { try { - delegate.replaceTable(databaseName, tableName, newTable, principalPrivileges); + delegate.replaceTable(metastoreContext, databaseName, tableName, newTable, principalPrivileges); } finally { invalidateTable(databaseName, tableName); @@ -513,10 +531,10 @@ public void replaceTable(String databaseName, String tableName, Table newTable, } @Override - public void renameTable(String databaseName, String tableName, String newDatabaseName, String newTableName) + public void renameTable(MetastoreContext metastoreContext, String databaseName, String tableName, String newDatabaseName, String newTableName) { try { - delegate.renameTable(databaseName, tableName, newDatabaseName, newTableName); + delegate.renameTable(metastoreContext, databaseName, tableName, newDatabaseName, newTableName); } finally { invalidateTable(databaseName, tableName); @@ -525,10 +543,10 @@ public void renameTable(String databaseName, String tableName, String newDatabas } @Override - public void addColumn(String databaseName, String tableName, String columnName, HiveType columnType, String columnComment) + public void addColumn(MetastoreContext metastoreContext, String databaseName, String tableName, String columnName, HiveType columnType, String columnComment) { try { - delegate.addColumn(databaseName, tableName, columnName, columnType, columnComment); + delegate.addColumn(metastoreContext, databaseName, tableName, columnName, columnType, columnComment); } finally { invalidateTable(databaseName, tableName); @@ -536,10 +554,10 @@ public void addColumn(String databaseName, String tableName, String columnName, } @Override - public 
void renameColumn(String databaseName, String tableName, String oldColumnName, String newColumnName) + public void renameColumn(MetastoreContext metastoreContext, String databaseName, String tableName, String oldColumnName, String newColumnName) { try { - delegate.renameColumn(databaseName, tableName, oldColumnName, newColumnName); + delegate.renameColumn(metastoreContext, databaseName, tableName, oldColumnName, newColumnName); } finally { invalidateTable(databaseName, tableName); @@ -547,10 +565,10 @@ public void renameColumn(String databaseName, String tableName, String oldColumn } @Override - public void dropColumn(String databaseName, String tableName, String columnName) + public void dropColumn(MetastoreContext metastoreContext, String databaseName, String tableName, String columnName) { try { - delegate.dropColumn(databaseName, tableName, columnName); + delegate.dropColumn(metastoreContext, databaseName, tableName, columnName); } finally { invalidateTable(databaseName, tableName); @@ -559,132 +577,159 @@ public void dropColumn(String databaseName, String tableName, String columnName) protected void invalidateTable(String databaseName, String tableName) { - tableCache.invalidate(hiveTableName(databaseName, tableName)); - tableNamesCache.invalidate(databaseName); - viewNamesCache.invalidate(databaseName); + HiveTableName hiveTableName = hiveTableName(databaseName, tableName); + + tableCache.asMap().keySet().stream() + .filter(hiveTableNameKey -> hiveTableNameKey.getKey().equals(hiveTableName)) + .forEach(tableCache::invalidate); + + tableNamesCache.asMap().keySet().stream() + .filter(tableNameKey -> tableNameKey.getKey().equals(databaseName)) + .forEach(tableNamesCache::invalidate); + + viewNamesCache.asMap().keySet().stream() + .filter(viewNameKey -> viewNameKey.getKey().equals(databaseName)) + .forEach(viewNamesCache::invalidate); + tablePrivilegesCache.asMap().keySet().stream() - .filter(userTableKey -> userTableKey.matches(databaseName, tableName)) + 
.filter(userTableKey -> userTableKey.getKey().matches(databaseName, tableName)) .forEach(tablePrivilegesCache::invalidate); - tableStatisticsCache.invalidate(hiveTableName(databaseName, tableName)); + + tableStatisticsCache.asMap().keySet().stream() + .filter(hiveTableNameKey -> hiveTableNameKey.getKey().equals(hiveTableName)) + .forEach(tableStatisticsCache::invalidate); + invalidatePartitionCache(databaseName, tableName); } @Override - public Optional getPartition(String databaseName, String tableName, List partitionValues) + public Optional getPartition(MetastoreContext metastoreContext, String databaseName, String tableName, List partitionValues) { - HivePartitionName name = hivePartitionName(databaseName, tableName, partitionValues); - return get(partitionCache, name); + return get(partitionCache, getCachingKey(metastoreContext, hivePartitionName(databaseName, tableName, partitionValues))); } @Override - public Optional> getPartitionNames(String databaseName, String tableName) + public Optional> getPartitionNames(MetastoreContext metastoreContext, String databaseName, String tableName) { - return get(partitionNamesCache, hiveTableName(databaseName, tableName)); + return get(partitionNamesCache, getCachingKey(metastoreContext, hiveTableName(databaseName, tableName))); } - private Optional> loadPartitionNames(HiveTableName hiveTableName) + private Optional> loadPartitionNames(KeyAndContext hiveTableNameKey) { - return delegate.getPartitionNames(hiveTableName.getDatabaseName(), hiveTableName.getTableName()); + return delegate.getPartitionNames(hiveTableNameKey.getContext(), hiveTableNameKey.getKey().getDatabaseName(), hiveTableNameKey.getKey().getTableName()); } @Override public List getPartitionNamesByFilter( + MetastoreContext metastoreContext, String databaseName, String tableName, Map partitionPredicates) { if (partitionVersioningEnabled) { - List partitionNamesWithVersion = getPartitionNamesWithVersionByFilter(databaseName, tableName, partitionPredicates); + 
List partitionNamesWithVersion = getPartitionNamesWithVersionByFilter(metastoreContext, databaseName, tableName, partitionPredicates); List result = partitionNamesWithVersion.stream().map(PartitionNameWithVersion::getPartitionName).collect(toImmutableList()); partitionNamesWithVersion.forEach(partitionNameWithVersion -> invalidateStalePartition(partitionNameWithVersion, databaseName, tableName)); return result; } - return get( - partitionFilterCache, - partitionFilter(databaseName, tableName, partitionPredicates)); + return get(partitionFilterCache, getCachingKey(metastoreContext, partitionFilter(databaseName, tableName, partitionPredicates))); } @Override public List getPartitionNamesWithVersionByFilter( + MetastoreContext metastoreContext, String databaseName, String tableName, Map partitionPredicates) { - return delegate.getPartitionNamesWithVersionByFilter(databaseName, tableName, partitionPredicates); + return delegate.getPartitionNamesWithVersionByFilter(metastoreContext, databaseName, tableName, partitionPredicates); } - private void invalidateStalePartition(PartitionNameWithVersion partitionNameWithVersion, String databaseName, String tableName) + private void invalidateStalePartition( + PartitionNameWithVersion partitionNameWithVersion, + String databaseName, + String tableName) { HivePartitionName hivePartitionName = hivePartitionName(databaseName, tableName, partitionNameWithVersion.getPartitionName()); - Optional partition = partitionCache.getIfPresent(hivePartitionName); - if (partition != null && partition.isPresent()) { - Optional partitionVersion = partition.get().getPartitionVersion(); - if (!partitionVersion.isPresent() || partitionVersion.get() != partitionNameWithVersion.getPartitionVersion()) { - partitionCache.invalidate(hivePartitionName); - partitionStatisticsCache.invalidate(hivePartitionName); - } - } + partitionCache.asMap().keySet().stream() + .filter(partitionNameKey -> partitionNameKey.getKey().equals(hivePartitionName)) + 
.forEach(partitionNameKey -> { + try { + Partition partition = partitionCache.get(partitionNameKey).get(); + Optional partitionVersion = partition.getPartitionVersion(); + if (!partitionVersion.isPresent() || partitionVersion.get() != partitionNameWithVersion.getPartitionVersion()) { + partitionCache.invalidate(partitionNameKey); + partitionStatisticsCache.invalidate(partitionNameKey); + } + } + catch (ExecutionException e) { + e.printStackTrace(); + } + }); } - private List loadPartitionNamesByFilter(PartitionFilter partitionFilter) + private List loadPartitionNamesByFilter(KeyAndContext partitionFilterKey) { return delegate.getPartitionNamesByFilter( - partitionFilter.getHiveTableName().getDatabaseName(), - partitionFilter.getHiveTableName().getTableName(), - partitionFilter.getPartitionPredicates()); + partitionFilterKey.getContext(), + partitionFilterKey.getKey().getHiveTableName().getDatabaseName(), + partitionFilterKey.getKey().getHiveTableName().getTableName(), + partitionFilterKey.getKey().getPartitionPredicates()); } @Override - public Map> getPartitionsByNames(String databaseName, String tableName, List partitionNames) + public Map> getPartitionsByNames(MetastoreContext metastoreContext, String databaseName, String tableName, List partitionNames) { - Iterable names = transform(partitionNames, name -> HivePartitionName.hivePartitionName(databaseName, tableName, name)); + Iterable> names = transform(partitionNames, name -> getCachingKey(metastoreContext, HivePartitionName.hivePartitionName(databaseName, tableName, name))); - Map> all = getAll(partitionCache, names); + Map, Optional> all = getAll(partitionCache, names); ImmutableMap.Builder> partitionsByName = ImmutableMap.builder(); - for (Entry> entry : all.entrySet()) { - partitionsByName.put(entry.getKey().getPartitionName().get(), entry.getValue()); + for (Entry, Optional> entry : all.entrySet()) { + partitionsByName.put(entry.getKey().getKey().getPartitionName().get(), entry.getValue()); } return 
partitionsByName.build(); } - private Optional loadPartitionByName(HivePartitionName partitionName) + private Optional loadPartitionByName(KeyAndContext partitionName) { return delegate.getPartition( - partitionName.getHiveTableName().getDatabaseName(), - partitionName.getHiveTableName().getTableName(), - partitionName.getPartitionValues()); + partitionName.getContext(), + partitionName.getKey().getHiveTableName().getDatabaseName(), + partitionName.getKey().getHiveTableName().getTableName(), + partitionName.getKey().getPartitionValues()); } - private Map> loadPartitionsByNames(Iterable partitionNames) + private Map, Optional> loadPartitionsByNames(Iterable> partitionNamesKey) { - requireNonNull(partitionNames, "partitionNames is null"); - checkArgument(!Iterables.isEmpty(partitionNames), "partitionNames is empty"); + requireNonNull(partitionNamesKey, "partitionNames is null"); + checkArgument(!Iterables.isEmpty(partitionNamesKey), "partitionNames is empty"); - HivePartitionName firstPartition = Iterables.get(partitionNames, 0); + KeyAndContext firstPartitionKey = Iterables.get(partitionNamesKey, 0); - HiveTableName hiveTableName = firstPartition.getHiveTableName(); + HiveTableName hiveTableName = firstPartitionKey.getKey().getHiveTableName(); String databaseName = hiveTableName.getDatabaseName(); String tableName = hiveTableName.getTableName(); List partitionsToFetch = new ArrayList<>(); - for (HivePartitionName partitionName : partitionNames) { - checkArgument(partitionName.getHiveTableName().equals(hiveTableName), "Expected table name %s but got %s", hiveTableName, partitionName.getHiveTableName()); - partitionsToFetch.add(partitionName.getPartitionName().get()); + for (KeyAndContext partitionNameKey : partitionNamesKey) { + checkArgument(partitionNameKey.getKey().getHiveTableName().equals(hiveTableName), "Expected table name %s but got %s", hiveTableName, partitionNameKey.getKey().getHiveTableName()); + 
checkArgument(partitionNameKey.getContext().equals(firstPartitionKey.getContext()), "Expected context %s but got %s", firstPartitionKey.getContext(), partitionNameKey.getContext()); + partitionsToFetch.add(partitionNameKey.getKey().getPartitionName().get()); } - ImmutableMap.Builder> partitions = ImmutableMap.builder(); - Map> partitionsByNames = delegate.getPartitionsByNames(databaseName, tableName, partitionsToFetch); + ImmutableMap.Builder, Optional> partitions = ImmutableMap.builder(); + Map> partitionsByNames = delegate.getPartitionsByNames(firstPartitionKey.getContext(), databaseName, tableName, partitionsToFetch); for (Entry> entry : partitionsByNames.entrySet()) { - partitions.put(HivePartitionName.hivePartitionName(hiveTableName, entry.getKey()), entry.getValue()); + partitions.put(getCachingKey(firstPartitionKey.getContext(), HivePartitionName.hivePartitionName(hiveTableName, entry.getKey())), entry.getValue()); } return partitions.build(); } @Override - public void addPartitions(String databaseName, String tableName, List partitions) + public void addPartitions(MetastoreContext metastoreContext, String databaseName, String tableName, List partitions) { try { - delegate.addPartitions(databaseName, tableName, partitions); + delegate.addPartitions(metastoreContext, databaseName, tableName, partitions); } finally { // todo do we need to invalidate all partitions? 
@@ -693,10 +738,10 @@ public void addPartitions(String databaseName, String tableName, List parts, boolean deleteData) + public void dropPartition(MetastoreContext metastoreContext, String databaseName, String tableName, List parts, boolean deleteData) { try { - delegate.dropPartition(databaseName, tableName, parts, deleteData); + delegate.dropPartition(metastoreContext, databaseName, tableName, parts, deleteData); } finally { invalidatePartitionCache(databaseName, tableName); @@ -704,10 +749,10 @@ public void dropPartition(String databaseName, String tableName, List pa } @Override - public void alterPartition(String databaseName, String tableName, PartitionWithStatistics partition) + public void alterPartition(MetastoreContext metastoreContext, String databaseName, String tableName, PartitionWithStatistics partition) { try { - delegate.alterPartition(databaseName, tableName, partition); + delegate.alterPartition(metastoreContext, databaseName, tableName, partition); } finally { invalidatePartitionCache(databaseName, tableName); @@ -715,10 +760,10 @@ public void alterPartition(String databaseName, String tableName, PartitionWithS } @Override - public void createRole(String role, String grantor) + public void createRole(MetastoreContext metastoreContext, String role, String grantor) { try { - delegate.createRole(role, grantor); + delegate.createRole(metastoreContext, role, grantor); } finally { rolesCache.invalidateAll(); @@ -726,10 +771,10 @@ public void createRole(String role, String grantor) } @Override - public void dropRole(String role) + public void dropRole(MetastoreContext metastoreContext, String role) { try { - delegate.dropRole(role); + delegate.dropRole(metastoreContext, role); } finally { rolesCache.invalidateAll(); @@ -738,21 +783,21 @@ public void dropRole(String role) } @Override - public Set listRoles() + public Set listRoles(MetastoreContext metastoreContext) { - return get(rolesCache, ""); + return get(rolesCache, getCachingKey(metastoreContext, 
"")); } - private Set loadRoles() + private Set loadAllRoles(KeyAndContext rolesKey) { - return delegate.listRoles(); + return delegate.listRoles(rolesKey.getContext()); } @Override - public void grantRoles(Set roles, Set grantees, boolean withAdminOption, PrestoPrincipal grantor) + public void grantRoles(MetastoreContext metastoreContext, Set roles, Set grantees, boolean withAdminOption, PrestoPrincipal grantor) { try { - delegate.grantRoles(roles, grantees, withAdminOption, grantor); + delegate.grantRoles(metastoreContext, roles, grantees, withAdminOption, grantor); } finally { roleGrantsCache.invalidateAll(); @@ -760,10 +805,10 @@ public void grantRoles(Set roles, Set grantees, boolean } @Override - public void revokeRoles(Set roles, Set grantees, boolean adminOptionFor, PrestoPrincipal grantor) + public void revokeRoles(MetastoreContext metastoreContext, Set roles, Set grantees, boolean adminOptionFor, PrestoPrincipal grantor) { try { - delegate.revokeRoles(roles, grantees, adminOptionFor, grantor); + delegate.revokeRoles(metastoreContext, roles, grantees, adminOptionFor, grantor); } finally { roleGrantsCache.invalidateAll(); @@ -771,62 +816,129 @@ public void revokeRoles(Set roles, Set grantees, boolea } @Override - public Set listRoleGrants(PrestoPrincipal principal) + public Set listRoleGrants(MetastoreContext metastoreContext, PrestoPrincipal principal) { - return get(roleGrantsCache, principal); + return get(roleGrantsCache, getCachingKey(metastoreContext, principal)); } - private Set loadRoleGrants(PrestoPrincipal principal) + private Set loadRoleGrants(KeyAndContext principalKey) { - return delegate.listRoleGrants(principal); + return delegate.listRoleGrants(principalKey.getContext(), principalKey.getKey()); } private void invalidatePartitionCache(String databaseName, String tableName) { HiveTableName hiveTableName = hiveTableName(databaseName, tableName); - partitionNamesCache.invalidate(hiveTableName); + partitionNamesCache.asMap().keySet().stream() + 
.filter(hiveTableNameKey -> hiveTableNameKey.getKey().equals(hiveTableName)) + .forEach(partitionNamesCache::invalidate); partitionCache.asMap().keySet().stream() - .filter(partitionName -> partitionName.getHiveTableName().equals(hiveTableName)) + .filter(partitionNameKey -> partitionNameKey.getKey().getHiveTableName().equals(hiveTableName)) .forEach(partitionCache::invalidate); partitionFilterCache.asMap().keySet().stream() - .filter(partitionFilter -> partitionFilter.getHiveTableName().equals(hiveTableName)) + .filter(partitionFilterKey -> partitionFilterKey.getKey().getHiveTableName().equals(hiveTableName)) .forEach(partitionFilterCache::invalidate); partitionStatisticsCache.asMap().keySet().stream() - .filter(partitionFilter -> partitionFilter.getHiveTableName().equals(hiveTableName)) + .filter(partitionFilterKey -> partitionFilterKey.getKey().getHiveTableName().equals(hiveTableName)) .forEach(partitionStatisticsCache::invalidate); } + private void invalidateTablePrivilegesCache(PrestoPrincipal grantee, String databaseName, String tableName) + { + UserTableKey userTableKey = new UserTableKey(grantee, databaseName, tableName); + tablePrivilegesCache.asMap().keySet().stream() + .filter(tablePrivilegesCacheKey -> tablePrivilegesCacheKey.getKey().equals(userTableKey)) + .forEach(tablePrivilegesCache::invalidate); + } + @Override - public void grantTablePrivileges(String databaseName, String tableName, PrestoPrincipal grantee, Set privileges) + public void grantTablePrivileges(MetastoreContext metastoreContext, String databaseName, String tableName, PrestoPrincipal grantee, Set privileges) { try { - delegate.grantTablePrivileges(databaseName, tableName, grantee, privileges); + delegate.grantTablePrivileges(metastoreContext, databaseName, tableName, grantee, privileges); } finally { - tablePrivilegesCache.invalidate(new UserTableKey(grantee, databaseName, tableName)); + invalidateTablePrivilegesCache(grantee, databaseName, tableName); } } @Override - public void 
revokeTablePrivileges(String databaseName, String tableName, PrestoPrincipal grantee, Set privileges) + public void revokeTablePrivileges(MetastoreContext metastoreContext, String databaseName, String tableName, PrestoPrincipal grantee, Set privileges) { try { - delegate.revokeTablePrivileges(databaseName, tableName, grantee, privileges); + delegate.revokeTablePrivileges(metastoreContext, databaseName, tableName, grantee, privileges); } finally { - tablePrivilegesCache.invalidate(new UserTableKey(grantee, databaseName, tableName)); + invalidateTablePrivilegesCache(grantee, databaseName, tableName); } } @Override - public Set listTablePrivileges(String databaseName, String tableName, PrestoPrincipal principal) + public Set listTablePrivileges(MetastoreContext metastoreContext, String databaseName, String tableName, PrestoPrincipal principal) { - return get(tablePrivilegesCache, new UserTableKey(principal, databaseName, tableName)); + return get(tablePrivilegesCache, getCachingKey(metastoreContext, new UserTableKey(principal, databaseName, tableName))); + } + + public Set loadTablePrivileges(KeyAndContext loadTablePrivilegesKey) + { + return delegate.listTablePrivileges(loadTablePrivilegesKey.getContext(), loadTablePrivilegesKey.getKey().getDatabase(), loadTablePrivilegesKey.getKey().getTable(), loadTablePrivilegesKey.getKey().getPrincipal()); + } + + private static class KeyAndContext + { + private final MetastoreContext context; + private final T key; + + public KeyAndContext(MetastoreContext context, T key) + { + this.context = requireNonNull(context, "identity is null"); + this.key = requireNonNull(key, "key is null"); + } + + public MetastoreContext getContext() + { + return context; + } + + public T getKey() + { + return key; + } + + @Override + public boolean equals(Object o) + { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + KeyAndContext other = (KeyAndContext) o; + return Objects.equals(context, 
other.context) && + Objects.equals(key, other.key); + } + + @Override + public int hashCode() + { + return Objects.hash(context.getUsername(), key); + } + + @Override + public String toString() + { + return toStringHelper(this) + .add("context", context) + .add("key", key) + .toString(); + } } - public Set loadTablePrivileges(String databaseName, String tableName, PrestoPrincipal principal) + private KeyAndContext getCachingKey(MetastoreContext context, T key) { - return delegate.listTablePrivileges(databaseName, tableName, principal); + MetastoreContext metastoreContext = metastoreImpersonationEnabled ? context : new MetastoreContext(NO_IMPERSONATION_USER); + return new KeyAndContext<>(metastoreContext, key); } private static CacheBuilder newCacheBuilder(OptionalLong expiresAfterWriteMillis, OptionalLong refreshMillis, long maximumSize) diff --git a/presto-hive-metastore/src/main/java/com/facebook/presto/hive/metastore/ExtendedHiveMetastore.java b/presto-hive-metastore/src/main/java/com/facebook/presto/hive/metastore/ExtendedHiveMetastore.java index ded75e479e3b2..e197d5f0f83b4 100644 --- a/presto-hive-metastore/src/main/java/com/facebook/presto/hive/metastore/ExtendedHiveMetastore.java +++ b/presto-hive-metastore/src/main/java/com/facebook/presto/hive/metastore/ExtendedHiveMetastore.java @@ -28,88 +28,90 @@ public interface ExtendedHiveMetastore { - Optional getDatabase(String databaseName); + Optional getDatabase(MetastoreContext metastoreContext, String databaseName); - List getAllDatabases(); + List getAllDatabases(MetastoreContext metastoreContext); - Optional
getTable(String databaseName, String tableName); + Optional
getTable(MetastoreContext metastoreContext, String databaseName, String tableName); - Set getSupportedColumnStatistics(Type type); + Set getSupportedColumnStatistics(MetastoreContext metastoreContext, Type type); - PartitionStatistics getTableStatistics(String databaseName, String tableName); + PartitionStatistics getTableStatistics(MetastoreContext metastoreContext, String databaseName, String tableName); - Map getPartitionStatistics(String databaseName, String tableName, Set partitionNames); + Map getPartitionStatistics(MetastoreContext metastoreContext, String databaseName, String tableName, Set partitionNames); - void updateTableStatistics(String databaseName, String tableName, Function update); + void updateTableStatistics(MetastoreContext metastoreContext, String databaseName, String tableName, Function update); - void updatePartitionStatistics(String databaseName, String tableName, String partitionName, Function update); + void updatePartitionStatistics(MetastoreContext metastoreContext, String databaseName, String tableName, String partitionName, Function update); - Optional> getAllTables(String databaseName); + Optional> getAllTables(MetastoreContext metastoreContext, String databaseName); - Optional> getAllViews(String databaseName); + Optional> getAllViews(MetastoreContext metastoreContext, String databaseName); - void createDatabase(Database database); + void createDatabase(MetastoreContext metastoreContext, Database database); - void dropDatabase(String databaseName); + void dropDatabase(MetastoreContext metastoreContext, String databaseName); - void renameDatabase(String databaseName, String newDatabaseName); + void renameDatabase(MetastoreContext metastoreContext, String databaseName, String newDatabaseName); - void createTable(Table table, PrincipalPrivileges principalPrivileges); + void createTable(MetastoreContext metastoreContext, Table table, PrincipalPrivileges principalPrivileges); - void dropTable(String databaseName, String tableName, 
boolean deleteData); + void dropTable(MetastoreContext metastoreContext, String databaseName, String tableName, boolean deleteData); /** * This should only be used if the semantic here is drop and add. Trying to * alter one field of a table object previously acquired from getTable is * probably not what you want. */ - void replaceTable(String databaseName, String tableName, Table newTable, PrincipalPrivileges principalPrivileges); + void replaceTable(MetastoreContext metastoreContext, String databaseName, String tableName, Table newTable, PrincipalPrivileges principalPrivileges); - void renameTable(String databaseName, String tableName, String newDatabaseName, String newTableName); + void renameTable(MetastoreContext metastoreContext, String databaseName, String tableName, String newDatabaseName, String newTableName); - void addColumn(String databaseName, String tableName, String columnName, HiveType columnType, String columnComment); + void addColumn(MetastoreContext metastoreContext, String databaseName, String tableName, String columnName, HiveType columnType, String columnComment); - void renameColumn(String databaseName, String tableName, String oldColumnName, String newColumnName); + void renameColumn(MetastoreContext metastoreContext, String databaseName, String tableName, String oldColumnName, String newColumnName); - void dropColumn(String databaseName, String tableName, String columnName); + void dropColumn(MetastoreContext metastoreContext, String databaseName, String tableName, String columnName); - Optional getPartition(String databaseName, String tableName, List partitionValues); + Optional getPartition(MetastoreContext metastoreContext, String databaseName, String tableName, List partitionValues); - Optional> getPartitionNames(String databaseName, String tableName); + Optional> getPartitionNames(MetastoreContext metastoreContext, String databaseName, String tableName); List getPartitionNamesByFilter( + MetastoreContext metastoreContext, String 
databaseName, String tableName, Map partitionPredicates); List getPartitionNamesWithVersionByFilter( + MetastoreContext metastoreContext, String databaseName, String tableName, Map partitionPredicates); - Map> getPartitionsByNames(String databaseName, String tableName, List partitionNames); + Map> getPartitionsByNames(MetastoreContext metastoreContext, String databaseName, String tableName, List partitionNames); - void addPartitions(String databaseName, String tableName, List partitions); + void addPartitions(MetastoreContext metastoreContext, String databaseName, String tableName, List partitions); - void dropPartition(String databaseName, String tableName, List parts, boolean deleteData); + void dropPartition(MetastoreContext metastoreContext, String databaseName, String tableName, List parts, boolean deleteData); - void alterPartition(String databaseName, String tableName, PartitionWithStatistics partition); + void alterPartition(MetastoreContext metastoreContext, String databaseName, String tableName, PartitionWithStatistics partition); - void createRole(String role, String grantor); + void createRole(MetastoreContext metastoreContext, String role, String grantor); - void dropRole(String role); + void dropRole(MetastoreContext metastoreContext, String role); - Set listRoles(); + Set listRoles(MetastoreContext metastoreContext); - void grantRoles(Set roles, Set grantees, boolean withAdminOption, PrestoPrincipal grantor); + void grantRoles(MetastoreContext metastoreContext, Set roles, Set grantees, boolean withAdminOption, PrestoPrincipal grantor); - void revokeRoles(Set roles, Set grantees, boolean adminOptionFor, PrestoPrincipal grantor); + void revokeRoles(MetastoreContext metastoreContext, Set roles, Set grantees, boolean adminOptionFor, PrestoPrincipal grantor); - Set listRoleGrants(PrestoPrincipal principal); + Set listRoleGrants(MetastoreContext metastoreContext, PrestoPrincipal principal); - void grantTablePrivileges(String databaseName, String tableName, 
PrestoPrincipal grantee, Set privileges); + void grantTablePrivileges(MetastoreContext metastoreContext, String databaseName, String tableName, PrestoPrincipal grantee, Set privileges); - void revokeTablePrivileges(String databaseName, String tableName, PrestoPrincipal grantee, Set privileges); + void revokeTablePrivileges(MetastoreContext metastoreContext, String databaseName, String tableName, PrestoPrincipal grantee, Set privileges); - Set listTablePrivileges(String databaseName, String tableName, PrestoPrincipal principal); + Set listTablePrivileges(MetastoreContext metastoreContext, String databaseName, String tableName, PrestoPrincipal principal); } diff --git a/presto-hive-metastore/src/main/java/com/facebook/presto/hive/metastore/HivePageSinkMetadataProvider.java b/presto-hive-metastore/src/main/java/com/facebook/presto/hive/metastore/HivePageSinkMetadataProvider.java index 7abfacdb7b395..1a33bebf68742 100644 --- a/presto-hive-metastore/src/main/java/com/facebook/presto/hive/metastore/HivePageSinkMetadataProvider.java +++ b/presto-hive-metastore/src/main/java/com/facebook/presto/hive/metastore/HivePageSinkMetadataProvider.java @@ -28,14 +28,16 @@ public class HivePageSinkMetadataProvider private final SchemaTableName schemaTableName; private final Optional
table; private final Map, Optional> modifiedPartitions; + private final MetastoreContext metastoreContext; - public HivePageSinkMetadataProvider(HivePageSinkMetadata pageSinkMetadata, ExtendedHiveMetastore delegate) + public HivePageSinkMetadataProvider(HivePageSinkMetadata pageSinkMetadata, ExtendedHiveMetastore delegate, MetastoreContext metastoreContext) { requireNonNull(pageSinkMetadata, "pageSinkMetadata is null"); this.delegate = delegate; this.schemaTableName = pageSinkMetadata.getSchemaTableName(); this.table = pageSinkMetadata.getTable(); this.modifiedPartitions = pageSinkMetadata.getModifiedPartitions(); + this.metastoreContext = requireNonNull(metastoreContext, "metastoreContext is null"); } public Optional
/*
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.facebook.presto.hive.metastore;

import com.facebook.presto.spi.security.ConnectorIdentity;

import java.util.Objects;

import static com.google.common.base.MoreObjects.toStringHelper;
import static java.util.Objects.requireNonNull;

/**
 * Per-request context passed to every metastore call.  Carries the username of
 * the end user on whose behalf the metastore is accessed, which is what makes
 * metastore impersonation possible.  Immutable and suitable for use in cache keys.
 */
public class MetastoreContext
{
    // The identity the metastore call is executed as; never null.
    private final String username;

    /**
     * @param identity connector identity whose user becomes the metastore username
     */
    public MetastoreContext(ConnectorIdentity identity)
    {
        requireNonNull(identity, "identity is null");
        this.username = requireNonNull(identity.getUser(), "identity.getUser() is null");
    }

    /**
     * @param username explicit metastore username (e.g. a fixed service user)
     */
    public MetastoreContext(String username)
    {
        this.username = requireNonNull(username, "username is null");
    }

    public String getUsername()
    {
        return username;
    }

    @Override
    public boolean equals(Object o)
    {
        if (this == o) {
            return true;
        }
        if (o == null || getClass() != o.getClass()) {
            return false;
        }
        MetastoreContext that = (MetastoreContext) o;
        return Objects.equals(username, that.username);
    }

    @Override
    public int hashCode()
    {
        return Objects.hash(username);
    }

    @Override
    public String toString()
    {
        return toStringHelper(this)
                .add("username", username)
                .toString();
    }
}
Table table = metastore.getTable(databaseName, tableName) + Table table = metastore.getTable(metastoreContext, databaseName, tableName) .orElseThrow(() -> new TableNotFoundException(new SchemaTableName(databaseName, tableName))); if (table.getPartitionColumns().stream().anyMatch(column -> column.getName().equals(columnName))) { diff --git a/presto-hive-metastore/src/main/java/com/facebook/presto/hive/metastore/RecordingHiveMetastore.java b/presto-hive-metastore/src/main/java/com/facebook/presto/hive/metastore/RecordingHiveMetastore.java index 6091947abbb82..e84afc4c465b8 100644 --- a/presto-hive-metastore/src/main/java/com/facebook/presto/hive/metastore/RecordingHiveMetastore.java +++ b/presto-hive-metastore/src/main/java/com/facebook/presto/hive/metastore/RecordingHiveMetastore.java @@ -182,169 +182,170 @@ private static List> toPairs(Cache cache) } @Override - public Optional getDatabase(String databaseName) + public Optional getDatabase(MetastoreContext metastoreContext, String databaseName) { - return loadValue(databaseCache, databaseName, () -> delegate.getDatabase(databaseName)); + return loadValue(databaseCache, databaseName, () -> delegate.getDatabase(metastoreContext, databaseName)); } @Override - public List getAllDatabases() + public List getAllDatabases(MetastoreContext metastoreContext) { if (replay) { return allDatabases.orElseThrow(() -> new PrestoException(NOT_FOUND, "Missing entry for all databases")); } - List result = delegate.getAllDatabases(); + List result = delegate.getAllDatabases(metastoreContext); allDatabases = Optional.of(result); return result; } @Override - public Optional
getTable(String databaseName, String tableName) + public Optional
getTable(MetastoreContext metastoreContext, String databaseName, String tableName) { - return loadValue(tableCache, hiveTableName(databaseName, tableName), () -> delegate.getTable(databaseName, tableName)); + return loadValue(tableCache, hiveTableName(databaseName, tableName), () -> delegate.getTable(metastoreContext, databaseName, tableName)); } @Override - public Set getSupportedColumnStatistics(Type type) + public Set getSupportedColumnStatistics(MetastoreContext metastoreContext, Type type) { - return loadValue(supportedColumnStatisticsCache, type.getTypeSignature().toString(), () -> delegate.getSupportedColumnStatistics(type)); + return loadValue(supportedColumnStatisticsCache, type.getTypeSignature().toString(), () -> delegate.getSupportedColumnStatistics(metastoreContext, type)); } @Override - public PartitionStatistics getTableStatistics(String databaseName, String tableName) + public PartitionStatistics getTableStatistics(MetastoreContext metastoreContext, String databaseName, String tableName) { return loadValue( tableStatisticsCache, hiveTableName(databaseName, tableName), - () -> delegate.getTableStatistics(databaseName, tableName)); + () -> delegate.getTableStatistics(metastoreContext, databaseName, tableName)); } @Override - public Map getPartitionStatistics(String databaseName, String tableName, Set partitionNames) + public Map getPartitionStatistics(MetastoreContext metastoreContext, String databaseName, String tableName, Set partitionNames) { return loadValue( partitionStatisticsCache, getHivePartitionNames(databaseName, tableName, partitionNames), - () -> delegate.getPartitionStatistics(databaseName, tableName, partitionNames)); + () -> delegate.getPartitionStatistics(metastoreContext, databaseName, tableName, partitionNames)); } @Override - public void updateTableStatistics(String databaseName, String tableName, Function update) + public void updateTableStatistics(MetastoreContext metastoreContext, String databaseName, String tableName, Function 
update) { verifyRecordingMode(); - delegate.updateTableStatistics(databaseName, tableName, update); + delegate.updateTableStatistics(metastoreContext, databaseName, tableName, update); } @Override - public void updatePartitionStatistics(String databaseName, String tableName, String partitionName, Function update) + public void updatePartitionStatistics(MetastoreContext metastoreContext, String databaseName, String tableName, String partitionName, Function update) { verifyRecordingMode(); - delegate.updatePartitionStatistics(databaseName, tableName, partitionName, update); + delegate.updatePartitionStatistics(metastoreContext, databaseName, tableName, partitionName, update); } @Override - public Optional> getAllTables(String databaseName) + public Optional> getAllTables(MetastoreContext metastoreContext, String databaseName) { - return loadValue(allTablesCache, databaseName, () -> delegate.getAllTables(databaseName)); + return loadValue(allTablesCache, databaseName, () -> delegate.getAllTables(metastoreContext, databaseName)); } @Override - public Optional> getAllViews(String databaseName) + public Optional> getAllViews(MetastoreContext metastoreContext, String databaseName) { - return loadValue(allViewsCache, databaseName, () -> delegate.getAllViews(databaseName)); + return loadValue(allViewsCache, databaseName, () -> delegate.getAllViews(metastoreContext, databaseName)); } @Override - public void createDatabase(Database database) + public void createDatabase(MetastoreContext metastoreContext, Database database) { verifyRecordingMode(); - delegate.createDatabase(database); + delegate.createDatabase(metastoreContext, database); } @Override - public void dropDatabase(String databaseName) + public void dropDatabase(MetastoreContext metastoreContext, String databaseName) { verifyRecordingMode(); - delegate.dropDatabase(databaseName); + delegate.dropDatabase(metastoreContext, databaseName); } @Override - public void renameDatabase(String databaseName, String 
newDatabaseName) + public void renameDatabase(MetastoreContext metastoreContext, String databaseName, String newDatabaseName) { verifyRecordingMode(); - delegate.renameDatabase(databaseName, newDatabaseName); + delegate.renameDatabase(metastoreContext, databaseName, newDatabaseName); } @Override - public void createTable(Table table, PrincipalPrivileges principalPrivileges) + public void createTable(MetastoreContext metastoreContext, Table table, PrincipalPrivileges principalPrivileges) { verifyRecordingMode(); - delegate.createTable(table, principalPrivileges); + delegate.createTable(metastoreContext, table, principalPrivileges); } @Override - public void dropTable(String databaseName, String tableName, boolean deleteData) + public void dropTable(MetastoreContext metastoreContext, String databaseName, String tableName, boolean deleteData) { verifyRecordingMode(); - delegate.dropTable(databaseName, tableName, deleteData); + delegate.dropTable(metastoreContext, databaseName, tableName, deleteData); } @Override - public void replaceTable(String databaseName, String tableName, Table newTable, PrincipalPrivileges principalPrivileges) + public void replaceTable(MetastoreContext metastoreContext, String databaseName, String tableName, Table newTable, PrincipalPrivileges principalPrivileges) { verifyRecordingMode(); - delegate.replaceTable(databaseName, tableName, newTable, principalPrivileges); + delegate.replaceTable(metastoreContext, databaseName, tableName, newTable, principalPrivileges); } @Override - public void renameTable(String databaseName, String tableName, String newDatabaseName, String newTableName) + public void renameTable(MetastoreContext metastoreContext, String databaseName, String tableName, String newDatabaseName, String newTableName) { verifyRecordingMode(); - delegate.renameTable(databaseName, tableName, newDatabaseName, newTableName); + delegate.renameTable(metastoreContext, databaseName, tableName, newDatabaseName, newTableName); } @Override - 
public void addColumn(String databaseName, String tableName, String columnName, HiveType columnType, String columnComment) + public void addColumn(MetastoreContext metastoreContext, String databaseName, String tableName, String columnName, HiveType columnType, String columnComment) { verifyRecordingMode(); - delegate.addColumn(databaseName, tableName, columnName, columnType, columnComment); + delegate.addColumn(metastoreContext, databaseName, tableName, columnName, columnType, columnComment); } @Override - public void renameColumn(String databaseName, String tableName, String oldColumnName, String newColumnName) + public void renameColumn(MetastoreContext metastoreContext, String databaseName, String tableName, String oldColumnName, String newColumnName) { verifyRecordingMode(); - delegate.renameColumn(databaseName, tableName, oldColumnName, newColumnName); + delegate.renameColumn(metastoreContext, databaseName, tableName, oldColumnName, newColumnName); } @Override - public void dropColumn(String databaseName, String tableName, String columnName) + public void dropColumn(MetastoreContext metastoreContext, String databaseName, String tableName, String columnName) { verifyRecordingMode(); - delegate.dropColumn(databaseName, tableName, columnName); + delegate.dropColumn(metastoreContext, databaseName, tableName, columnName); } @Override - public Optional getPartition(String databaseName, String tableName, List partitionValues) + public Optional getPartition(MetastoreContext metastoreContext, String databaseName, String tableName, List partitionValues) { return loadValue( partitionCache, hivePartitionName(databaseName, tableName, partitionValues), - () -> delegate.getPartition(databaseName, tableName, partitionValues)); + () -> delegate.getPartition(metastoreContext, databaseName, tableName, partitionValues)); } @Override - public Optional> getPartitionNames(String databaseName, String tableName) + public Optional> getPartitionNames(MetastoreContext metastoreContext, 
String databaseName, String tableName) { return loadValue( partitionNamesCache, hiveTableName(databaseName, tableName), - () -> delegate.getPartitionNames(databaseName, tableName)); + () -> delegate.getPartitionNames(metastoreContext, databaseName, tableName)); } @Override public List getPartitionNamesByFilter( + MetastoreContext metastoreContext, String databaseName, String tableName, Map partitionPredicates) @@ -352,11 +353,12 @@ public List getPartitionNamesByFilter( return loadValue( partitionNamesByFilterCache, partitionFilter(databaseName, tableName, partitionPredicates).toString(), - () -> delegate.getPartitionNamesByFilter(databaseName, tableName, partitionPredicates)); + () -> delegate.getPartitionNamesByFilter(metastoreContext, databaseName, tableName, partitionPredicates)); } @Override public List getPartitionNamesWithVersionByFilter( + MetastoreContext metastoreContext, String databaseName, String tableName, Map partitionPredicates) @@ -365,56 +367,56 @@ public List getPartitionNamesWithVersionByFilter( } @Override - public Map> getPartitionsByNames(String databaseName, String tableName, List partitionNames) + public Map> getPartitionsByNames(MetastoreContext metastoreContext, String databaseName, String tableName, List partitionNames) { return loadValue( partitionsByNamesCache, getHivePartitionNames(databaseName, tableName, ImmutableSet.copyOf(partitionNames)), - () -> delegate.getPartitionsByNames(databaseName, tableName, partitionNames)); + () -> delegate.getPartitionsByNames(metastoreContext, databaseName, tableName, partitionNames)); } @Override - public void addPartitions(String databaseName, String tableName, List partitions) + public void addPartitions(MetastoreContext metastoreContext, String databaseName, String tableName, List partitions) { verifyRecordingMode(); - delegate.addPartitions(databaseName, tableName, partitions); + delegate.addPartitions(metastoreContext, databaseName, tableName, partitions); } @Override - public void 
dropPartition(String databaseName, String tableName, List parts, boolean deleteData) + public void dropPartition(MetastoreContext metastoreContext, String databaseName, String tableName, List parts, boolean deleteData) { verifyRecordingMode(); - delegate.dropPartition(databaseName, tableName, parts, deleteData); + delegate.dropPartition(metastoreContext, databaseName, tableName, parts, deleteData); } @Override - public void alterPartition(String databaseName, String tableName, PartitionWithStatistics partition) + public void alterPartition(MetastoreContext metastoreContext, String databaseName, String tableName, PartitionWithStatistics partition) { verifyRecordingMode(); - delegate.alterPartition(databaseName, tableName, partition); + delegate.alterPartition(metastoreContext, databaseName, tableName, partition); } @Override - public Set listTablePrivileges(String databaseName, String tableName, PrestoPrincipal principal) + public Set listTablePrivileges(MetastoreContext metastoreContext, String databaseName, String tableName, PrestoPrincipal principal) { return loadValue( tablePrivilegesCache, new UserTableKey(principal, databaseName, tableName), - () -> delegate.listTablePrivileges(databaseName, tableName, principal)); + () -> delegate.listTablePrivileges(metastoreContext, databaseName, tableName, principal)); } @Override - public void grantTablePrivileges(String databaseName, String tableName, PrestoPrincipal grantee, Set privileges) + public void grantTablePrivileges(MetastoreContext metastoreContext, String databaseName, String tableName, PrestoPrincipal grantee, Set privileges) { verifyRecordingMode(); - delegate.grantTablePrivileges(databaseName, tableName, grantee, privileges); + delegate.grantTablePrivileges(metastoreContext, databaseName, tableName, grantee, privileges); } @Override - public void revokeTablePrivileges(String databaseName, String tableName, PrestoPrincipal grantee, Set privileges) + public void revokeTablePrivileges(MetastoreContext 
metastoreContext, String databaseName, String tableName, PrestoPrincipal grantee, Set privileges) { verifyRecordingMode(); - delegate.revokeTablePrivileges(databaseName, tableName, grantee, privileges); + delegate.revokeTablePrivileges(metastoreContext, databaseName, tableName, grantee, privileges); } private Set getHivePartitionNames(String databaseName, String tableName, Set partitionNames) @@ -425,52 +427,52 @@ private Set getHivePartitionNames(String databaseName, String } @Override - public void createRole(String role, String grantor) + public void createRole(MetastoreContext metastoreContext, String role, String grantor) { verifyRecordingMode(); - delegate.createRole(role, grantor); + delegate.createRole(metastoreContext, role, grantor); } @Override - public void dropRole(String role) + public void dropRole(MetastoreContext metastoreContext, String role) { verifyRecordingMode(); - delegate.dropRole(role); + delegate.dropRole(metastoreContext, role); } @Override - public Set listRoles() + public Set listRoles(MetastoreContext metastoreContext) { if (replay) { return allRoles.orElseThrow(() -> new PrestoException(NOT_FOUND, "Missing entry for roles")); } - Set result = delegate.listRoles(); + Set result = delegate.listRoles(metastoreContext); allRoles = Optional.of(result); return result; } @Override - public void grantRoles(Set roles, Set grantees, boolean withAdminOption, PrestoPrincipal grantor) + public void grantRoles(MetastoreContext metastoreContext, Set roles, Set grantees, boolean withAdminOption, PrestoPrincipal grantor) { verifyRecordingMode(); - delegate.grantRoles(roles, grantees, withAdminOption, grantor); + delegate.grantRoles(metastoreContext, roles, grantees, withAdminOption, grantor); } @Override - public void revokeRoles(Set roles, Set grantees, boolean adminOptionFor, PrestoPrincipal grantor) + public void revokeRoles(MetastoreContext metastoreContext, Set roles, Set grantees, boolean adminOptionFor, PrestoPrincipal grantor) { 
verifyRecordingMode(); - delegate.revokeRoles(roles, grantees, adminOptionFor, grantor); + delegate.revokeRoles(metastoreContext, roles, grantees, adminOptionFor, grantor); } @Override - public Set listRoleGrants(PrestoPrincipal principal) + public Set listRoleGrants(MetastoreContext metastoreContext, PrestoPrincipal principal) { return loadValue( roleGrantsCache, principal, - () -> delegate.listRoleGrants(principal)); + () -> delegate.listRoleGrants(metastoreContext, principal)); } private V loadValue(Cache cache, K key, Supplier valueSupplier) diff --git a/presto-hive-metastore/src/main/java/com/facebook/presto/hive/metastore/SemiTransactionalHiveMetastore.java b/presto-hive-metastore/src/main/java/com/facebook/presto/hive/metastore/SemiTransactionalHiveMetastore.java index c3c4bef51b41d..72ee5316f6600 100644 --- a/presto-hive-metastore/src/main/java/com/facebook/presto/hive/metastore/SemiTransactionalHiveMetastore.java +++ b/presto-hive-metastore/src/main/java/com/facebook/presto/hive/metastore/SemiTransactionalHiveMetastore.java @@ -139,33 +139,33 @@ public SemiTransactionalHiveMetastore( this.undoMetastoreOperationsEnabled = undoMetastoreOperationsEnabled; } - public synchronized List getAllDatabases() + public synchronized List getAllDatabases(MetastoreContext metastoreContext) { checkReadable(); - return delegate.getAllDatabases(); + return delegate.getAllDatabases(metastoreContext); } - public synchronized Optional getDatabase(String databaseName) + public synchronized Optional getDatabase(MetastoreContext metastoreContext, String databaseName) { checkReadable(); - return delegate.getDatabase(databaseName); + return delegate.getDatabase(metastoreContext, databaseName); } - public synchronized Optional> getAllTables(String databaseName) + public synchronized Optional> getAllTables(MetastoreContext metastoreContext, String databaseName) { checkReadable(); if (!tableActions.isEmpty()) { throw new UnsupportedOperationException("Listing all tables after 
adding/dropping/altering tables/views in a transaction is not supported"); } - return delegate.getAllTables(databaseName); + return delegate.getAllTables(metastoreContext, databaseName); } - public synchronized Optional
getTable(String databaseName, String tableName) + public synchronized Optional
getTable(MetastoreContext metastoreContext, String databaseName, String tableName) { checkReadable(); Action tableAction = tableActions.get(new SchemaTableName(databaseName, tableName)); if (tableAction == null) { - return delegate.getTable(databaseName, tableName); + return delegate.getTable(metastoreContext, databaseName, tableName); } switch (tableAction.getType()) { case ADD: @@ -179,17 +179,17 @@ public synchronized Optional
getTable(String databaseName, String tableNa } } - public synchronized Set getSupportedColumnStatistics(Type type) + public synchronized Set getSupportedColumnStatistics(MetastoreContext metastoreContext, Type type) { - return delegate.getSupportedColumnStatistics(type); + return delegate.getSupportedColumnStatistics(metastoreContext, type); } - public synchronized PartitionStatistics getTableStatistics(String databaseName, String tableName) + public synchronized PartitionStatistics getTableStatistics(MetastoreContext metastoreContext, String databaseName, String tableName) { checkReadable(); Action tableAction = tableActions.get(new SchemaTableName(databaseName, tableName)); if (tableAction == null) { - return delegate.getTableStatistics(databaseName, tableName); + return delegate.getTableStatistics(metastoreContext, databaseName, tableName); } switch (tableAction.getType()) { case ADD: @@ -203,10 +203,10 @@ public synchronized PartitionStatistics getTableStatistics(String databaseName, } } - public synchronized Map getPartitionStatistics(String databaseName, String tableName, Set partitionNames) + public synchronized Map getPartitionStatistics(MetastoreContext metastoreContext, String databaseName, String tableName, Set partitionNames) { checkReadable(); - Optional
table = getTable(databaseName, tableName); + Optional
table = getTable(metastoreContext, databaseName, tableName); if (!table.isPresent()) { return ImmutableMap.of(); } @@ -234,7 +234,7 @@ public synchronized Map getPartitionStatistics(Stri } } - Map delegateResult = delegate.getPartitionStatistics(databaseName, tableName, partitionNamesToQuery.build()); + Map delegateResult = delegate.getPartitionStatistics(metastoreContext, databaseName, tableName, partitionNamesToQuery.build()); if (!delegateResult.isEmpty()) { resultBuilder.putAll(delegateResult); } @@ -271,10 +271,10 @@ private TableSource getTableSource(String databaseName, String tableName) } } - public synchronized HivePageSinkMetadata generatePageSinkMetadata(SchemaTableName schemaTableName) + public synchronized HivePageSinkMetadata generatePageSinkMetadata(MetastoreContext metastoreContext, SchemaTableName schemaTableName) { checkReadable(); - Optional
table = getTable(schemaTableName.getSchemaName(), schemaTableName.getTableName()); + Optional
table = getTable(metastoreContext, schemaTableName.getSchemaName(), schemaTableName.getTableName()); if (!table.isPresent()) { return new HivePageSinkMetadata(schemaTableName, Optional.empty(), ImmutableMap.of()); } @@ -296,43 +296,43 @@ public synchronized HivePageSinkMetadata generatePageSinkMetadata(SchemaTableNam modifiedPartitionMap); } - public synchronized Optional> getAllViews(String databaseName) + public synchronized Optional> getAllViews(MetastoreContext metastoreContext, String databaseName) { checkReadable(); if (!tableActions.isEmpty()) { throw new UnsupportedOperationException("Listing all tables after adding/dropping/altering tables/views in a transaction is not supported"); } - return delegate.getAllViews(databaseName); + return delegate.getAllViews(metastoreContext, databaseName); } - public synchronized void createDatabase(Database database) + public synchronized void createDatabase(MetastoreContext metastoreContext, Database database) { - setExclusive((delegate, hdfsEnvironment) -> delegate.createDatabase(database)); + setExclusive((delegate, hdfsEnvironment) -> delegate.createDatabase(metastoreContext, database)); } - public synchronized void dropDatabase(String schemaName) + public synchronized void dropDatabase(MetastoreContext metastoreContext, String schemaName) { - setExclusive((delegate, hdfsEnvironment) -> delegate.dropDatabase(schemaName)); + setExclusive((delegate, hdfsEnvironment) -> delegate.dropDatabase(metastoreContext, schemaName)); } - public synchronized void renameDatabase(String source, String target) + public synchronized void renameDatabase(MetastoreContext metastoreContext, String source, String target) { - setExclusive((delegate, hdfsEnvironment) -> delegate.renameDatabase(source, target)); + setExclusive((delegate, hdfsEnvironment) -> delegate.renameDatabase(metastoreContext, source, target)); } // TODO: Allow updating statistics for 2 tables in the same transaction - public synchronized void setTableStatistics(Table 
table, PartitionStatistics tableStatistics) + public synchronized void setTableStatistics(MetastoreContext metastoreContext, Table table, PartitionStatistics tableStatistics) { - setExclusive((delegate, hdfsEnvironment) -> - delegate.updateTableStatistics(table.getDatabaseName(), table.getTableName(), statistics -> updatePartitionStatistics(statistics, tableStatistics))); + setExclusive((delegate, hdfsEnvironment) -> delegate.updateTableStatistics(metastoreContext, table.getDatabaseName(), table.getTableName(), statistics -> updatePartitionStatistics(statistics, tableStatistics))); } // TODO: Allow updating statistics for 2 tables in the same transaction - public synchronized void setPartitionStatistics(Table table, Map, PartitionStatistics> partitionStatisticsMap) + public synchronized void setPartitionStatistics(MetastoreContext metastoreContext, Table table, Map, PartitionStatistics> partitionStatisticsMap) { setExclusive((delegate, hdfsEnvironment) -> partitionStatisticsMap.forEach((partitionValues, newPartitionStats) -> delegate.updatePartitionStatistics( + metastoreContext, table.getDatabaseName(), table.getTableName(), getPartitionName(table, partitionValues), @@ -416,29 +416,29 @@ public synchronized void dropTable(HdfsContext context, String databaseName, Str } } - public synchronized void replaceView(String databaseName, String tableName, Table table, PrincipalPrivileges principalPrivileges) + public synchronized void replaceView(MetastoreContext metastoreContext, String databaseName, String tableName, Table table, PrincipalPrivileges principalPrivileges) { - setExclusive((delegate, hdfsEnvironment) -> delegate.replaceTable(databaseName, tableName, table, principalPrivileges)); + setExclusive((delegate, hdfsEnvironment) -> delegate.replaceTable(metastoreContext, databaseName, tableName, table, principalPrivileges)); } - public synchronized void renameTable(String databaseName, String tableName, String newDatabaseName, String newTableName) + public 
synchronized void renameTable(MetastoreContext metastoreContext, String databaseName, String tableName, String newDatabaseName, String newTableName) { - setExclusive((delegate, hdfsEnvironment) -> delegate.renameTable(databaseName, tableName, newDatabaseName, newTableName)); + setExclusive((delegate, hdfsEnvironment) -> delegate.renameTable(metastoreContext, databaseName, tableName, newDatabaseName, newTableName)); } - public synchronized void addColumn(String databaseName, String tableName, String columnName, HiveType columnType, String columnComment) + public synchronized void addColumn(MetastoreContext metastoreContext, String databaseName, String tableName, String columnName, HiveType columnType, String columnComment) { - setExclusive((delegate, hdfsEnvironment) -> delegate.addColumn(databaseName, tableName, columnName, columnType, columnComment)); + setExclusive((delegate, hdfsEnvironment) -> delegate.addColumn(metastoreContext, databaseName, tableName, columnName, columnType, columnComment)); } - public synchronized void renameColumn(String databaseName, String tableName, String oldColumnName, String newColumnName) + public synchronized void renameColumn(MetastoreContext metastoreContext, String databaseName, String tableName, String oldColumnName, String newColumnName) { - setExclusive((delegate, hdfsEnvironment) -> delegate.renameColumn(databaseName, tableName, oldColumnName, newColumnName)); + setExclusive((delegate, hdfsEnvironment) -> delegate.renameColumn(metastoreContext, databaseName, tableName, oldColumnName, newColumnName)); } - public synchronized void dropColumn(String databaseName, String tableName, String columnName) + public synchronized void dropColumn(MetastoreContext metastoreContext, String databaseName, String tableName, String columnName) { - setExclusive((delegate, hdfsEnvironment) -> delegate.dropColumn(databaseName, tableName, columnName)); + setExclusive((delegate, hdfsEnvironment) -> delegate.dropColumn(metastoreContext, 
databaseName, tableName, columnName)); } public synchronized void finishInsertIntoExistingTable( @@ -454,10 +454,12 @@ public synchronized void finishInsertIntoExistingTable( setShared(); SchemaTableName schemaTableName = new SchemaTableName(databaseName, tableName); Action oldTableAction = tableActions.get(schemaTableName); + MetastoreContext metastoreContext = new MetastoreContext(session.getIdentity()); if (oldTableAction == null || oldTableAction.getData().getTable().getTableType().equals(TEMPORARY_TABLE)) { - Table table = getTable(databaseName, tableName) + Table table = getTable(metastoreContext, databaseName, tableName) .orElseThrow(() -> new TableNotFoundException(schemaTableName)); - PartitionStatistics currentStatistics = getTableStatistics(databaseName, tableName); + + PartitionStatistics currentStatistics = getTableStatistics(metastoreContext, databaseName, tableName); HdfsContext context = new HdfsContext(session, databaseName, tableName, table.getStorage().getLocation(), false); tableActions.put( schemaTableName, @@ -490,7 +492,7 @@ public synchronized void finishInsertIntoExistingTable( public synchronized void truncateUnpartitionedTable(ConnectorSession session, String databaseName, String tableName) { checkReadable(); - Optional
table = getTable(databaseName, tableName); + Optional
table = getTable(new MetastoreContext(session.getIdentity()), databaseName, tableName); SchemaTableName schemaTableName = new SchemaTableName(databaseName, tableName); if (!table.isPresent()) { throw new TableNotFoundException(schemaTableName); @@ -515,18 +517,19 @@ public synchronized void truncateUnpartitionedTable(ConnectorSession session, St }); } - public synchronized Optional> getPartitionNames(String databaseName, String tableName) + public synchronized Optional> getPartitionNames(MetastoreContext metastoreContext, String databaseName, String tableName) { - return doGetPartitionNames(databaseName, tableName, ImmutableMap.of()); + return doGetPartitionNames(metastoreContext, databaseName, tableName, ImmutableMap.of()); } - public synchronized Optional> getPartitionNamesByFilter(String databaseName, String tableName, Map effectivePredicate) + public synchronized Optional> getPartitionNamesByFilter(MetastoreContext metastoreContext, String databaseName, String tableName, Map effectivePredicate) { - return doGetPartitionNames(databaseName, tableName, effectivePredicate); + return doGetPartitionNames(metastoreContext, databaseName, tableName, effectivePredicate); } @GuardedBy("this") private Optional> doGetPartitionNames( + MetastoreContext metastoreContext, String databaseName, String tableName, Map partitionPredicates) @@ -534,7 +537,7 @@ private Optional> doGetPartitionNames( checkHoldsLock(); checkReadable(); - Optional
table = getTable(databaseName, tableName); + Optional
table = getTable(metastoreContext, databaseName, tableName); if (!table.isPresent()) { return Optional.empty(); } @@ -548,10 +551,10 @@ private Optional> doGetPartitionNames( Optional> partitionNameResult; List partitionColumns = table.get().getPartitionColumns(); if (!partitionPredicates.isEmpty()) { - partitionNameResult = Optional.of(delegate.getPartitionNamesByFilter(databaseName, tableName, partitionPredicates)); + partitionNameResult = Optional.of(delegate.getPartitionNamesByFilter(metastoreContext, databaseName, tableName, partitionPredicates)); } else { - partitionNameResult = delegate.getPartitionNames(databaseName, tableName); + partitionNameResult = delegate.getPartitionNames(metastoreContext, databaseName, tableName); } if (!partitionNameResult.isPresent()) { throw new PrestoException(TRANSACTION_CONFLICT, format("Table %s.%s was dropped by another transaction", databaseName, tableName)); @@ -618,7 +621,7 @@ private static boolean partitionValuesMatch(List values, List pa return true; } - public synchronized Optional getPartition(String databaseName, String tableName, List partitionValues) + public synchronized Optional getPartition(MetastoreContext metastoreContext, String databaseName, String tableName, List partitionValues) { checkReadable(); TableSource tableSource = getTableSource(databaseName, tableName); @@ -629,7 +632,7 @@ public synchronized Optional getPartition(String databaseName, String } switch (tableSource) { case PRE_EXISTING_TABLE: - return delegate.getPartition(databaseName, tableName, partitionValues); + return delegate.getPartition(metastoreContext, databaseName, tableName, partitionValues); case CREATED_IN_THIS_TRANSACTION: return Optional.empty(); default: @@ -637,7 +640,7 @@ public synchronized Optional getPartition(String databaseName, String } } - public synchronized Map> getPartitionsByNames(String databaseName, String tableName, List partitionNames) + public synchronized Map> getPartitionsByNames(MetastoreContext 
metastoreContext, String databaseName, String tableName, List partitionNames) { checkReadable(); TableSource tableSource = getTableSource(databaseName, tableName); @@ -663,7 +666,7 @@ public synchronized Map> getPartitionsByNames(String resultBuilder.put(partitionName, getPartitionFromPartitionAction(partitionAction)); } } - Map> delegateResult = delegate.getPartitionsByNames(databaseName, tableName, partitionNamesToQuery.build()); + Map> delegateResult = delegate.getPartitionsByNames(metastoreContext, databaseName, tableName, partitionNamesToQuery.build()); resultBuilder.putAll(delegateResult); return resultBuilder.build(); } @@ -765,11 +768,13 @@ public synchronized void finishInsertIntoExistingPartition( SchemaTableName schemaTableName = new SchemaTableName(databaseName, tableName); Map, Action> partitionActionsOfTable = partitionActions.computeIfAbsent(schemaTableName, k -> new HashMap<>()); Action oldPartitionAction = partitionActionsOfTable.get(partitionValues); + MetastoreContext metastoreContext = new MetastoreContext(session.getIdentity()); + if (oldPartitionAction == null) { - Partition partition = delegate.getPartition(databaseName, tableName, partitionValues) + Partition partition = delegate.getPartition(metastoreContext, databaseName, tableName, partitionValues) .orElseThrow(() -> new PartitionNotFoundException(schemaTableName, partitionValues)); - String partitionName = getPartitionName(databaseName, tableName, partitionValues); - PartitionStatistics currentStatistics = delegate.getPartitionStatistics(databaseName, tableName, ImmutableSet.of(partitionName)).get(partitionName); + String partitionName = getPartitionName(new MetastoreContext(session.getIdentity()), databaseName, tableName, partitionValues); + PartitionStatistics currentStatistics = delegate.getPartitionStatistics(metastoreContext, databaseName, tableName, ImmutableSet.of(partitionName)).get(partitionName); if (currentStatistics == null) { throw new PrestoException(HIVE_METASTORE_ERROR, 
"currentStatistics is null"); } @@ -800,9 +805,9 @@ public synchronized void finishInsertIntoExistingPartition( } } - private String getPartitionName(String databaseName, String tableName, List partitionValues) + private String getPartitionName(MetastoreContext metastoreContext, String databaseName, String tableName, List partitionValues) { - Table table = getTable(databaseName, tableName) + Table table = getTable(metastoreContext, databaseName, tableName) .orElseThrow(() -> new TableNotFoundException(new SchemaTableName(databaseName, tableName))); return getPartitionName(table, partitionValues); } @@ -815,45 +820,45 @@ private String getPartitionName(Table table, List partitionValues) return makePartName(columnNames, partitionValues); } - public synchronized void createRole(String role, String grantor) + public synchronized void createRole(MetastoreContext metastoreContext, String role, String grantor) { - setExclusive((delegate, hdfsEnvironment) -> delegate.createRole(role, grantor)); + setExclusive((delegate, hdfsEnvironment) -> delegate.createRole(metastoreContext, role, grantor)); } - public synchronized void dropRole(String role) + public synchronized void dropRole(MetastoreContext metastoreContext, String role) { - setExclusive((delegate, hdfsEnvironment) -> delegate.dropRole(role)); + setExclusive((delegate, hdfsEnvironment) -> delegate.dropRole(metastoreContext, role)); } - public synchronized Set listRoles() + public synchronized Set listRoles(MetastoreContext metastoreContext) { checkReadable(); - return delegate.listRoles(); + return delegate.listRoles(metastoreContext); } - public synchronized void grantRoles(Set roles, Set grantees, boolean withAdminOption, PrestoPrincipal grantor) + public synchronized void grantRoles(MetastoreContext metastoreContext, Set roles, Set grantees, boolean withAdminOption, PrestoPrincipal grantor) { - setExclusive((delegate, hdfsEnvironment) -> delegate.grantRoles(roles, grantees, withAdminOption, grantor)); + 
setExclusive((delegate, hdfsEnvironment) -> delegate.grantRoles(metastoreContext, roles, grantees, withAdminOption, grantor)); } - public synchronized void revokeRoles(Set roles, Set grantees, boolean adminOptionFor, PrestoPrincipal grantor) + public synchronized void revokeRoles(MetastoreContext metastoreContext, Set roles, Set grantees, boolean adminOptionFor, PrestoPrincipal grantor) { - setExclusive((delegate, hdfsEnvironment) -> delegate.revokeRoles(roles, grantees, adminOptionFor, grantor)); + setExclusive((delegate, hdfsEnvironment) -> delegate.revokeRoles(metastoreContext, roles, grantees, adminOptionFor, grantor)); } - public synchronized Set listRoleGrants(PrestoPrincipal principal) + public synchronized Set listRoleGrants(MetastoreContext metastoreContext, PrestoPrincipal principal) { checkReadable(); - return delegate.listRoleGrants(principal); + return delegate.listRoleGrants(metastoreContext, principal); } - public synchronized Set listTablePrivileges(String databaseName, String tableName, PrestoPrincipal principal) + public synchronized Set listTablePrivileges(MetastoreContext metastoreContext, String databaseName, String tableName, PrestoPrincipal principal) { checkReadable(); SchemaTableName schemaTableName = new SchemaTableName(databaseName, tableName); Action tableAction = tableActions.get(schemaTableName); if (tableAction == null) { - return delegate.listTablePrivileges(databaseName, tableName, principal); + return delegate.listTablePrivileges(metastoreContext, databaseName, tableName, principal); } switch (tableAction.getType()) { case ADD: @@ -871,7 +876,7 @@ public synchronized Set listTablePrivileges(String databaseNa .build(); } case INSERT_EXISTING: - return delegate.listTablePrivileges(databaseName, tableName, principal); + return delegate.listTablePrivileges(metastoreContext, databaseName, tableName, principal); case DROP: throw new TableNotFoundException(schemaTableName); default: @@ -879,18 +884,19 @@ public synchronized Set 
listTablePrivileges(String databaseNa } } - public synchronized void grantTablePrivileges(String databaseName, String tableName, PrestoPrincipal grantee, Set privileges) + public synchronized void grantTablePrivileges(MetastoreContext metastoreContext, String databaseName, String tableName, PrestoPrincipal grantee, Set privileges) { - setExclusive((delegate, hdfsEnvironment) -> delegate.grantTablePrivileges(databaseName, tableName, grantee, privileges)); + setExclusive((delegate, hdfsEnvironment) -> delegate.grantTablePrivileges(metastoreContext, databaseName, tableName, grantee, privileges)); } - public synchronized void revokeTablePrivileges(String databaseName, String tableName, PrestoPrincipal grantee, Set privileges) + public synchronized void revokeTablePrivileges(MetastoreContext metastoreContext, String databaseName, String tableName, PrestoPrincipal grantee, Set privileges) { - setExclusive((delegate, hdfsEnvironment) -> delegate.revokeTablePrivileges(databaseName, tableName, grantee, privileges)); + setExclusive((delegate, hdfsEnvironment) -> delegate.revokeTablePrivileges(metastoreContext, databaseName, tableName, grantee, privileges)); } public synchronized void declareIntentionToWrite( HdfsContext context, + MetastoreContext metastoreContext, WriteMode writeMode, Path stagingPathRoot, Optional tempPathRoot, @@ -905,7 +911,7 @@ public synchronized void declareIntentionToWrite( throw new PrestoException(NOT_SUPPORTED, "Can not insert into a table with a partition that has been modified in the same transaction when Presto is configured to skip temporary directories."); } } - declaredIntentionsToWrite.add(new DeclaredIntentionToWrite(writeMode, context, stagingPathRoot, tempPathRoot, filePrefix, schemaTableName, temporaryTable)); + declaredIntentionsToWrite.add(new DeclaredIntentionToWrite(writeMode, context, metastoreContext, stagingPathRoot, tempPathRoot, filePrefix, schemaTableName, temporaryTable)); } public synchronized void commit() @@ -963,18 
+969,19 @@ private void commitShared() for (Map.Entry> entry : tableActions.entrySet()) { SchemaTableName schemaTableName = entry.getKey(); Action action = entry.getValue(); + MetastoreContext metastoreContext = new MetastoreContext(action.getContext().getIdentity()); switch (action.getType()) { case DROP: - committer.prepareDropTable(schemaTableName); + committer.prepareDropTable(metastoreContext, schemaTableName); break; case ALTER: committer.prepareAlterTable(); break; case ADD: - committer.prepareAddTable(action.getContext(), action.getData()); + committer.prepareAddTable(metastoreContext, action.getContext(), action.getData()); break; case INSERT_EXISTING: - committer.prepareInsertExistingTable(action.getContext(), action.getData()); + committer.prepareInsertExistingTable(metastoreContext, action.getContext(), action.getData()); break; default: throw new IllegalStateException("Unknown action type"); @@ -986,18 +993,19 @@ private void commitShared() List partitionValues = partitionEntry.getKey(); Action action = partitionEntry.getValue(); HdfsContext context = action.getContext(); + MetastoreContext metastoreContext = new MetastoreContext(action.getContext().getIdentity()); switch (action.getType()) { case DROP: - committer.prepareDropPartition(schemaTableName, partitionValues); + committer.prepareDropPartition(metastoreContext, schemaTableName, partitionValues); break; case ALTER: - committer.prepareAlterPartition(context, action.getData()); + committer.prepareAlterPartition(metastoreContext, context, action.getData()); break; case ADD: - committer.prepareAddPartition(context, action.getData()); + committer.prepareAddPartition(metastoreContext, context, action.getData()); break; case INSERT_EXISTING: - committer.prepareInsertExistingPartition(context, action.getData()); + committer.prepareInsertExistingPartition(metastoreContext, context, action.getData()); break; default: throw new IllegalStateException("Unknown action type"); @@ -1108,11 +1116,11 @@ private 
List> getFileRenameFutures() return ImmutableList.copyOf(fileRenameFutures); } - private void prepareDropTable(SchemaTableName schemaTableName) + private void prepareDropTable(MetastoreContext metastoreContext, SchemaTableName schemaTableName) { metastoreDeleteOperations.add(new IrreversibleMetastoreOperation( format("drop table %s", schemaTableName), - () -> delegate.dropTable(schemaTableName.getSchemaName(), schemaTableName.getTableName(), true))); + () -> delegate.dropTable(metastoreContext, schemaTableName.getSchemaName(), schemaTableName.getTableName(), true))); } private void prepareAlterTable() @@ -1125,7 +1133,7 @@ private void prepareAlterTable() throw new UnsupportedOperationException("Dropping and then creating a table with the same name is not supported"); } - private void prepareAddTable(HdfsContext context, TableAndMore tableAndMore) + private void prepareAddTable(MetastoreContext metastoreContext, HdfsContext context, TableAndMore tableAndMore) { deleteOnly = false; @@ -1177,9 +1185,9 @@ private void prepareAddTable(HdfsContext context, TableAndMore tableAndMore) } } } - addTableOperations.add(new CreateTableOperation(table, tableAndMore.getPrincipalPrivileges(), tableAndMore.isIgnoreExisting())); + addTableOperations.add(new CreateTableOperation(metastoreContext, table, tableAndMore.getPrincipalPrivileges(), tableAndMore.isIgnoreExisting())); if (!isPrestoView(table)) { - updateStatisticsOperations.add(new UpdateStatisticsOperation( + updateStatisticsOperations.add(new UpdateStatisticsOperation(metastoreContext, new SchemaTableName(table.getDatabaseName(), table.getTableName()), Optional.empty(), tableAndMore.getStatisticsUpdate(), @@ -1187,7 +1195,7 @@ private void prepareAddTable(HdfsContext context, TableAndMore tableAndMore) } } - private void prepareInsertExistingTable(HdfsContext context, TableAndMore tableAndMore) + private void prepareInsertExistingTable(MetastoreContext metastoreContext, HdfsContext context, TableAndMore tableAndMore) { 
deleteOnly = false; @@ -1204,34 +1212,34 @@ private void prepareInsertExistingTable(HdfsContext context, TableAndMore tableA if (!targetPath.equals(currentPath)) { asyncRename(hdfsEnvironment, renameExecutor, fileRenameCancelled, fileRenameFutures, context, currentPath, targetPath, tableAndMore.getFileNames().get()); } - updateStatisticsOperations.add(new UpdateStatisticsOperation( + updateStatisticsOperations.add(new UpdateStatisticsOperation(metastoreContext, new SchemaTableName(table.getDatabaseName(), table.getTableName()), Optional.empty(), tableAndMore.getStatisticsUpdate(), true)); } - private void prepareDropPartition(SchemaTableName schemaTableName, List partitionValues) + private void prepareDropPartition(MetastoreContext metastoreContext, SchemaTableName schemaTableName, List partitionValues) { metastoreDeleteOperations.add(new IrreversibleMetastoreOperation( format("drop partition %s.%s %s", schemaTableName.getSchemaName(), schemaTableName.getTableName(), partitionValues), - () -> delegate.dropPartition(schemaTableName.getSchemaName(), schemaTableName.getTableName(), partitionValues, true))); + () -> delegate.dropPartition(metastoreContext, schemaTableName.getSchemaName(), schemaTableName.getTableName(), partitionValues, true))); } - private void prepareAlterPartition(HdfsContext context, PartitionAndMore partitionAndMore) + private void prepareAlterPartition(MetastoreContext metastoreContext, HdfsContext context, PartitionAndMore partitionAndMore) { deleteOnly = false; Partition partition = partitionAndMore.getPartition(); String targetLocation = partition.getStorage().getLocation(); - Optional oldPartition = delegate.getPartition(partition.getDatabaseName(), partition.getTableName(), partition.getValues()); + Optional oldPartition = delegate.getPartition(metastoreContext, partition.getDatabaseName(), partition.getTableName(), partition.getValues()); if (!oldPartition.isPresent()) { throw new PrestoException( TRANSACTION_CONFLICT, format("The partition 
that this transaction modified was deleted in another transaction. %s %s", partition.getTableName(), partition.getValues())); } - String partitionName = getPartitionName(partition.getDatabaseName(), partition.getTableName(), partition.getValues()); - PartitionStatistics oldPartitionStatistics = getExistingPartitionStatistics(partition, partitionName); + String partitionName = getPartitionName(metastoreContext, partition.getDatabaseName(), partition.getTableName(), partition.getValues()); + PartitionStatistics oldPartitionStatistics = getExistingPartitionStatistics(metastoreContext, partition, partitionName); String oldPartitionLocation = oldPartition.get().getStorage().getLocation(); Path oldPartitionPath = new Path(oldPartitionLocation); @@ -1272,15 +1280,15 @@ private void prepareAlterPartition(HdfsContext context, PartitionAndMore partiti } // Partition alter must happen regardless of whether original and current location is the same // because metadata might change: e.g. storage format, column types, etc - alterPartitionOperations.add(new AlterPartitionOperation( + alterPartitionOperations.add(new AlterPartitionOperation(metastoreContext, new PartitionWithStatistics(partition, partitionName, partitionAndMore.getStatisticsUpdate()), new PartitionWithStatistics(oldPartition.get(), partitionName, oldPartitionStatistics))); } - private PartitionStatistics getExistingPartitionStatistics(Partition partition, String partitionName) + private PartitionStatistics getExistingPartitionStatistics(MetastoreContext metastoreContext, Partition partition, String partitionName) { try { - PartitionStatistics statistics = delegate.getPartitionStatistics(partition.getDatabaseName(), partition.getTableName(), ImmutableSet.of(partitionName)) + PartitionStatistics statistics = delegate.getPartitionStatistics(metastoreContext, partition.getDatabaseName(), partition.getTableName(), ImmutableSet.of(partitionName)) .get(partitionName); if (statistics == null) { throw new PrestoException( 
@@ -1303,7 +1311,7 @@ private PartitionStatistics getExistingPartitionStatistics(Partition partition, } } - private void prepareAddPartition(HdfsContext context, PartitionAndMore partitionAndMore) + private void prepareAddPartition(MetastoreContext metastoreContext, HdfsContext context, PartitionAndMore partitionAndMore) { deleteOnly = false; @@ -1315,7 +1323,7 @@ private void prepareAddPartition(HdfsContext context, PartitionAndMore partition SchemaTableName schemaTableName = new SchemaTableName(partition.getDatabaseName(), partition.getTableName()); PartitionAdder partitionAdder = partitionAdders.computeIfAbsent( schemaTableName, - ignored -> new PartitionAdder(partition.getDatabaseName(), partition.getTableName(), delegate, PARTITION_COMMIT_BATCH_SIZE)); + ignored -> new PartitionAdder(metastoreContext, partition.getDatabaseName(), partition.getTableName(), delegate, PARTITION_COMMIT_BATCH_SIZE)); if (pathExists(context, hdfsEnvironment, currentPath)) { if (!targetPath.equals(currentPath)) { @@ -1331,11 +1339,11 @@ private void prepareAddPartition(HdfsContext context, PartitionAndMore partition cleanUpTasksForAbort.add(new DirectoryCleanUpTask(context, targetPath, true)); createDirectory(context, hdfsEnvironment, targetPath); } - String partitionName = getPartitionName(partition.getDatabaseName(), partition.getTableName(), partition.getValues()); + String partitionName = getPartitionName(metastoreContext, partition.getDatabaseName(), partition.getTableName(), partition.getValues()); partitionAdder.addPartition(new PartitionWithStatistics(partition, partitionName, partitionAndMore.getStatisticsUpdate())); } - private void prepareInsertExistingPartition(HdfsContext context, PartitionAndMore partitionAndMore) + private void prepareInsertExistingPartition(MetastoreContext metastoreContext, HdfsContext context, PartitionAndMore partitionAndMore) { deleteOnly = false; @@ -1346,9 +1354,9 @@ private void prepareInsertExistingPartition(HdfsContext context, 
PartitionAndMor if (!targetPath.equals(currentPath)) { asyncRename(hdfsEnvironment, renameExecutor, fileRenameCancelled, fileRenameFutures, context, currentPath, targetPath, partitionAndMore.getFileNames()); } - updateStatisticsOperations.add(new UpdateStatisticsOperation( + updateStatisticsOperations.add(new UpdateStatisticsOperation(metastoreContext, new SchemaTableName(partition.getDatabaseName(), partition.getTableName()), - Optional.of(getPartitionName(partition.getDatabaseName(), partition.getTableName(), partition.getValues())), + Optional.of(getPartitionName(metastoreContext, partition.getDatabaseName(), partition.getTableName(), partition.getValues())), partitionAndMore.getStatisticsUpdate(), true)); } @@ -1596,14 +1604,15 @@ private void rollbackShared() pathsToClean.add(baseDirectory); SchemaTableName schemaTableName = declaredIntentionToWrite.getSchemaTableName(); - Optional
table = delegate.getTable(schemaTableName.getSchemaName(), schemaTableName.getTableName()); + MetastoreContext metastoreContext = declaredIntentionToWrite.getMetastoreContext(); + Optional
table = delegate.getTable(metastoreContext, schemaTableName.getSchemaName(), schemaTableName.getTableName()); if (table.isPresent()) { // check every existing partition that is outside for the base directory if (!table.get().getPartitionColumns().isEmpty()) { - List partitionNames = delegate.getPartitionNames(schemaTableName.getSchemaName(), schemaTableName.getTableName()) + List partitionNames = delegate.getPartitionNames(metastoreContext, schemaTableName.getSchemaName(), schemaTableName.getTableName()) .orElse(ImmutableList.of()); for (List partitionNameBatch : Iterables.partition(partitionNames, 10)) { - Collection> partitions = delegate.getPartitionsByNames(schemaTableName.getSchemaName(), schemaTableName.getTableName(), partitionNameBatch).values(); + Collection> partitions = delegate.getPartitionsByNames(metastoreContext, schemaTableName.getSchemaName(), schemaTableName.getTableName(), partitionNameBatch).values(); partitions.stream() .filter(Optional::isPresent) .map(Optional::get) @@ -2236,10 +2245,12 @@ private static class DeclaredIntentionToWrite private final Optional tempPathRoot; private final SchemaTableName schemaTableName; private final boolean temporaryTable; + private final MetastoreContext metastoreContext; public DeclaredIntentionToWrite( WriteMode mode, HdfsContext context, + MetastoreContext metastoreContext, Path stagingPathRoot, Optional tempPathRoot, String filePrefix, @@ -2248,6 +2259,7 @@ public DeclaredIntentionToWrite( { this.mode = requireNonNull(mode, "mode is null"); this.context = requireNonNull(context, "context is null"); + this.metastoreContext = requireNonNull(metastoreContext, "metastoreContext is null"); this.stagingPathRoot = requireNonNull(stagingPathRoot, "stagingPathRoot is null"); this.tempPathRoot = requireNonNull(tempPathRoot, "tempPathRoot is null"); this.filePrefix = requireNonNull(filePrefix, "filePrefix is null"); @@ -2290,12 +2302,18 @@ public boolean isTemporaryTable() return temporaryTable; } + public 
MetastoreContext getMetastoreContext() + { + return metastoreContext; + } + @Override public String toString() { return toStringHelper(this) .add("mode", mode) .add("context", context) + .add("metastoreContext", metastoreContext) .add("filePrefix", filePrefix) .add("stagingPathRoot", stagingPathRoot) .add("tempPathRoot", tempPathRoot) @@ -2443,10 +2461,12 @@ private static class CreateTableOperation private boolean tableCreated; private final boolean ignoreExisting; private final String queryId; + private final MetastoreContext metastoreContext; - public CreateTableOperation(Table newTable, PrincipalPrivileges privileges, boolean ignoreExisting) + public CreateTableOperation(MetastoreContext metastoreContext, Table newTable, PrincipalPrivileges privileges, boolean ignoreExisting) { requireNonNull(newTable, "newTable is null"); + this.metastoreContext = requireNonNull(metastoreContext, "identity is null"); this.newTable = newTable; this.privileges = requireNonNull(privileges, "privileges is null"); this.ignoreExisting = ignoreExisting; @@ -2462,12 +2482,12 @@ public void run(ExtendedHiveMetastore metastore) { boolean done = false; try { - metastore.createTable(newTable, privileges); + metastore.createTable(metastoreContext, newTable, privileges); done = true; } catch (RuntimeException e) { try { - Optional
existingTable = metastore.getTable(newTable.getDatabaseName(), newTable.getTableName()); + Optional
existingTable = metastore.getTable(metastoreContext, newTable.getDatabaseName(), newTable.getTableName()); if (existingTable.isPresent()) { Table table = existingTable.get(); Optional existingTableQueryId = getPrestoQueryId(table); @@ -2527,7 +2547,7 @@ public void undo(ExtendedHiveMetastore metastore) if (!tableCreated) { return; } - metastore.dropTable(newTable.getDatabaseName(), newTable.getTableName(), false); + metastore.dropTable(metastoreContext, newTable.getDatabaseName(), newTable.getTableName(), false); } } @@ -2536,11 +2556,13 @@ private static class AlterPartitionOperation private final PartitionWithStatistics newPartition; private final PartitionWithStatistics oldPartition; private boolean undo; + private final MetastoreContext metastoreContext; - public AlterPartitionOperation(PartitionWithStatistics newPartition, PartitionWithStatistics oldPartition) + public AlterPartitionOperation(MetastoreContext metastoreContext, PartitionWithStatistics newPartition, PartitionWithStatistics oldPartition) { this.newPartition = requireNonNull(newPartition, "newPartition is null"); this.oldPartition = requireNonNull(oldPartition, "oldPartition is null"); + this.metastoreContext = requireNonNull(metastoreContext, "metastoreContext is null"); checkArgument(newPartition.getPartition().getDatabaseName().equals(oldPartition.getPartition().getDatabaseName())); checkArgument(newPartition.getPartition().getTableName().equals(oldPartition.getPartition().getTableName())); checkArgument(newPartition.getPartition().getValues().equals(oldPartition.getPartition().getValues())); @@ -2558,7 +2580,7 @@ public String getDescription() public void run(ExtendedHiveMetastore metastore) { undo = true; - metastore.alterPartition(newPartition.getPartition().getDatabaseName(), newPartition.getPartition().getTableName(), newPartition); + metastore.alterPartition(metastoreContext, newPartition.getPartition().getDatabaseName(), newPartition.getPartition().getTableName(), newPartition); } public 
void undo(ExtendedHiveMetastore metastore) @@ -2566,7 +2588,7 @@ public void undo(ExtendedHiveMetastore metastore) if (!undo) { return; } - metastore.alterPartition(oldPartition.getPartition().getDatabaseName(), oldPartition.getPartition().getTableName(), oldPartition); + metastore.alterPartition(metastoreContext, oldPartition.getPartition().getDatabaseName(), oldPartition.getPartition().getTableName(), oldPartition); } } @@ -2575,13 +2597,15 @@ private static class UpdateStatisticsOperation private final SchemaTableName tableName; private final Optional partitionName; private final PartitionStatistics statistics; + private final MetastoreContext metastoreContext; private final boolean merge; private boolean done; - public UpdateStatisticsOperation(SchemaTableName tableName, Optional partitionName, PartitionStatistics statistics, boolean merge) + public UpdateStatisticsOperation(MetastoreContext metastoreContext, SchemaTableName tableName, Optional partitionName, PartitionStatistics statistics, boolean merge) { this.tableName = requireNonNull(tableName, "tableName is null"); + this.metastoreContext = requireNonNull(metastoreContext, "metastoreContext is null"); this.partitionName = requireNonNull(partitionName, "partitionValues is null"); this.statistics = requireNonNull(statistics, "statistics is null"); this.merge = merge; @@ -2590,10 +2614,10 @@ public UpdateStatisticsOperation(SchemaTableName tableName, Optional par public void run(ExtendedHiveMetastore metastore) { if (partitionName.isPresent()) { - metastore.updatePartitionStatistics(tableName.getSchemaName(), tableName.getTableName(), partitionName.get(), this::updateStatistics); + metastore.updatePartitionStatistics(metastoreContext, tableName.getSchemaName(), tableName.getTableName(), partitionName.get(), this::updateStatistics); } else { - metastore.updateTableStatistics(tableName.getSchemaName(), tableName.getTableName(), this::updateStatistics); + metastore.updateTableStatistics(metastoreContext, 
tableName.getSchemaName(), tableName.getTableName(), this::updateStatistics); } done = true; } @@ -2604,10 +2628,10 @@ public void undo(ExtendedHiveMetastore metastore) return; } if (partitionName.isPresent()) { - metastore.updatePartitionStatistics(tableName.getSchemaName(), tableName.getTableName(), partitionName.get(), this::resetStatistics); + metastore.updatePartitionStatistics(metastoreContext, tableName.getSchemaName(), tableName.getTableName(), partitionName.get(), this::resetStatistics); } else { - metastore.updateTableStatistics(tableName.getSchemaName(), tableName.getTableName(), this::resetStatistics); + metastore.updateTableStatistics(metastoreContext, tableName.getSchemaName(), tableName.getTableName(), this::resetStatistics); } } @@ -2637,10 +2661,12 @@ private static class PartitionAdder private final ExtendedHiveMetastore metastore; private final int batchSize; private final List partitions; + private final MetastoreContext metastoreContext; private List> createdPartitionValues = new ArrayList<>(); - public PartitionAdder(String schemaName, String tableName, ExtendedHiveMetastore metastore, int batchSize) + public PartitionAdder(MetastoreContext metastoreContext, String schemaName, String tableName, ExtendedHiveMetastore metastore, int batchSize) { + this.metastoreContext = requireNonNull(metastoreContext, "metastoreContext is null"); this.schemaName = schemaName; this.tableName = tableName; this.metastore = metastore; @@ -2669,7 +2695,7 @@ public void execute() List> batchedPartitions = Lists.partition(partitions, batchSize); for (List batch : batchedPartitions) { try { - metastore.addPartitions(schemaName, tableName, batch); + metastore.addPartitions(metastoreContext, schemaName, tableName, batch); for (PartitionWithStatistics partition : batch) { createdPartitionValues.add(partition.getPartition().getValues()); } @@ -2680,7 +2706,7 @@ public void execute() boolean batchCompletelyAdded = true; for (PartitionWithStatistics partition : batch) { try 
{ - Optional remotePartition = metastore.getPartition(schemaName, tableName, partition.getPartition().getValues()); + Optional remotePartition = metastore.getPartition(metastoreContext, schemaName, tableName, partition.getPartition().getValues()); // getPrestoQueryId(partition) is guaranteed to be non-empty. It is asserted in PartitionAdder.addPartition. if (remotePartition.isPresent() && getPrestoQueryId(remotePartition.get()).equals(getPrestoQueryId(partition.getPartition()))) { createdPartitionValues.add(partition.getPartition().getValues()); @@ -2717,7 +2743,7 @@ public List> rollback() List> partitionsFailedToRollback = new ArrayList<>(); for (List createdPartitionValue : createdPartitionValues) { try { - metastore.dropPartition(schemaName, tableName, createdPartitionValue, false); + metastore.dropPartition(metastoreContext, schemaName, tableName, createdPartitionValue, false); } catch (PartitionNotFoundException e) { // Maybe some one deleted the partition we added. diff --git a/presto-hive-metastore/src/main/java/com/facebook/presto/hive/metastore/alluxio/AlluxioHiveMetastore.java b/presto-hive-metastore/src/main/java/com/facebook/presto/hive/metastore/alluxio/AlluxioHiveMetastore.java index de32e1c1d4167..579d2b342a30b 100644 --- a/presto-hive-metastore/src/main/java/com/facebook/presto/hive/metastore/alluxio/AlluxioHiveMetastore.java +++ b/presto-hive-metastore/src/main/java/com/facebook/presto/hive/metastore/alluxio/AlluxioHiveMetastore.java @@ -27,6 +27,7 @@ import com.facebook.presto.hive.metastore.ExtendedHiveMetastore; import com.facebook.presto.hive.metastore.HiveColumnStatistics; import com.facebook.presto.hive.metastore.HivePrivilegeInfo; +import com.facebook.presto.hive.metastore.MetastoreContext; import com.facebook.presto.hive.metastore.MetastoreUtil; import com.facebook.presto.hive.metastore.Partition; import com.facebook.presto.hive.metastore.PartitionNameWithVersion; @@ -76,7 +77,7 @@ public AlluxioHiveMetastore(TableMasterClient client) } 
@Override - public Optional getDatabase(String databaseName) + public Optional getDatabase(MetastoreContext metastoreContext, String databaseName) { try { return Optional.of(AlluxioProtoUtils.fromProto(client.getDatabase(databaseName))); @@ -87,7 +88,7 @@ public Optional getDatabase(String databaseName) } @Override - public List getAllDatabases() + public List getAllDatabases(MetastoreContext metastoreContext) { try { return client.getAllDatabases(); @@ -98,7 +99,7 @@ public List getAllDatabases() } @Override - public Optional
getTable(String databaseName, String tableName) + public Optional
getTable(MetastoreContext metastoreContext, String databaseName, String tableName) { try { return Optional.of(AlluxioProtoUtils.fromProto(client.getTable(databaseName, tableName))); @@ -112,28 +113,28 @@ public Optional
getTable(String databaseName, String tableName) } @Override - public Set getSupportedColumnStatistics(Type type) + public Set getSupportedColumnStatistics(MetastoreContext metastoreContext, Type type) { return MetastoreUtil.getSupportedColumnStatistics(type); } - private Map groupStatisticsByColumn(List statistics, OptionalLong rowCount) + private Map groupStatisticsByColumn(MetastoreContext metastoreContext, List statistics, OptionalLong rowCount) { return statistics.stream() .collect(toImmutableMap(ColumnStatisticsInfo::getColName, statisticsInfo -> AlluxioProtoUtils.fromProto(statisticsInfo.getData(), rowCount))); } @Override - public PartitionStatistics getTableStatistics(String databaseName, String tableName) + public PartitionStatistics getTableStatistics(MetastoreContext metastoreContext, String databaseName, String tableName) { try { - Table table = getTable(databaseName, tableName).orElseThrow( + Table table = getTable(metastoreContext, databaseName, tableName).orElseThrow( () -> new PrestoException(HIVE_METASTORE_ERROR, String.format("Could not retrieve table %s.%s", databaseName, tableName))); HiveBasicStatistics basicStatistics = getHiveBasicStatistics(table.getParameters()); List columns = table.getPartitionColumns(); List columnNames = columns.stream().map(Column::getName).collect(toImmutableList()); List columnStatistics = client.getTableColumnStatistics(table.getDatabaseName(), table.getTableName(), columnNames); - return new PartitionStatistics(basicStatistics, groupStatisticsByColumn(columnStatistics, basicStatistics.getRowCount())); + return new PartitionStatistics(basicStatistics, groupStatisticsByColumn(metastoreContext, columnStatistics, basicStatistics.getRowCount())); } catch (Exception e) { throw new PrestoException(HIVE_METASTORE_ERROR, e); @@ -141,11 +142,11 @@ public PartitionStatistics getTableStatistics(String databaseName, String tableN } @Override - public Map getPartitionStatistics(String databaseName, String tableName, Set 
partitionNames) + public Map getPartitionStatistics(MetastoreContext metastoreContext, String databaseName, String tableName, Set partitionNames) { - Table table = getTable(databaseName, tableName).orElseThrow(() -> new TableNotFoundException(new SchemaTableName(databaseName, tableName))); + Table table = getTable(metastoreContext, databaseName, tableName).orElseThrow(() -> new TableNotFoundException(new SchemaTableName(databaseName, tableName))); - Map partitionBasicStatistics = getPartitionsByNames(databaseName, tableName, ImmutableList.copyOf(partitionNames)).entrySet().stream() + Map partitionBasicStatistics = getPartitionsByNames(metastoreContext, databaseName, tableName, ImmutableList.copyOf(partitionNames)).entrySet().stream() .filter(entry -> entry.getValue().isPresent()) .collect(toImmutableMap( entry -> MetastoreUtil.makePartName(table.getPartitionColumns(), entry.getValue().get().getValues()), @@ -173,7 +174,7 @@ public Map getPartitionStatistics(String databaseNa .filter(entry -> !entry.getValue().isEmpty()) .collect(toImmutableMap( Map.Entry::getKey, - entry -> groupStatisticsByColumn(entry.getValue(), partitionRowCounts.getOrDefault(entry.getKey(), OptionalLong.empty())))); + entry -> groupStatisticsByColumn(metastoreContext, entry.getValue(), partitionRowCounts.getOrDefault(entry.getKey(), OptionalLong.empty())))); ImmutableMap.Builder result = ImmutableMap.builder(); for (String partitionName : partitionBasicStatistics.keySet()) { @@ -185,19 +186,19 @@ public Map getPartitionStatistics(String databaseNa } @Override - public void updateTableStatistics(String databaseName, String tableName, Function update) + public void updateTableStatistics(MetastoreContext metastoreContext, String databaseName, String tableName, Function update) { throw new UnsupportedOperationException("updateTableStatistics is not supported in AlluxioHiveMetastore"); } @Override - public void updatePartitionStatistics(String databaseName, String tableName, String partitionName, 
Function update) + public void updatePartitionStatistics(MetastoreContext metastoreContext, String databaseName, String tableName, String partitionName, Function update) { throw new UnsupportedOperationException("updatePartitionStatistics is not supported in AlluxioHiveMetastore"); } @Override - public Optional> getAllTables(String databaseName) + public Optional> getAllTables(MetastoreContext metastoreContext, String databaseName) { try { return Optional.of(client.getAllTables(databaseName)); @@ -208,80 +209,80 @@ public Optional> getAllTables(String databaseName) } @Override - public Optional> getAllViews(String databaseName) + public Optional> getAllViews(MetastoreContext metastoreContext, String databaseName) { // TODO: Add views on the server side return Optional.of(Collections.emptyList()); } @Override - public void createDatabase(Database database) + public void createDatabase(MetastoreContext metastoreContext, Database database) { throw new UnsupportedOperationException("createDatabase is not supported in AlluxioHiveMetastore"); } @Override - public void dropDatabase(String databaseName) + public void dropDatabase(MetastoreContext metastoreContext, String databaseName) { throw new UnsupportedOperationException("dropDatabase is not supported in AlluxioHiveMetastore"); } @Override - public void renameDatabase(String databaseName, String newDatabaseName) + public void renameDatabase(MetastoreContext metastoreContext, String databaseName, String newDatabaseName) { throw new UnsupportedOperationException("renameDatabase is not supported in AlluxioHiveMetastore"); } @Override - public void createTable(Table table, PrincipalPrivileges principalPrivileges) + public void createTable(MetastoreContext metastoreContext, Table table, PrincipalPrivileges principalPrivileges) { throw new UnsupportedOperationException("createTable is not supported in AlluxioHiveMetastore"); } @Override - public void dropTable(String databaseName, String tableName, boolean deleteData) + 
public void dropTable(MetastoreContext metastoreContext, String databaseName, String tableName, boolean deleteData) { throw new UnsupportedOperationException("dropTable is not supported in AlluxioHiveMetastore"); } @Override - public void replaceTable(String databaseName, String tableName, Table newTable, PrincipalPrivileges principalPrivileges) + public void replaceTable(MetastoreContext metastoreContext, String databaseName, String tableName, Table newTable, PrincipalPrivileges principalPrivileges) { throw new UnsupportedOperationException("replaceTable is not supported in AlluxioHiveMetastore"); } @Override - public void renameTable(String databaseName, String tableName, String newDatabaseName, String newTableName) + public void renameTable(MetastoreContext metastoreContext, String databaseName, String tableName, String newDatabaseName, String newTableName) { throw new UnsupportedOperationException("renameTable is not supported in AlluxioHiveMetastore"); } @Override - public void addColumn(String databaseName, String tableName, String columnName, HiveType columnType, String columnComment) + public void addColumn(MetastoreContext metastoreContext, String databaseName, String tableName, String columnName, HiveType columnType, String columnComment) { throw new UnsupportedOperationException("addColumn is not supported in AlluxioHiveMetastore"); } @Override - public void renameColumn(String databaseName, String tableName, String oldColumnName, String newColumnName) + public void renameColumn(MetastoreContext metastoreContext, String databaseName, String tableName, String oldColumnName, String newColumnName) { throw new UnsupportedOperationException("renameColumn is not supported in AlluxioHiveMetastore"); } @Override - public void dropColumn(String databaseName, String tableName, String columnName) + public void dropColumn(MetastoreContext metastoreContext, String databaseName, String tableName, String columnName) { throw new UnsupportedOperationException("dropColumn 
is not supported in AlluxioHiveMetastore"); } @Override - public Optional getPartition(String databaseName, String tableName, List partitionValues) + public Optional getPartition(MetastoreContext metastoreContext, String databaseName, String tableName, List partitionValues) { throw new UnsupportedOperationException("getPartition is not supported in AlluxioHiveMetastore"); } @Override - public Optional> getPartitionNames(String databaseName, String tableName) + public Optional> getPartitionNames(MetastoreContext metastoreContext, String databaseName, String tableName) { try { List partitionInfos = AlluxioProtoUtils.toPartitionInfoList(client.readTable(databaseName, tableName, Constraint.getDefaultInstance())); @@ -293,14 +294,15 @@ public Optional> getPartitionNames(String databaseName, String tabl } @Override - public List getPartitionNamesByFilter(String databaseName, String tableName, Map partitionPredicates) + public List getPartitionNamesByFilter(MetastoreContext metastoreContext, String databaseName, String tableName, Map partitionPredicates) { List parts = convertPredicateToParts(partitionPredicates); - return getPartitionNamesByParts(databaseName, tableName, parts).orElse(ImmutableList.of()); + return getPartitionNamesByParts(metastoreContext, databaseName, tableName, parts).orElse(ImmutableList.of()); } @Override public List getPartitionNamesWithVersionByFilter( + MetastoreContext metastoreContext, String databaseName, String tableName, Map partitionPredicates) @@ -317,7 +319,7 @@ public List getPartitionNamesWithVersionByFilter( * @param parts list of values which returned partitions should contain * @return optionally, a list of strings where each entry is in the form of {key}={value} */ - public Optional> getPartitionNamesByParts(String databaseName, String tableName, List parts) + public Optional> getPartitionNamesByParts(MetastoreContext metastoreContext, String databaseName, String tableName, List parts) { try { List partitionInfos = 
AlluxioProtoUtils.toPartitionInfoList(client.readTable(databaseName, tableName, Constraint.getDefaultInstance())); @@ -348,7 +350,7 @@ public Optional> getPartitionNamesByParts(String databaseName, Stri } @Override - public Map> getPartitionsByNames(String databaseName, String tableName, List partitionNames) + public Map> getPartitionsByNames(MetastoreContext metastoreContext, String databaseName, String tableName, List partitionNames) { if (partitionNames.isEmpty()) { return ImmutableMap.of(); @@ -370,73 +372,73 @@ public Map> getPartitionsByNames(String databaseName } @Override - public void addPartitions(String databaseName, String tableName, List partitions) + public void addPartitions(MetastoreContext metastoreContext, String databaseName, String tableName, List partitions) { throw new UnsupportedOperationException("addPartitions is not supported in AlluxioHiveMetastore"); } @Override - public void dropPartition(String databaseName, String tableName, List parts, boolean deleteData) + public void dropPartition(MetastoreContext metastoreContext, String databaseName, String tableName, List parts, boolean deleteData) { throw new UnsupportedOperationException("dropPartition is not supported in AlluxioHiveMetastore"); } @Override - public void alterPartition(String databaseName, String tableName, PartitionWithStatistics partition) + public void alterPartition(MetastoreContext metastoreContext, String databaseName, String tableName, PartitionWithStatistics partition) { throw new UnsupportedOperationException("alterPartition is not supported in AlluxioHiveMetastore"); } @Override - public void createRole(String role, String grantor) + public void createRole(MetastoreContext metastoreContext, String role, String grantor) { throw new UnsupportedOperationException("createRole is not supported in AlluxioHiveMetastore"); } @Override - public void dropRole(String role) + public void dropRole(MetastoreContext metastoreContext, String role) { throw new 
UnsupportedOperationException("dropRole is not supported in AlluxioHiveMetastore"); } @Override - public Set listRoles() + public Set listRoles(MetastoreContext metastoreContext) { throw new UnsupportedOperationException("listRoles is not supported in AlluxioHiveMetastore"); } @Override - public void grantRoles(Set roles, Set grantees, boolean withAdminOption, PrestoPrincipal grantor) + public void grantRoles(MetastoreContext metastoreContext, Set roles, Set grantees, boolean withAdminOption, PrestoPrincipal grantor) { throw new UnsupportedOperationException("grantRoles is not supported in AlluxioHiveMetastore"); } @Override - public void revokeRoles(Set roles, Set grantees, boolean adminOptionFor, PrestoPrincipal grantor) + public void revokeRoles(MetastoreContext metastoreContext, Set roles, Set grantees, boolean adminOptionFor, PrestoPrincipal grantor) { throw new UnsupportedOperationException("revokeRoles is not supported in AlluxioHiveMetastore"); } @Override - public Set listRoleGrants(PrestoPrincipal principal) + public Set listRoleGrants(MetastoreContext metastoreContext, PrestoPrincipal principal) { throw new UnsupportedOperationException("listRoleGrants is not supported in AlluxioHiveMetastore"); } @Override - public void grantTablePrivileges(String databaseName, String tableName, PrestoPrincipal grantee, Set privileges) + public void grantTablePrivileges(MetastoreContext metastoreContext, String databaseName, String tableName, PrestoPrincipal grantee, Set privileges) { throw new UnsupportedOperationException("grantTablePrivileges is not supported in AlluxioHiveMetastore"); } @Override - public void revokeTablePrivileges(String databaseName, String tableName, PrestoPrincipal grantee, Set privileges) + public void revokeTablePrivileges(MetastoreContext metastoreContext, String databaseName, String tableName, PrestoPrincipal grantee, Set privileges) { throw new UnsupportedOperationException("revokeTablePrivileges is not supported in AlluxioHiveMetastore"); 
} @Override - public Set listTablePrivileges(String databaseName, String tableName, PrestoPrincipal principal) + public Set listTablePrivileges(MetastoreContext metastoreContext, String databaseName, String tableName, PrestoPrincipal principal) { throw new UnsupportedOperationException("listTablePrivileges is not supported in AlluxioHiveMetastore"); } diff --git a/presto-hive-metastore/src/main/java/com/facebook/presto/hive/metastore/file/FileHiveMetastore.java b/presto-hive-metastore/src/main/java/com/facebook/presto/hive/metastore/file/FileHiveMetastore.java index 3411ea82a119c..0d4ad8fceda68 100644 --- a/presto-hive-metastore/src/main/java/com/facebook/presto/hive/metastore/file/FileHiveMetastore.java +++ b/presto-hive-metastore/src/main/java/com/facebook/presto/hive/metastore/file/FileHiveMetastore.java @@ -28,6 +28,7 @@ import com.facebook.presto.hive.metastore.ExtendedHiveMetastore; import com.facebook.presto.hive.metastore.HiveColumnStatistics; import com.facebook.presto.hive.metastore.HivePrivilegeInfo; +import com.facebook.presto.hive.metastore.MetastoreContext; import com.facebook.presto.hive.metastore.MetastoreUtil; import com.facebook.presto.hive.metastore.Partition; import com.facebook.presto.hive.metastore.PartitionNameWithVersion; @@ -145,7 +146,7 @@ public FileHiveMetastore(HdfsEnvironment hdfsEnvironment, String catalogDirector } @Override - public synchronized void createDatabase(Database database) + public synchronized void createDatabase(MetastoreContext metastoreContext, Database database) { requireNonNull(database, "database is null"); @@ -153,19 +154,19 @@ public synchronized void createDatabase(Database database) throw new PrestoException(HIVE_METASTORE_ERROR, "Database can not be created with a location set"); } - verifyDatabaseNotExists(database.getDatabaseName()); + verifyDatabaseNotExists(metastoreContext, database.getDatabaseName()); Path databaseMetadataDirectory = getDatabaseMetadataDirectory(database.getDatabaseName()); 
writeSchemaFile("database", databaseMetadataDirectory, databaseCodec, new DatabaseMetadata(database), false); } @Override - public synchronized void dropDatabase(String databaseName) + public synchronized void dropDatabase(MetastoreContext metastoreContext, String databaseName) { requireNonNull(databaseName, "databaseName is null"); - getRequiredDatabase(databaseName); - if (!getAllTables(databaseName).orElse(ImmutableList.of()).isEmpty()) { + getRequiredDatabase(metastoreContext, databaseName); + if (!getAllTables(metastoreContext, databaseName).orElse(ImmutableList.of()).isEmpty()) { throw new PrestoException(HIVE_METASTORE_ERROR, "Database " + databaseName + " is not empty"); } @@ -173,13 +174,13 @@ public synchronized void dropDatabase(String databaseName) } @Override - public synchronized void renameDatabase(String databaseName, String newDatabaseName) + public synchronized void renameDatabase(MetastoreContext metastoreContext, String databaseName, String newDatabaseName) { requireNonNull(databaseName, "databaseName is null"); requireNonNull(newDatabaseName, "newDatabaseName is null"); - getRequiredDatabase(databaseName); - verifyDatabaseNotExists(newDatabaseName); + getRequiredDatabase(metastoreContext, databaseName); + verifyDatabaseNotExists(metastoreContext, newDatabaseName); try { if (!metadataFileSystem.rename(getDatabaseMetadataDirectory(databaseName), getDatabaseMetadataDirectory(newDatabaseName))) { @@ -192,7 +193,7 @@ public synchronized void renameDatabase(String databaseName, String newDatabaseN } @Override - public synchronized Optional getDatabase(String databaseName) + public synchronized Optional getDatabase(MetastoreContext metastoreContext, String databaseName) { requireNonNull(databaseName, "databaseName is null"); @@ -201,21 +202,21 @@ public synchronized Optional getDatabase(String databaseName) .map(databaseMetadata -> databaseMetadata.toDatabase(databaseName, databaseMetadataDirectory.toString())); } - private Database 
getRequiredDatabase(String databaseName) + private Database getRequiredDatabase(MetastoreContext metastoreContext, String databaseName) { - return getDatabase(databaseName) + return getDatabase(metastoreContext, databaseName) .orElseThrow(() -> new SchemaNotFoundException(databaseName)); } - private void verifyDatabaseNotExists(String databaseName) + private void verifyDatabaseNotExists(MetastoreContext metastoreContext, String databaseName) { - if (getDatabase(databaseName).isPresent()) { + if (getDatabase(metastoreContext, databaseName).isPresent()) { throw new SchemaAlreadyExistsException(databaseName); } } @Override - public synchronized List getAllDatabases() + public synchronized List getAllDatabases(MetastoreContext metastoreContext) { List databases = getChildSchemaDirectories(catalogDirectory).stream() .map(Path::getName) @@ -224,11 +225,11 @@ public synchronized List getAllDatabases() } @Override - public synchronized void createTable(Table table, PrincipalPrivileges principalPrivileges) + public synchronized void createTable(MetastoreContext metastoreContext, Table table, PrincipalPrivileges principalPrivileges) { checkArgument(!table.getTableType().equals(TEMPORARY_TABLE), "temporary tables must never be committed to the metastore"); - verifyTableNotExists(table.getDatabaseName(), table.getTableName()); + verifyTableNotExists(metastoreContext, table.getDatabaseName(), table.getTableName()); Path tableMetadataDirectory = getTableMetadataDirectory(table); @@ -263,15 +264,15 @@ else if (table.getTableType().equals(EXTERNAL_TABLE)) { writeSchemaFile("table", tableMetadataDirectory, tableCodec, new TableMetadata(table), false); for (Entry> entry : principalPrivileges.getUserPrivileges().asMap().entrySet()) { - setTablePrivileges(new PrestoPrincipal(USER, entry.getKey()), table.getDatabaseName(), table.getTableName(), entry.getValue()); + setTablePrivileges(metastoreContext, new PrestoPrincipal(USER, entry.getKey()), table.getDatabaseName(), 
table.getTableName(), entry.getValue()); } for (Entry> entry : principalPrivileges.getRolePrivileges().asMap().entrySet()) { - setTablePrivileges(new PrestoPrincipal(ROLE, entry.getKey()), table.getDatabaseName(), table.getTableName(), entry.getValue()); + setTablePrivileges(metastoreContext, new PrestoPrincipal(ROLE, entry.getKey()), table.getDatabaseName(), table.getTableName(), entry.getValue()); } } @Override - public synchronized Optional
getTable(String databaseName, String tableName) + public synchronized Optional
getTable(MetastoreContext metastoreContext, String databaseName, String tableName) { requireNonNull(databaseName, "databaseName is null"); requireNonNull(tableName, "tableName is null"); @@ -282,13 +283,13 @@ public synchronized Optional
getTable(String databaseName, String tableNa } @Override - public Set getSupportedColumnStatistics(Type type) + public Set getSupportedColumnStatistics(MetastoreContext metastoreContext, Type type) { return MetastoreUtil.getSupportedColumnStatistics(type); } @Override - public synchronized PartitionStatistics getTableStatistics(String databaseName, String tableName) + public synchronized PartitionStatistics getTableStatistics(MetastoreContext metastoreContext, String databaseName, String tableName) { Path tableMetadataDirectory = getTableMetadataDirectory(databaseName, tableName); TableMetadata tableMetadata = readSchemaFile("table", tableMetadataDirectory, tableCodec) @@ -299,9 +300,9 @@ public synchronized PartitionStatistics getTableStatistics(String databaseName, } @Override - public synchronized Map getPartitionStatistics(String databaseName, String tableName, Set partitionNames) + public synchronized Map getPartitionStatistics(MetastoreContext metastoreContext, String databaseName, String tableName, Set partitionNames) { - Table table = getRequiredTable(databaseName, tableName); + Table table = getRequiredTable(metastoreContext, databaseName, tableName); ImmutableMap.Builder statistics = ImmutableMap.builder(); for (String partitionName : partitionNames) { List partitionValues = extractPartitionValues(partitionName); @@ -314,23 +315,23 @@ public synchronized Map getPartitionStatistics(Stri return statistics.build(); } - private Table getRequiredTable(String databaseName, String tableName) + private Table getRequiredTable(MetastoreContext metastoreContext, String databaseName, String tableName) { - return getTable(databaseName, tableName) + return getTable(metastoreContext, databaseName, tableName) .orElseThrow(() -> new TableNotFoundException(new SchemaTableName(databaseName, tableName))); } - private void verifyTableNotExists(String newDatabaseName, String newTableName) + private void verifyTableNotExists(MetastoreContext metastoreContext, String 
newDatabaseName, String newTableName) { - if (getTable(newDatabaseName, newTableName).isPresent()) { + if (getTable(metastoreContext, newDatabaseName, newTableName).isPresent()) { throw new TableAlreadyExistsException(new SchemaTableName(newDatabaseName, newTableName)); } } @Override - public synchronized void updateTableStatistics(String databaseName, String tableName, Function update) + public synchronized void updateTableStatistics(MetastoreContext metastoreContext, String databaseName, String tableName, Function update) { - PartitionStatistics originalStatistics = getTableStatistics(databaseName, tableName); + PartitionStatistics originalStatistics = getTableStatistics(metastoreContext, databaseName, tableName); PartitionStatistics updatedStatistics = update.apply(originalStatistics); Path tableMetadataDirectory = getTableMetadataDirectory(databaseName, tableName); @@ -345,15 +346,15 @@ public synchronized void updateTableStatistics(String databaseName, String table } @Override - public synchronized void updatePartitionStatistics(String databaseName, String tableName, String partitionName, Function update) + public synchronized void updatePartitionStatistics(MetastoreContext metastoreContext, String databaseName, String tableName, String partitionName, Function update) { - PartitionStatistics originalStatistics = getPartitionStatistics(databaseName, tableName, ImmutableSet.of(partitionName)).get(partitionName); + PartitionStatistics originalStatistics = getPartitionStatistics(metastoreContext, databaseName, tableName, ImmutableSet.of(partitionName)).get(partitionName); if (originalStatistics == null) { throw new PrestoException(HIVE_PARTITION_DROPPED_DURING_QUERY, "Statistics result does not contain entry for partition: " + partitionName); } PartitionStatistics updatedStatistics = update.apply(originalStatistics); - Table table = getRequiredTable(databaseName, tableName); + Table table = getRequiredTable(metastoreContext, databaseName, tableName); List 
partitionValues = extractPartitionValues(partitionName); Path partitionDirectory = getPartitionMetadataDirectory(table, partitionValues); PartitionMetadata partitionMetadata = readSchemaFile("partition", partitionDirectory, partitionCodec) @@ -367,11 +368,11 @@ public synchronized void updatePartitionStatistics(String databaseName, String t } @Override - public synchronized Optional> getAllTables(String databaseName) + public synchronized Optional> getAllTables(MetastoreContext metastoreContext, String databaseName) { requireNonNull(databaseName, "databaseName is null"); - Optional database = getDatabase(databaseName); + Optional database = getDatabase(metastoreContext, databaseName); if (!database.isPresent()) { return Optional.empty(); } @@ -384,15 +385,15 @@ public synchronized Optional> getAllTables(String databaseName) } @Override - public synchronized Optional> getAllViews(String databaseName) + public synchronized Optional> getAllViews(MetastoreContext metastoreContext, String databaseName) { - Optional> tables = getAllTables(databaseName); + Optional> tables = getAllTables(metastoreContext, databaseName); if (!tables.isPresent()) { return Optional.empty(); } List views = tables.get().stream() - .map(tableName -> getTable(databaseName, tableName)) + .map(tableName -> getTable(metastoreContext, databaseName, tableName)) .filter(Optional::isPresent) .map(Optional::get) .filter(table -> table.getTableType().equals(VIRTUAL_VIEW)) @@ -403,12 +404,12 @@ public synchronized Optional> getAllViews(String databaseName) } @Override - public synchronized void dropTable(String databaseName, String tableName, boolean deleteData) + public synchronized void dropTable(MetastoreContext metastoreContext, String databaseName, String tableName, boolean deleteData) { requireNonNull(databaseName, "databaseName is null"); requireNonNull(tableName, "tableName is null"); - Table table = getRequiredTable(databaseName, tableName); + Table table = getRequiredTable(metastoreContext, 
databaseName, tableName); Path tableMetadataDirectory = getTableMetadataDirectory(databaseName, tableName); @@ -424,11 +425,11 @@ public synchronized void dropTable(String databaseName, String tableName, boolea } @Override - public synchronized void replaceTable(String databaseName, String tableName, Table newTable, PrincipalPrivileges principalPrivileges) + public synchronized void replaceTable(MetastoreContext metastoreContext, String databaseName, String tableName, Table newTable, PrincipalPrivileges principalPrivileges) { checkArgument(!newTable.getTableType().equals(TEMPORARY_TABLE), "temporary tables must never be stored in the metastore"); - Table table = getRequiredTable(databaseName, tableName); + Table table = getRequiredTable(metastoreContext, databaseName, tableName); if (!table.getTableType().equals(VIRTUAL_VIEW) || !newTable.getTableType().equals(VIRTUAL_VIEW)) { throw new PrestoException(HIVE_METASTORE_ERROR, "Only views can be updated with replaceTable"); } @@ -443,26 +444,26 @@ public synchronized void replaceTable(String databaseName, String tableName, Tab deleteTablePrivileges(table); for (Entry> entry : principalPrivileges.getUserPrivileges().asMap().entrySet()) { - setTablePrivileges(new PrestoPrincipal(USER, entry.getKey()), table.getDatabaseName(), table.getTableName(), entry.getValue()); + setTablePrivileges(metastoreContext, new PrestoPrincipal(USER, entry.getKey()), table.getDatabaseName(), table.getTableName(), entry.getValue()); } for (Entry> entry : principalPrivileges.getRolePrivileges().asMap().entrySet()) { - setTablePrivileges(new PrestoPrincipal(ROLE, entry.getKey()), table.getDatabaseName(), table.getTableName(), entry.getValue()); + setTablePrivileges(metastoreContext, new PrestoPrincipal(ROLE, entry.getKey()), table.getDatabaseName(), table.getTableName(), entry.getValue()); } } @Override - public synchronized void renameTable(String databaseName, String tableName, String newDatabaseName, String newTableName) + public 
synchronized void renameTable(MetastoreContext metastoreContext, String databaseName, String tableName, String newDatabaseName, String newTableName) { requireNonNull(databaseName, "databaseName is null"); requireNonNull(tableName, "tableName is null"); requireNonNull(newDatabaseName, "newDatabaseName is null"); requireNonNull(newTableName, "newTableName is null"); - getRequiredTable(databaseName, tableName); - getRequiredDatabase(newDatabaseName); + getRequiredTable(metastoreContext, databaseName, tableName); + getRequiredDatabase(metastoreContext, newDatabaseName); // verify new table does not exist - verifyTableNotExists(newDatabaseName, newTableName); + verifyTableNotExists(metastoreContext, newDatabaseName, newTableName); try { if (!metadataFileSystem.rename(getTableMetadataDirectory(databaseName, tableName), getTableMetadataDirectory(newDatabaseName, newTableName))) { @@ -475,7 +476,7 @@ public synchronized void renameTable(String databaseName, String tableName, Stri } @Override - public synchronized void addColumn(String databaseName, String tableName, String columnName, HiveType columnType, String columnComment) + public synchronized void addColumn(MetastoreContext metastoreContext, String databaseName, String tableName, String columnName, HiveType columnType, String columnComment) { alterTable(databaseName, tableName, oldTable -> { if (oldTable.getColumn(columnName).isPresent()) { @@ -490,7 +491,7 @@ public synchronized void addColumn(String databaseName, String tableName, String } @Override - public synchronized void renameColumn(String databaseName, String tableName, String oldColumnName, String newColumnName) + public synchronized void renameColumn(MetastoreContext metastoreContext, String databaseName, String tableName, String oldColumnName, String newColumnName) { alterTable(databaseName, tableName, oldTable -> { if (oldTable.getColumn(newColumnName).isPresent()) { @@ -521,10 +522,10 @@ public synchronized void renameColumn(String databaseName, String 
tableName, Str } @Override - public synchronized void dropColumn(String databaseName, String tableName, String columnName) + public synchronized void dropColumn(MetastoreContext metastoreContext, String databaseName, String tableName, String columnName) { alterTable(databaseName, tableName, oldTable -> { - verifyCanDropColumn(this, databaseName, tableName, columnName); + verifyCanDropColumn(this, metastoreContext, databaseName, tableName, columnName); if (!oldTable.getColumn(columnName).isPresent()) { SchemaTableName name = new SchemaTableName(databaseName, tableName); throw new ColumnNotFoundException(name, columnName); @@ -559,13 +560,13 @@ private void alterTable(String databaseName, String tableName, Function partitions) + public synchronized void addPartitions(MetastoreContext metastoreContext, String databaseName, String tableName, List partitions) { requireNonNull(databaseName, "databaseName is null"); requireNonNull(tableName, "tableName is null"); requireNonNull(partitions, "partitions is null"); - Table table = getRequiredTable(databaseName, tableName); + Table table = getRequiredTable(metastoreContext, databaseName, tableName); checkArgument(EnumSet.of(MANAGED_TABLE, EXTERNAL_TABLE, MATERIALIZED_VIEW).contains(table.getTableType()), "Invalid table type: %s", table.getTableType()); @@ -641,13 +642,13 @@ else if (table.getTableType().equals(EXTERNAL_TABLE)) { } @Override - public synchronized void dropPartition(String databaseName, String tableName, List partitionValues, boolean deleteData) + public synchronized void dropPartition(MetastoreContext metastoreContext, String databaseName, String tableName, List partitionValues, boolean deleteData) { requireNonNull(databaseName, "databaseName is null"); requireNonNull(tableName, "tableName is null"); requireNonNull(partitionValues, "partitionValues is null"); - Optional
tableReference = getTable(databaseName, tableName); + Optional
tableReference = getTable(metastoreContext, databaseName, tableName); if (!tableReference.isPresent()) { return; } @@ -663,9 +664,9 @@ public synchronized void dropPartition(String databaseName, String tableName, Li } @Override - public synchronized void alterPartition(String databaseName, String tableName, PartitionWithStatistics partitionWithStatistics) + public synchronized void alterPartition(MetastoreContext metastoreContext, String databaseName, String tableName, PartitionWithStatistics partitionWithStatistics) { - Table table = getRequiredTable(databaseName, tableName); + Table table = getRequiredTable(metastoreContext, databaseName, tableName); Partition partition = partitionWithStatistics.getPartition(); verifiedPartition(table, partition); @@ -675,34 +676,34 @@ public synchronized void alterPartition(String databaseName, String tableName, P } @Override - public synchronized void createRole(String role, String grantor) + public synchronized void createRole(MetastoreContext metastoreContext, String role, String grantor) { - Set roles = new HashSet<>(listRoles()); + Set roles = new HashSet<>(listRoles(metastoreContext)); roles.add(role); writeFile("roles", getRolesFile(), rolesCodec, ImmutableList.copyOf(roles), true); } @Override - public synchronized void dropRole(String role) + public synchronized void dropRole(MetastoreContext metastoreContext, String role) { - Set roles = new HashSet<>(listRoles()); + Set roles = new HashSet<>(listRoles(metastoreContext)); roles.remove(role); writeFile("roles", getRolesFile(), rolesCodec, ImmutableList.copyOf(roles), true); - Set grants = listRoleGrantsSanitized(); + Set grants = listRoleGrantsSanitized(metastoreContext); writeRoleGrantsFile(grants); } @Override - public synchronized Set listRoles() + public synchronized Set listRoles(MetastoreContext metastoreContext) { return ImmutableSet.copyOf(readFile("roles", getRolesFile(), rolesCodec).orElse(ImmutableList.of())); } @Override - public synchronized void 
grantRoles(Set roles, Set grantees, boolean withAdminOption, PrestoPrincipal grantor) + public synchronized void grantRoles(MetastoreContext metastoreContext, Set roles, Set grantees, boolean withAdminOption, PrestoPrincipal grantor) { - Set existingRoles = listRoles(); - Set existingGrants = listRoleGrantsSanitized(); + Set existingRoles = listRoles(metastoreContext); + Set existingGrants = listRoleGrantsSanitized(metastoreContext); Set modifiedGrants = new HashSet<>(existingGrants); for (PrestoPrincipal grantee : grantees) { for (String role : roles) { @@ -731,9 +732,9 @@ public synchronized void grantRoles(Set roles, Set gran } @Override - public synchronized void revokeRoles(Set roles, Set grantees, boolean adminOptionFor, PrestoPrincipal grantor) + public synchronized void revokeRoles(MetastoreContext metastoreContext, Set roles, Set grantees, boolean adminOptionFor, PrestoPrincipal grantor) { - Set existingGrants = listRoleGrantsSanitized(); + Set existingGrants = listRoleGrantsSanitized(metastoreContext); Set modifiedGrants = new HashSet<>(existingGrants); for (PrestoPrincipal grantee : grantees) { for (String role : roles) { @@ -759,7 +760,7 @@ public synchronized void revokeRoles(Set roles, Set gra } @Override - public synchronized Set listRoleGrants(PrestoPrincipal principal) + public synchronized Set listRoleGrants(MetastoreContext metastoreContext, PrestoPrincipal principal) { ImmutableSet.Builder result = ImmutableSet.builder(); if (principal.getType() == USER) { @@ -768,16 +769,16 @@ public synchronized Set listRoleGrants(PrestoPrincipal principal) result.add(new RoleGrant(principal, ADMIN_ROLE_NAME, true)); } } - result.addAll(listRoleGrantsSanitized().stream() + result.addAll(listRoleGrantsSanitized(metastoreContext).stream() .filter(grant -> grant.getGrantee().equals(principal)) .collect(toSet())); return result.build(); } - private synchronized Set listRoleGrantsSanitized() + private synchronized Set listRoleGrantsSanitized(MetastoreContext 
metastoreContext) { Set grants = readRoleGrantsFile(); - Set existingRoles = listRoles(); + Set existingRoles = listRoles(metastoreContext); return removeDuplicatedEntries(removeNonExistingRoles(grants, existingRoles)); } @@ -818,12 +819,12 @@ private void writeRoleGrantsFile(Set roleGrants) } @Override - public synchronized Optional> getPartitionNames(String databaseName, String tableName) + public synchronized Optional> getPartitionNames(MetastoreContext metastoreContext, String databaseName, String tableName) { requireNonNull(databaseName, "databaseName is null"); requireNonNull(tableName, "tableName is null"); - Optional
tableReference = getTable(databaseName, tableName); + Optional
tableReference = getTable(metastoreContext, databaseName, tableName); if (!tableReference.isPresent()) { return Optional.empty(); } @@ -880,13 +881,13 @@ private List> listPartitions(Path director, List part } @Override - public synchronized Optional getPartition(String databaseName, String tableName, List partitionValues) + public synchronized Optional getPartition(MetastoreContext metastoreContext, String databaseName, String tableName, List partitionValues) { requireNonNull(databaseName, "databaseName is null"); requireNonNull(tableName, "tableName is null"); requireNonNull(partitionValues, "partitionValues is null"); - Optional
tableReference = getTable(databaseName, tableName); + Optional
tableReference = getTable(metastoreContext, databaseName, tableName); if (!tableReference.isPresent()) { return Optional.empty(); } @@ -899,13 +900,14 @@ public synchronized Optional getPartition(String databaseName, String @Override public synchronized List getPartitionNamesByFilter( + MetastoreContext metastoreContext, String databaseName, String tableName, Map partitionPredicates) { List parts = convertPredicateToParts(partitionPredicates); // todo this should be more efficient by selectively walking the directory tree - return getPartitionNames(databaseName, tableName).map(partitionNames -> partitionNames.stream() + return getPartitionNames(metastoreContext, databaseName, tableName).map(partitionNames -> partitionNames.stream() .filter(partitionName -> partitionMatches(partitionName, parts)) .collect(toImmutableList())) .orElse(ImmutableList.of()); @@ -913,6 +915,7 @@ public synchronized List getPartitionNamesByFilter( @Override public List getPartitionNamesWithVersionByFilter( + MetastoreContext metastoreContext, String databaseName, String tableName, Map partitionPredicates) @@ -936,21 +939,21 @@ private static boolean partitionMatches(String partitionName, List parts } @Override - public synchronized Map> getPartitionsByNames(String databaseName, String tableName, List partitionNames) + public synchronized Map> getPartitionsByNames(MetastoreContext metastoreContext, String databaseName, String tableName, List partitionNames) { ImmutableMap.Builder> builder = ImmutableMap.builder(); for (String partitionName : partitionNames) { List partitionValues = toPartitionValues(partitionName); - builder.put(partitionName, getPartition(databaseName, tableName, partitionValues)); + builder.put(partitionName, getPartition(metastoreContext, databaseName, tableName, partitionValues)); } return builder.build(); } @Override - public synchronized Set listTablePrivileges(String databaseName, String tableName, PrestoPrincipal principal) + public synchronized Set 
listTablePrivileges(MetastoreContext metastoreContext, String databaseName, String tableName, PrestoPrincipal principal) { ImmutableSet.Builder result = ImmutableSet.builder(); - Table table = getRequiredTable(databaseName, tableName); + Table table = getRequiredTable(metastoreContext, databaseName, tableName); if (principal.getType() == USER && table.getOwner().equals(principal.getName())) { result.add(new HivePrivilegeInfo(OWNERSHIP, true, principal, principal)); } @@ -962,21 +965,22 @@ public synchronized Set listTablePrivileges(String databaseNa } @Override - public synchronized void grantTablePrivileges(String databaseName, String tableName, PrestoPrincipal grantee, Set privileges) + public synchronized void grantTablePrivileges(MetastoreContext metastoreContext, String databaseName, String tableName, PrestoPrincipal grantee, Set privileges) { - setTablePrivileges(grantee, databaseName, tableName, privileges); + setTablePrivileges(metastoreContext, grantee, databaseName, tableName, privileges); } @Override - public synchronized void revokeTablePrivileges(String databaseName, String tableName, PrestoPrincipal grantee, Set privileges) + public synchronized void revokeTablePrivileges(MetastoreContext metastoreContext, String databaseName, String tableName, PrestoPrincipal grantee, Set privileges) { - Set currentPrivileges = listTablePrivileges(databaseName, tableName, grantee); + Set currentPrivileges = listTablePrivileges(metastoreContext, databaseName, tableName, grantee); currentPrivileges.removeAll(privileges); - setTablePrivileges(grantee, databaseName, tableName, currentPrivileges); + setTablePrivileges(metastoreContext, grantee, databaseName, tableName, currentPrivileges); } private synchronized void setTablePrivileges( + MetastoreContext metastoreContext, PrestoPrincipal grantee, String databaseName, String tableName, @@ -988,7 +992,7 @@ private synchronized void setTablePrivileges( requireNonNull(privileges, "privileges is null"); try { - Table table = 
getRequiredTable(databaseName, tableName); + Table table = getRequiredTable(metastoreContext, databaseName, tableName); Path permissionsDirectory = getPermissionsDirectory(table); diff --git a/presto-hive-metastore/src/main/java/com/facebook/presto/hive/metastore/glue/GlueHiveMetastore.java b/presto-hive-metastore/src/main/java/com/facebook/presto/hive/metastore/glue/GlueHiveMetastore.java index febbddde2ef17..770d64f512cab 100644 --- a/presto-hive-metastore/src/main/java/com/facebook/presto/hive/metastore/glue/GlueHiveMetastore.java +++ b/presto-hive-metastore/src/main/java/com/facebook/presto/hive/metastore/glue/GlueHiveMetastore.java @@ -68,6 +68,7 @@ import com.facebook.presto.hive.metastore.Database; import com.facebook.presto.hive.metastore.ExtendedHiveMetastore; import com.facebook.presto.hive.metastore.HivePrivilegeInfo; +import com.facebook.presto.hive.metastore.MetastoreContext; import com.facebook.presto.hive.metastore.MetastoreUtil; import com.facebook.presto.hive.metastore.Partition; import com.facebook.presto.hive.metastore.PartitionNameWithVersion; @@ -208,7 +209,7 @@ public GlueMetastoreStats getStats() } @Override - public Optional getDatabase(String databaseName) + public Optional getDatabase(MetastoreContext metastoreContext, String databaseName) { return stats.getGetDatabase().record(() -> { try { @@ -225,7 +226,7 @@ public Optional getDatabase(String databaseName) } @Override - public List getAllDatabases() + public List getAllDatabases(MetastoreContext metastoreContext) { try { List databaseNames = new ArrayList<>(); @@ -245,7 +246,7 @@ public List getAllDatabases() } @Override - public Optional
getTable(String databaseName, String tableName) + public Optional
getTable(MetastoreContext metastoreContext, String databaseName, String tableName) { return getGlueTable(databaseName, tableName).map(table -> GlueToPrestoConverter.convertTable(table, databaseName)); } @@ -276,30 +277,30 @@ private Optional getGlueTable(String da } @Override - public Set getSupportedColumnStatistics(Type type) + public Set getSupportedColumnStatistics(MetastoreContext metastoreContext, Type type) { return ImmutableSet.of(); } - private Table getTableOrElseThrow(String databaseName, String tableName) + private Table getTableOrElseThrow(MetastoreContext metastoreContext, String databaseName, String tableName) { - return getTable(databaseName, tableName) + return getTable(metastoreContext, databaseName, tableName) .orElseThrow(() -> new TableNotFoundException(new SchemaTableName(databaseName, tableName))); } @Override - public PartitionStatistics getTableStatistics(String databaseName, String tableName) + public PartitionStatistics getTableStatistics(MetastoreContext metastoreContext, String databaseName, String tableName) { - Table table = getTable(databaseName, tableName) + Table table = getTable(metastoreContext, databaseName, tableName) .orElseThrow(() -> new TableNotFoundException(new SchemaTableName(databaseName, tableName))); return new PartitionStatistics(getHiveBasicStatistics(table.getParameters()), ImmutableMap.of()); } @Override - public Map getPartitionStatistics(String databaseName, String tableName, Set partitionNames) + public Map getPartitionStatistics(MetastoreContext metastoreContext, String databaseName, String tableName, Set partitionNames) { ImmutableMap.Builder result = ImmutableMap.builder(); - getPartitionsByNames(databaseName, tableName, ImmutableList.copyOf(partitionNames)).forEach((partitionName, optionalPartition) -> { + getPartitionsByNames(metastoreContext, databaseName, tableName, ImmutableList.copyOf(partitionNames)).forEach((partitionName, optionalPartition) -> { Partition partition = optionalPartition.orElseThrow(() 
-> new PartitionNotFoundException(new SchemaTableName(databaseName, tableName), toPartitionValues(partitionName))); PartitionStatistics partitionStatistics = new PartitionStatistics(getHiveBasicStatistics(partition.getParameters()), ImmutableMap.of()); @@ -309,15 +310,15 @@ public Map getPartitionStatistics(String databaseNa } @Override - public void updateTableStatistics(String databaseName, String tableName, Function update) + public void updateTableStatistics(MetastoreContext metastoreContext, String databaseName, String tableName, Function update) { - PartitionStatistics currentStatistics = getTableStatistics(databaseName, tableName); + PartitionStatistics currentStatistics = getTableStatistics(metastoreContext, databaseName, tableName); PartitionStatistics updatedStatistics = update.apply(currentStatistics); if (!updatedStatistics.getColumnStatistics().isEmpty()) { throw new PrestoException(NOT_SUPPORTED, "Glue metastore does not support column level statistics"); } - Table table = getTableOrElseThrow(databaseName, tableName); + Table table = getTableOrElseThrow(metastoreContext, databaseName, tableName); try { TableInput tableInput = GlueInputConverter.convertTable(table); @@ -337,9 +338,9 @@ public void updateTableStatistics(String databaseName, String tableName, Functio } @Override - public void updatePartitionStatistics(String databaseName, String tableName, String partitionName, Function update) + public void updatePartitionStatistics(MetastoreContext metastoreContext, String databaseName, String tableName, String partitionName, Function update) { - PartitionStatistics currentStatistics = getPartitionStatistics(databaseName, tableName, ImmutableSet.of(partitionName)).get(partitionName); + PartitionStatistics currentStatistics = getPartitionStatistics(metastoreContext, databaseName, tableName, ImmutableSet.of(partitionName)).get(partitionName); if (currentStatistics == null) { throw new PrestoException(HIVE_PARTITION_DROPPED_DURING_QUERY, "Statistics 
result does not contain entry for partition: " + partitionName); } @@ -349,7 +350,7 @@ public void updatePartitionStatistics(String databaseName, String tableName, Str } List partitionValues = toPartitionValues(partitionName); - Partition partition = getPartition(databaseName, tableName, partitionValues) + Partition partition = getPartition(metastoreContext, databaseName, tableName, partitionValues) .orElseThrow(() -> new PartitionNotFoundException(new SchemaTableName(databaseName, tableName), partitionValues)); try { PartitionInput partitionInput = GlueInputConverter.convertPartition(partition); @@ -370,7 +371,7 @@ public void updatePartitionStatistics(String databaseName, String tableName, Str } @Override - public Optional> getAllTables(String databaseName) + public Optional> getAllTables(MetastoreContext metastoreContext, String databaseName) { try { List tableNames = new ArrayList<>(); @@ -394,7 +395,7 @@ public Optional> getAllTables(String databaseName) } @Override - public Optional> getAllViews(String databaseName) + public Optional> getAllViews(MetastoreContext metastoreContext, String databaseName) { try { List views = new ArrayList<>(); @@ -421,7 +422,7 @@ public Optional> getAllViews(String databaseName) } @Override - public void createDatabase(Database database) + public void createDatabase(MetastoreContext metastoreContext, Database database) { if (!database.getLocation().isPresent() && defaultDir.isPresent()) { String databaseLocation = new Path(defaultDir.get(), database.getDatabaseName()).toString(); @@ -447,7 +448,7 @@ public void createDatabase(Database database) } @Override - public void dropDatabase(String databaseName) + public void dropDatabase(MetastoreContext metastoreContext, String databaseName) { try { stats.getDeleteDatabase().record(() -> glueClient.deleteDatabase(new DeleteDatabaseRequest().withCatalogId(catalogId).withName(databaseName))); @@ -461,10 +462,10 @@ public void dropDatabase(String databaseName) } @Override - public void 
renameDatabase(String databaseName, String newDatabaseName) + public void renameDatabase(MetastoreContext metastoreContext, String databaseName, String newDatabaseName) { try { - Database database = getDatabase(databaseName).orElseThrow(() -> new SchemaNotFoundException(databaseName)); + Database database = getDatabase(metastoreContext, databaseName).orElseThrow(() -> new SchemaNotFoundException(databaseName)); DatabaseInput renamedDatabase = GlueInputConverter.convertDatabase(database).withName(newDatabaseName); stats.getUpdateDatabase().record(() -> glueClient.updateDatabase(new UpdateDatabaseRequest() .withCatalogId(catalogId) @@ -477,7 +478,7 @@ public void renameDatabase(String databaseName, String newDatabaseName) } @Override - public void createTable(Table table, PrincipalPrivileges principalPrivileges) + public void createTable(MetastoreContext metastoreContext, Table table, PrincipalPrivileges principalPrivileges) { try { TableInput input = GlueInputConverter.convertTable(table); @@ -498,9 +499,9 @@ public void createTable(Table table, PrincipalPrivileges principalPrivileges) } @Override - public void dropTable(String databaseName, String tableName, boolean deleteData) + public void dropTable(MetastoreContext metastoreContext, String databaseName, String tableName, boolean deleteData) { - Table table = getTableOrElseThrow(databaseName, tableName); + Table table = getTableOrElseThrow(metastoreContext, databaseName, tableName); try { stats.getDeleteTable().record(() -> glueClient.deleteTable(new DeleteTableRequest() @@ -535,7 +536,7 @@ private static void deleteDir(HdfsContext context, HdfsEnvironment hdfsEnvironme } @Override - public void replaceTable(String databaseName, String tableName, Table newTable, PrincipalPrivileges principalPrivileges) + public void replaceTable(MetastoreContext metastoreContext, String databaseName, String tableName, Table newTable, PrincipalPrivileges principalPrivileges) { try { TableInput newTableInput = 
GlueInputConverter.convertTable(newTable); @@ -553,13 +554,13 @@ public void replaceTable(String databaseName, String tableName, Table newTable, } @Override - public void renameTable(String databaseName, String tableName, String newDatabaseName, String newTableName) + public void renameTable(MetastoreContext metastoreContext, String databaseName, String tableName, String newDatabaseName, String newTableName) { throw new PrestoException(NOT_SUPPORTED, "Table rename is not yet supported by Glue service"); } @Override - public void addColumn(String databaseName, String tableName, String columnName, HiveType columnType, String columnComment) + public void addColumn(MetastoreContext metastoreContext, String databaseName, String tableName, String columnName, HiveType columnType, String columnComment) { com.amazonaws.services.glue.model.Table table = getGlueTableOrElseThrow(databaseName, tableName); ImmutableList.Builder newDataColumns = ImmutableList.builder(); @@ -570,7 +571,7 @@ public void addColumn(String databaseName, String tableName, String columnName, } @Override - public void renameColumn(String databaseName, String tableName, String oldColumnName, String newColumnName) + public void renameColumn(MetastoreContext metastoreContext, String databaseName, String tableName, String oldColumnName, String newColumnName) { com.amazonaws.services.glue.model.Table table = getGlueTableOrElseThrow(databaseName, tableName); if (table.getPartitionKeys() != null && table.getPartitionKeys().stream().anyMatch(c -> c.getName().equals(oldColumnName))) { @@ -593,9 +594,9 @@ public void renameColumn(String databaseName, String tableName, String oldColumn } @Override - public void dropColumn(String databaseName, String tableName, String columnName) + public void dropColumn(MetastoreContext metastoreContext, String databaseName, String tableName, String columnName) { - verifyCanDropColumn(this, databaseName, tableName, columnName); + verifyCanDropColumn(this, metastoreContext, 
databaseName, tableName, columnName); com.amazonaws.services.glue.model.Table table = getGlueTableOrElseThrow(databaseName, tableName); ImmutableList.Builder newDataColumns = ImmutableList.builder(); @@ -635,7 +636,7 @@ private void replaceGlueTable(String databaseName, String tableName, com.amazona } @Override - public Optional getPartition(String databaseName, String tableName, List partitionValues) + public Optional getPartition(MetastoreContext metastoreContext, String databaseName, String tableName, List partitionValues) { return stats.getGetPartition().record(() -> { try { @@ -656,9 +657,9 @@ public Optional getPartition(String databaseName, String tableName, L } @Override - public Optional> getPartitionNames(String databaseName, String tableName) + public Optional> getPartitionNames(MetastoreContext metastoreContext, String databaseName, String tableName) { - Table table = getTableOrElseThrow(databaseName, tableName); + Table table = getTableOrElseThrow(metastoreContext, databaseName, tableName); List partitions = getPartitions(databaseName, tableName, WILDCARD_EXPRESSION); return Optional.of(buildPartitionNames(table.getPartitionColumns(), partitions)); } @@ -676,11 +677,12 @@ public Optional> getPartitionNames(String databaseName, String tabl */ @Override public List getPartitionNamesByFilter( + MetastoreContext metastoreContext, String databaseName, String tableName, Map partitionPredicates) { - Table table = getTableOrElseThrow(databaseName, tableName); + Table table = getTableOrElseThrow(metastoreContext, databaseName, tableName); List parts = convertPredicateToParts(partitionPredicates); String expression = buildGlueExpression(table.getPartitionColumns(), parts); List partitions = getPartitions(databaseName, tableName, expression); @@ -689,6 +691,7 @@ public List getPartitionNamesByFilter( @Override public List getPartitionNamesWithVersionByFilter( + MetastoreContext metastoreContext, String databaseName, String tableName, Map partitionPredicates) @@ 
-771,7 +774,7 @@ private static List buildPartitionNames(List partitionColumns, L * @return Mapping of partition name to partition object */ @Override - public Map> getPartitionsByNames(String databaseName, String tableName, List partitionNames) + public Map> getPartitionsByNames(MetastoreContext metastoreContext, String databaseName, String tableName, List partitionNames) { requireNonNull(partitionNames, "partitionNames is null"); if (partitionNames.isEmpty()) { @@ -826,7 +829,7 @@ private List batchGetPartition(String databaseName, String tableName, } @Override - public void addPartitions(String databaseName, String tableName, List partitions) + public void addPartitions(MetastoreContext metastoreContext, String databaseName, String tableName, List partitions) { try { List> futures = new ArrayList<>(); @@ -871,10 +874,10 @@ private static void propagatePartitionErrorToPrestoException(String databaseName } @Override - public void dropPartition(String databaseName, String tableName, List parts, boolean deleteData) + public void dropPartition(MetastoreContext metastoreContext, String databaseName, String tableName, List parts, boolean deleteData) { - Table table = getTableOrElseThrow(databaseName, tableName); - Partition partition = getPartition(databaseName, tableName, parts) + Table table = getTableOrElseThrow(metastoreContext, databaseName, tableName); + Partition partition = getPartition(metastoreContext, databaseName, tableName, parts) .orElseThrow(() -> new PartitionNotFoundException(new SchemaTableName(databaseName, tableName), parts)); try { @@ -895,7 +898,7 @@ public void dropPartition(String databaseName, String tableName, List pa } @Override - public void alterPartition(String databaseName, String tableName, PartitionWithStatistics partition) + public void alterPartition(MetastoreContext metastoreContext, String databaseName, String tableName, PartitionWithStatistics partition) { try { PartitionInput newPartition = 
GlueInputConverter.convertPartition(partition); @@ -915,37 +918,37 @@ public void alterPartition(String databaseName, String tableName, PartitionWithS } @Override - public void createRole(String role, String grantor) + public void createRole(MetastoreContext metastoreContext, String role, String grantor) { throw new PrestoException(NOT_SUPPORTED, "createRole is not supported by Glue"); } @Override - public void dropRole(String role) + public void dropRole(MetastoreContext metastoreContext, String role) { throw new PrestoException(NOT_SUPPORTED, "dropRole is not supported by Glue"); } @Override - public Set listRoles() + public Set listRoles(MetastoreContext metastoreContext) { return ImmutableSet.of(PUBLIC_ROLE_NAME); } @Override - public void grantRoles(Set roles, Set grantees, boolean withAdminOption, PrestoPrincipal grantor) + public void grantRoles(MetastoreContext metastoreContext, Set roles, Set grantees, boolean withAdminOption, PrestoPrincipal grantor) { throw new PrestoException(NOT_SUPPORTED, "grantRoles is not supported by Glue"); } @Override - public void revokeRoles(Set roles, Set grantees, boolean adminOptionFor, PrestoPrincipal grantor) + public void revokeRoles(MetastoreContext metastoreContext, Set roles, Set grantees, boolean adminOptionFor, PrestoPrincipal grantor) { throw new PrestoException(NOT_SUPPORTED, "revokeRoles is not supported by Glue"); } @Override - public Set listRoleGrants(PrestoPrincipal principal) + public Set listRoleGrants(MetastoreContext metastoreContext, PrestoPrincipal principal) { if (principal.getType() == USER) { return ImmutableSet.of(new RoleGrant(principal, PUBLIC_ROLE_NAME, false)); @@ -954,19 +957,19 @@ public Set listRoleGrants(PrestoPrincipal principal) } @Override - public void grantTablePrivileges(String databaseName, String tableName, PrestoPrincipal grantee, Set privileges) + public void grantTablePrivileges(MetastoreContext metastoreContext, String databaseName, String tableName, PrestoPrincipal grantee, Set 
privileges) { throw new PrestoException(NOT_SUPPORTED, "grantTablePrivileges is not supported by Glue"); } @Override - public void revokeTablePrivileges(String databaseName, String tableName, PrestoPrincipal grantee, Set privileges) + public void revokeTablePrivileges(MetastoreContext metastoreContext, String databaseName, String tableName, PrestoPrincipal grantee, Set privileges) { throw new PrestoException(NOT_SUPPORTED, "revokeTablePrivileges is not supported by Glue"); } @Override - public Set listTablePrivileges(String databaseName, String tableName, PrestoPrincipal principal) + public Set listTablePrivileges(MetastoreContext metastoreContext, String databaseName, String tableName, PrestoPrincipal principal) { throw new PrestoException(NOT_SUPPORTED, "listTablePrivileges is not supported by Glue"); } diff --git a/presto-hive-metastore/src/main/java/com/facebook/presto/hive/metastore/thrift/BridgingHiveMetastore.java b/presto-hive-metastore/src/main/java/com/facebook/presto/hive/metastore/thrift/BridgingHiveMetastore.java index 3bb6963dd8e6f..daaaf365c78e2 100644 --- a/presto-hive-metastore/src/main/java/com/facebook/presto/hive/metastore/thrift/BridgingHiveMetastore.java +++ b/presto-hive-metastore/src/main/java/com/facebook/presto/hive/metastore/thrift/BridgingHiveMetastore.java @@ -21,6 +21,7 @@ import com.facebook.presto.hive.metastore.Database; import com.facebook.presto.hive.metastore.ExtendedHiveMetastore; import com.facebook.presto.hive.metastore.HivePrivilegeInfo; +import com.facebook.presto.hive.metastore.MetastoreContext; import com.facebook.presto.hive.metastore.MetastoreUtil; import com.facebook.presto.hive.metastore.Partition; import com.facebook.presto.hive.metastore.PartitionNameWithVersion; @@ -74,91 +75,91 @@ public BridgingHiveMetastore(HiveMetastore delegate, PartitionMutator partitionM } @Override - public Optional getDatabase(String databaseName) + public Optional getDatabase(MetastoreContext metastoreContext, String databaseName) { - 
return delegate.getDatabase(databaseName).map(ThriftMetastoreUtil::fromMetastoreApiDatabase); + return delegate.getDatabase(metastoreContext, databaseName).map(ThriftMetastoreUtil::fromMetastoreApiDatabase); } @Override - public List getAllDatabases() + public List getAllDatabases(MetastoreContext metastoreContext) { - return delegate.getAllDatabases(); + return delegate.getAllDatabases(metastoreContext); } @Override - public Optional
getTable(String databaseName, String tableName) + public Optional
getTable(MetastoreContext metastoreContext, String databaseName, String tableName) { - return delegate.getTable(databaseName, tableName).map(table -> { + return delegate.getTable(metastoreContext, databaseName, tableName).map(table -> { if (isAvroTableWithSchemaSet(table) || isCsvTable(table)) { - return fromMetastoreApiTable(table, delegate.getFields(databaseName, tableName).get()); + return fromMetastoreApiTable(table, delegate.getFields(metastoreContext, databaseName, tableName).get()); } return fromMetastoreApiTable(table); }); } @Override - public Set getSupportedColumnStatistics(Type type) + public Set getSupportedColumnStatistics(MetastoreContext metastoreContext, Type type) { - return delegate.getSupportedColumnStatistics(type); + return delegate.getSupportedColumnStatistics(metastoreContext, type); } @Override - public PartitionStatistics getTableStatistics(String databaseName, String tableName) + public PartitionStatistics getTableStatistics(MetastoreContext metastoreContext, String databaseName, String tableName) { - return delegate.getTableStatistics(databaseName, tableName); + return delegate.getTableStatistics(metastoreContext, databaseName, tableName); } @Override - public Map getPartitionStatistics(String databaseName, String tableName, Set partitionNames) + public Map getPartitionStatistics(MetastoreContext metastoreContext, String databaseName, String tableName, Set partitionNames) { - return delegate.getPartitionStatistics(databaseName, tableName, partitionNames); + return delegate.getPartitionStatistics(metastoreContext, databaseName, tableName, partitionNames); } @Override - public void updateTableStatistics(String databaseName, String tableName, Function update) + public void updateTableStatistics(MetastoreContext metastoreContext, String databaseName, String tableName, Function update) { - delegate.updateTableStatistics(databaseName, tableName, update); + delegate.updateTableStatistics(metastoreContext, databaseName, tableName, update); } 
@Override - public void updatePartitionStatistics(String databaseName, String tableName, String partitionName, Function update) + public void updatePartitionStatistics(MetastoreContext metastoreContext, String databaseName, String tableName, String partitionName, Function update) { - delegate.updatePartitionStatistics(databaseName, tableName, partitionName, update); + delegate.updatePartitionStatistics(metastoreContext, databaseName, tableName, partitionName, update); } @Override - public Optional> getAllTables(String databaseName) + public Optional> getAllTables(MetastoreContext metastoreContext, String databaseName) { - return delegate.getAllTables(databaseName); + return delegate.getAllTables(metastoreContext, databaseName); } @Override - public Optional> getAllViews(String databaseName) + public Optional> getAllViews(MetastoreContext metastoreContext, String databaseName) { - return delegate.getAllViews(databaseName); + return delegate.getAllViews(metastoreContext, databaseName); } @Override - public void createDatabase(Database database) + public void createDatabase(MetastoreContext metastoreContext, Database database) { - delegate.createDatabase(toMetastoreApiDatabase(database)); + delegate.createDatabase(metastoreContext, toMetastoreApiDatabase(database)); } @Override - public void dropDatabase(String databaseName) + public void dropDatabase(MetastoreContext metastoreContext, String databaseName) { - delegate.dropDatabase(databaseName); + delegate.dropDatabase(metastoreContext, databaseName); } @Override - public void renameDatabase(String databaseName, String newDatabaseName) + public void renameDatabase(MetastoreContext metastoreContext, String databaseName, String newDatabaseName) { - org.apache.hadoop.hive.metastore.api.Database database = delegate.getDatabase(databaseName) + org.apache.hadoop.hive.metastore.api.Database database = delegate.getDatabase(metastoreContext, databaseName) .orElseThrow(() -> new SchemaNotFoundException(databaseName)); 
database.setName(newDatabaseName); - delegate.alterDatabase(databaseName, database); + delegate.alterDatabase(metastoreContext, databaseName, database); - delegate.getDatabase(databaseName).ifPresent(newDatabase -> { + delegate.getDatabase(metastoreContext, databaseName).ifPresent(newDatabase -> { if (newDatabase.getName().equals(databaseName)) { throw new PrestoException(NOT_SUPPORTED, "Hive metastore does not support renaming schemas"); } @@ -166,55 +167,55 @@ public void renameDatabase(String databaseName, String newDatabaseName) } @Override - public void createTable(Table table, PrincipalPrivileges principalPrivileges) + public void createTable(MetastoreContext metastoreContext, Table table, PrincipalPrivileges principalPrivileges) { checkArgument(!table.getTableType().equals(TEMPORARY_TABLE), "temporary tables must never be stored in the metastore"); - delegate.createTable(toMetastoreApiTable(table, principalPrivileges)); + delegate.createTable(metastoreContext, toMetastoreApiTable(table, principalPrivileges)); } @Override - public void dropTable(String databaseName, String tableName, boolean deleteData) + public void dropTable(MetastoreContext metastoreContext, String databaseName, String tableName, boolean deleteData) { - delegate.dropTable(databaseName, tableName, deleteData); + delegate.dropTable(metastoreContext, databaseName, tableName, deleteData); } @Override - public void replaceTable(String databaseName, String tableName, Table newTable, PrincipalPrivileges principalPrivileges) + public void replaceTable(MetastoreContext metastoreContext, String databaseName, String tableName, Table newTable, PrincipalPrivileges principalPrivileges) { checkArgument(!newTable.getTableType().equals(TEMPORARY_TABLE), "temporary tables must never be stored in the metastore"); - alterTable(databaseName, tableName, toMetastoreApiTable(newTable, principalPrivileges)); + alterTable(metastoreContext, databaseName, tableName, toMetastoreApiTable(newTable, 
principalPrivileges)); } @Override - public void renameTable(String databaseName, String tableName, String newDatabaseName, String newTableName) + public void renameTable(MetastoreContext metastoreContext, String databaseName, String tableName, String newDatabaseName, String newTableName) { - Optional source = delegate.getTable(databaseName, tableName); + Optional source = delegate.getTable(metastoreContext, databaseName, tableName); if (!source.isPresent()) { throw new TableNotFoundException(new SchemaTableName(databaseName, tableName)); } org.apache.hadoop.hive.metastore.api.Table table = source.get(); table.setDbName(newDatabaseName); table.setTableName(newTableName); - alterTable(databaseName, tableName, table); + alterTable(metastoreContext, databaseName, tableName, table); } @Override - public void addColumn(String databaseName, String tableName, String columnName, HiveType columnType, String columnComment) + public void addColumn(MetastoreContext metastoreContext, String databaseName, String tableName, String columnName, HiveType columnType, String columnComment) { - Optional source = delegate.getTable(databaseName, tableName); + Optional source = delegate.getTable(metastoreContext, databaseName, tableName); if (!source.isPresent()) { throw new TableNotFoundException(new SchemaTableName(databaseName, tableName)); } org.apache.hadoop.hive.metastore.api.Table table = source.get(); table.getSd().getCols().add( new FieldSchema(columnName, columnType.getHiveTypeName().toString(), columnComment)); - alterTable(databaseName, tableName, table); + alterTable(metastoreContext, databaseName, tableName, table); } @Override - public void renameColumn(String databaseName, String tableName, String oldColumnName, String newColumnName) + public void renameColumn(MetastoreContext metastoreContext, String databaseName, String tableName, String oldColumnName, String newColumnName) { - Optional source = delegate.getTable(databaseName, tableName); + Optional source = 
delegate.getTable(metastoreContext, databaseName, tableName); if (!source.isPresent()) { throw new TableNotFoundException(new SchemaTableName(databaseName, tableName)); } @@ -229,56 +230,58 @@ public void renameColumn(String databaseName, String tableName, String oldColumn fieldSchema.setName(newColumnName); } } - alterTable(databaseName, tableName, table); + alterTable(metastoreContext, databaseName, tableName, table); } @Override - public void dropColumn(String databaseName, String tableName, String columnName) + public void dropColumn(MetastoreContext metastoreContext, String databaseName, String tableName, String columnName) { - verifyCanDropColumn(this, databaseName, tableName, columnName); - org.apache.hadoop.hive.metastore.api.Table table = delegate.getTable(databaseName, tableName) + verifyCanDropColumn(this, metastoreContext, databaseName, tableName, columnName); + org.apache.hadoop.hive.metastore.api.Table table = delegate.getTable(metastoreContext, databaseName, tableName) .orElseThrow(() -> new TableNotFoundException(new SchemaTableName(databaseName, tableName))); table.getSd().getCols().removeIf(fieldSchema -> fieldSchema.getName().equals(columnName)); - alterTable(databaseName, tableName, table); + alterTable(metastoreContext, databaseName, tableName, table); } - private void alterTable(String databaseName, String tableName, org.apache.hadoop.hive.metastore.api.Table table) + private void alterTable(MetastoreContext metastoreContext, String databaseName, String tableName, org.apache.hadoop.hive.metastore.api.Table table) { - delegate.alterTable(databaseName, tableName, table); + delegate.alterTable(metastoreContext, databaseName, tableName, table); } @Override - public Optional getPartition(String databaseName, String tableName, List partitionValues) + public Optional getPartition(MetastoreContext metastoreContext, String databaseName, String tableName, List partitionValues) { - return delegate.getPartition(databaseName, tableName, 
partitionValues).map(partition -> fromMetastoreApiPartition(partition, partitionMutator)); + return delegate.getPartition(metastoreContext, databaseName, tableName, partitionValues).map(partition -> fromMetastoreApiPartition(partition, partitionMutator)); } @Override - public Optional> getPartitionNames(String databaseName, String tableName) + public Optional> getPartitionNames(MetastoreContext metastoreContext, String databaseName, String tableName) { - return delegate.getPartitionNames(databaseName, tableName); + return delegate.getPartitionNames(metastoreContext, databaseName, tableName); } @Override public List getPartitionNamesByFilter( + MetastoreContext metastoreContext, String databaseName, String tableName, Map partitionPredicates) { - return delegate.getPartitionNamesByFilter(databaseName, tableName, partitionPredicates); + return delegate.getPartitionNamesByFilter(metastoreContext, databaseName, tableName, partitionPredicates); } @Override public List getPartitionNamesWithVersionByFilter( + MetastoreContext metastoreContext, String databaseName, String tableName, Map partitionPredicates) { - return delegate.getPartitionNamesWithVersionByFilter(databaseName, tableName, partitionPredicates); + return delegate.getPartitionNamesWithVersionByFilter(metastoreContext, databaseName, tableName, partitionPredicates); } @Override - public Map> getPartitionsByNames(String databaseName, String tableName, List partitionNames) + public Map> getPartitionsByNames(MetastoreContext metastoreContext, String databaseName, String tableName, List partitionNames) { requireNonNull(partitionNames, "partitionNames is null"); if (partitionNames.isEmpty()) { @@ -286,7 +289,7 @@ public Map> getPartitionsByNames(String databaseName } Map> partitionNameToPartitionValuesMap = partitionNames.stream() .collect(Collectors.toMap(identity(), MetastoreUtil::toPartitionValues)); - Map, Partition> partitionValuesToPartitionMap = delegate.getPartitionsByNames(databaseName, tableName, 
partitionNames).stream() + Map, Partition> partitionValuesToPartitionMap = delegate.getPartitionsByNames(metastoreContext, databaseName, tableName, partitionNames).stream() .map(partition -> fromMetastoreApiPartition(partition, partitionMutator)) .collect(Collectors.toMap(Partition::getValues, identity())); ImmutableMap.Builder> resultBuilder = ImmutableMap.builder(); @@ -298,74 +301,74 @@ public Map> getPartitionsByNames(String databaseName } @Override - public void addPartitions(String databaseName, String tableName, List partitions) + public void addPartitions(MetastoreContext metastoreContext, String databaseName, String tableName, List partitions) { - delegate.addPartitions(databaseName, tableName, partitions); + delegate.addPartitions(metastoreContext, databaseName, tableName, partitions); } @Override - public void dropPartition(String databaseName, String tableName, List parts, boolean deleteData) + public void dropPartition(MetastoreContext metastoreContext, String databaseName, String tableName, List parts, boolean deleteData) { - delegate.dropPartition(databaseName, tableName, parts, deleteData); + delegate.dropPartition(metastoreContext, databaseName, tableName, parts, deleteData); } @Override - public void alterPartition(String databaseName, String tableName, PartitionWithStatistics partition) + public void alterPartition(MetastoreContext metastoreContext, String databaseName, String tableName, PartitionWithStatistics partition) { - delegate.alterPartition(databaseName, tableName, partition); + delegate.alterPartition(metastoreContext, databaseName, tableName, partition); } @Override - public void createRole(String role, String grantor) + public void createRole(MetastoreContext metastoreContext, String role, String grantor) { - delegate.createRole(role, grantor); + delegate.createRole(metastoreContext, role, grantor); } @Override - public void dropRole(String role) + public void dropRole(MetastoreContext metastoreContext, String role) { - 
delegate.dropRole(role); + delegate.dropRole(metastoreContext, role); } @Override - public Set listRoles() + public Set listRoles(MetastoreContext metastoreContext) { - return delegate.listRoles(); + return delegate.listRoles(metastoreContext); } @Override - public void grantRoles(Set roles, Set grantees, boolean withAdminOption, PrestoPrincipal grantor) + public void grantRoles(MetastoreContext metastoreContext, Set roles, Set grantees, boolean withAdminOption, PrestoPrincipal grantor) { - delegate.grantRoles(roles, grantees, withAdminOption, grantor); + delegate.grantRoles(metastoreContext, roles, grantees, withAdminOption, grantor); } @Override - public void revokeRoles(Set roles, Set grantees, boolean adminOptionFor, PrestoPrincipal grantor) + public void revokeRoles(MetastoreContext metastoreContext, Set roles, Set grantees, boolean adminOptionFor, PrestoPrincipal grantor) { - delegate.revokeRoles(roles, grantees, adminOptionFor, grantor); + delegate.revokeRoles(metastoreContext, roles, grantees, adminOptionFor, grantor); } @Override - public Set listRoleGrants(PrestoPrincipal principal) + public Set listRoleGrants(MetastoreContext metastoreContext, PrestoPrincipal principal) { - return delegate.listRoleGrants(principal); + return delegate.listRoleGrants(metastoreContext, principal); } @Override - public void grantTablePrivileges(String databaseName, String tableName, PrestoPrincipal grantee, Set privileges) + public void grantTablePrivileges(MetastoreContext metastoreContext, String databaseName, String tableName, PrestoPrincipal grantee, Set privileges) { - delegate.grantTablePrivileges(databaseName, tableName, grantee, privileges); + delegate.grantTablePrivileges(metastoreContext, databaseName, tableName, grantee, privileges); } @Override - public void revokeTablePrivileges(String databaseName, String tableName, PrestoPrincipal grantee, Set privileges) + public void revokeTablePrivileges(MetastoreContext metastoreContext, String databaseName, String 
tableName, PrestoPrincipal grantee, Set privileges) { - delegate.revokeTablePrivileges(databaseName, tableName, grantee, privileges); + delegate.revokeTablePrivileges(metastoreContext, databaseName, tableName, grantee, privileges); } @Override - public Set listTablePrivileges(String databaseName, String tableName, PrestoPrincipal principal) + public Set listTablePrivileges(MetastoreContext metastoreContext, String databaseName, String tableName, PrestoPrincipal principal) { - return delegate.listTablePrivileges(databaseName, tableName, principal); + return delegate.listTablePrivileges(metastoreContext, databaseName, tableName, principal); } } diff --git a/presto-hive-metastore/src/main/java/com/facebook/presto/hive/metastore/thrift/HiveCluster.java b/presto-hive-metastore/src/main/java/com/facebook/presto/hive/metastore/thrift/HiveCluster.java index dfa6e04fa9565..0a5b330285eca 100644 --- a/presto-hive-metastore/src/main/java/com/facebook/presto/hive/metastore/thrift/HiveCluster.java +++ b/presto-hive-metastore/src/main/java/com/facebook/presto/hive/metastore/thrift/HiveCluster.java @@ -15,6 +15,8 @@ import org.apache.thrift.TException; +import java.util.Optional; + /** * A Hive cluster is a single logical installation of Hive. 
It might * have multiple instances of the metastore service (for scalability @@ -29,6 +31,6 @@ public interface HiveCluster /** * Create a connected {@link HiveMetastoreClient} to this HiveCluster */ - HiveMetastoreClient createMetastoreClient() + HiveMetastoreClient createMetastoreClient(Optional token) throws TException; } diff --git a/presto-hive-metastore/src/main/java/com/facebook/presto/hive/metastore/thrift/HiveMetastore.java b/presto-hive-metastore/src/main/java/com/facebook/presto/hive/metastore/thrift/HiveMetastore.java index b70c3bb0285a3..0c9bb31c871ba 100644 --- a/presto-hive-metastore/src/main/java/com/facebook/presto/hive/metastore/thrift/HiveMetastore.java +++ b/presto-hive-metastore/src/main/java/com/facebook/presto/hive/metastore/thrift/HiveMetastore.java @@ -17,6 +17,7 @@ import com.facebook.presto.common.type.Type; import com.facebook.presto.hive.metastore.Column; import com.facebook.presto.hive.metastore.HivePrivilegeInfo; +import com.facebook.presto.hive.metastore.MetastoreContext; import com.facebook.presto.hive.metastore.PartitionNameWithVersion; import com.facebook.presto.hive.metastore.PartitionStatistics; import com.facebook.presto.hive.metastore.PartitionWithStatistics; @@ -41,87 +42,87 @@ public interface HiveMetastore { - void createDatabase(Database database); + void createDatabase(MetastoreContext metastoreContext, Database database); - void dropDatabase(String databaseName); + void dropDatabase(MetastoreContext metastoreContext, String databaseName); - void alterDatabase(String databaseName, Database database); + void alterDatabase(MetastoreContext metastoreContext, String databaseName, Database database); - void createTable(Table table); + void createTable(MetastoreContext metastoreContext, Table table); - void dropTable(String databaseName, String tableName, boolean deleteData); + void dropTable(MetastoreContext metastoreContext, String databaseName, String tableName, boolean deleteData); - void alterTable(String databaseName, 
String tableName, Table table); + void alterTable(MetastoreContext metastoreContext, String databaseName, String tableName, Table table); - List getAllDatabases(); + List getAllDatabases(MetastoreContext metastoreContext); - Optional> getAllTables(String databaseName); + Optional> getAllTables(MetastoreContext metastoreContext, String databaseName); - Optional> getAllViews(String databaseName); + Optional> getAllViews(MetastoreContext metastoreContext, String databaseName); - Optional getDatabase(String databaseName); + Optional getDatabase(MetastoreContext metastoreContext, String databaseName); - void addPartitions(String databaseName, String tableName, List partitions); + void addPartitions(MetastoreContext metastoreContext, String databaseName, String tableName, List partitions); - void dropPartition(String databaseName, String tableName, List parts, boolean deleteData); + void dropPartition(MetastoreContext metastoreContext, String databaseName, String tableName, List parts, boolean deleteData); - void alterPartition(String databaseName, String tableName, PartitionWithStatistics partition); + void alterPartition(MetastoreContext metastoreContext, String databaseName, String tableName, PartitionWithStatistics partition); - Optional> getPartitionNames(String databaseName, String tableName); + Optional> getPartitionNames(MetastoreContext metastoreContext, String databaseName, String tableName); - Optional> getPartitionNamesByParts(String databaseName, String tableName, List parts); + Optional> getPartitionNamesByParts(MetastoreContext metastoreContext, String databaseName, String tableName, List parts); - List getPartitionNamesByFilter(String databaseName, String tableName, Map partitionPredicates); + List getPartitionNamesByFilter(MetastoreContext metastoreContext, String databaseName, String tableName, Map partitionPredicates); - default List getPartitionNamesWithVersionByFilter(String databaseName, String tableName, Map partitionPredicates) + default List 
getPartitionNamesWithVersionByFilter(MetastoreContext metastoreContext, String databaseName, String tableName, Map partitionPredicates) { throw new UnsupportedOperationException(); } - Optional getPartition(String databaseName, String tableName, List partitionValues); + Optional getPartition(MetastoreContext metastoreContext, String databaseName, String tableName, List partitionValues); - List getPartitionsByNames(String databaseName, String tableName, List partitionNames); + List getPartitionsByNames(MetastoreContext metastoreContext, String databaseName, String tableName, List partitionNames); - Optional
getTable(String databaseName, String tableName); + Optional
getTable(MetastoreContext metastoreContext, String databaseName, String tableName); - Set getSupportedColumnStatistics(Type type); + Set getSupportedColumnStatistics(MetastoreContext metastoreContext, Type type); - PartitionStatistics getTableStatistics(String databaseName, String tableName); + PartitionStatistics getTableStatistics(MetastoreContext metastoreContext, String databaseName, String tableName); - Map getPartitionStatistics(String databaseName, String tableName, Set partitionNames); + Map getPartitionStatistics(MetastoreContext metastoreContext, String databaseName, String tableName, Set partitionNames); - void updateTableStatistics(String databaseName, String tableName, Function update); + void updateTableStatistics(MetastoreContext metastoreContext, String databaseName, String tableName, Function update); - void updatePartitionStatistics(String databaseName, String tableName, String partitionName, Function update); + void updatePartitionStatistics(MetastoreContext metastoreContext, String databaseName, String tableName, String partitionName, Function update); - void createRole(String role, String grantor); + void createRole(MetastoreContext metastoreContext, String role, String grantor); - void dropRole(String role); + void dropRole(MetastoreContext metastoreContext, String role); - Set listRoles(); + Set listRoles(MetastoreContext metastoreContext); - void grantRoles(Set roles, Set grantees, boolean withAdminOption, PrestoPrincipal grantor); + void grantRoles(MetastoreContext metastoreContext, Set roles, Set grantees, boolean withAdminOption, PrestoPrincipal grantor); - void revokeRoles(Set roles, Set grantees, boolean adminOptionFor, PrestoPrincipal grantor); + void revokeRoles(MetastoreContext metastoreContext, Set roles, Set grantees, boolean adminOptionFor, PrestoPrincipal grantor); - Set listRoleGrants(PrestoPrincipal principal); + Set listRoleGrants(MetastoreContext metastoreContext, PrestoPrincipal principal); - void grantTablePrivileges(String 
databaseName, String tableName, PrestoPrincipal grantee, Set privileges); + void grantTablePrivileges(MetastoreContext metastoreContext, String databaseName, String tableName, PrestoPrincipal grantee, Set privileges); - void revokeTablePrivileges(String databaseName, String tableName, PrestoPrincipal grantee, Set privileges); + void revokeTablePrivileges(MetastoreContext metastoreContext, String databaseName, String tableName, PrestoPrincipal grantee, Set privileges); - Set listTablePrivileges(String databaseName, String tableName, PrestoPrincipal principal); + Set listTablePrivileges(MetastoreContext metastoreContext, String databaseName, String tableName, PrestoPrincipal principal); - default boolean isTableOwner(String user, String databaseName, String tableName) + default boolean isTableOwner(MetastoreContext metastoreContext, String user, String databaseName, String tableName) { // a table can only be owned by a user - Optional
table = getTable(databaseName, tableName); + Optional
table = getTable(metastoreContext, databaseName, tableName); return table.isPresent() && user.equals(table.get().getOwner()); } - default Optional> getFields(String databaseName, String tableName) + default Optional> getFields(MetastoreContext metastoreContext, String databaseName, String tableName) { - Optional
table = getTable(databaseName, tableName); + Optional
table = getTable(metastoreContext, databaseName, tableName); if (!table.isPresent()) { throw new TableNotFoundException(new SchemaTableName(databaseName, tableName)); } diff --git a/presto-hive-metastore/src/main/java/com/facebook/presto/hive/metastore/thrift/HiveMetastoreApiStats.java b/presto-hive-metastore/src/main/java/com/facebook/presto/hive/metastore/thrift/HiveMetastoreApiStats.java index e8c5bc28291e2..88730f0ba0d04 100644 --- a/presto-hive-metastore/src/main/java/com/facebook/presto/hive/metastore/thrift/HiveMetastoreApiStats.java +++ b/presto-hive-metastore/src/main/java/com/facebook/presto/hive/metastore/thrift/HiveMetastoreApiStats.java @@ -35,6 +35,12 @@ public class HiveMetastoreApiStats private final CounterStat metastoreExceptions = new CounterStat(); private final CounterStat thriftExceptions = new CounterStat(); + public V execute(Callable callable) + throws Exception + { + return wrap(callable).call(); + } + public Callable wrap(Callable callable) { return () -> { diff --git a/presto-hive-metastore/src/main/java/com/facebook/presto/hive/metastore/thrift/HiveMetastoreClient.java b/presto-hive-metastore/src/main/java/com/facebook/presto/hive/metastore/thrift/HiveMetastoreClient.java index c2ed66a74c17c..37d233e1b99f1 100644 --- a/presto-hive-metastore/src/main/java/com/facebook/presto/hive/metastore/thrift/HiveMetastoreClient.java +++ b/presto-hive-metastore/src/main/java/com/facebook/presto/hive/metastore/thrift/HiveMetastoreClient.java @@ -36,6 +36,9 @@ public interface HiveMetastoreClient @Override void close(); + String getDelegationToken(String owner, String renewer) + throws TException; + List getAllDatabases() throws TException; diff --git a/presto-hive-metastore/src/main/java/com/facebook/presto/hive/metastore/thrift/HiveMetastoreClientFactory.java b/presto-hive-metastore/src/main/java/com/facebook/presto/hive/metastore/thrift/HiveMetastoreClientFactory.java index 4182c2dfbf03d..37e16099d1cff 100644 --- 
a/presto-hive-metastore/src/main/java/com/facebook/presto/hive/metastore/thrift/HiveMetastoreClientFactory.java +++ b/presto-hive-metastore/src/main/java/com/facebook/presto/hive/metastore/thrift/HiveMetastoreClientFactory.java @@ -52,9 +52,9 @@ public HiveMetastoreClientFactory(MetastoreClientConfig metastoreClientConfig, H this(Optional.empty(), Optional.ofNullable(metastoreClientConfig.getMetastoreSocksProxy()), metastoreClientConfig.getMetastoreTimeout(), metastoreAuthentication); } - public HiveMetastoreClient create(HostAndPort address) + public HiveMetastoreClient create(HostAndPort address, Optional token) throws TTransportException { - return new ThriftHiveMetastoreClient(Transport.create(address, sslContext, socksProxy, timeoutMillis, metastoreAuthentication)); + return new ThriftHiveMetastoreClient(Transport.create(address, sslContext, socksProxy, timeoutMillis, metastoreAuthentication, token)); } } diff --git a/presto-hive-metastore/src/main/java/com/facebook/presto/hive/metastore/thrift/StaticHiveCluster.java b/presto-hive-metastore/src/main/java/com/facebook/presto/hive/metastore/thrift/StaticHiveCluster.java index b3b69cb1e2bbd..86a037f9644f2 100644 --- a/presto-hive-metastore/src/main/java/com/facebook/presto/hive/metastore/thrift/StaticHiveCluster.java +++ b/presto-hive-metastore/src/main/java/com/facebook/presto/hive/metastore/thrift/StaticHiveCluster.java @@ -22,6 +22,7 @@ import java.util.ArrayList; import java.util.Collections; import java.util.List; +import java.util.Optional; import static com.google.common.base.Preconditions.checkArgument; import static com.google.common.base.Strings.isNullOrEmpty; @@ -34,16 +35,18 @@ public class StaticHiveCluster private final List addresses; private final HiveMetastoreClientFactory clientFactory; private final String metastoreUsername; + private final boolean metastoreLoadBalancingEnabled; @Inject public StaticHiveCluster(StaticMetastoreConfig config, HiveMetastoreClientFactory clientFactory) { - 
this(config.getMetastoreUris(), config.getMetastoreUsername(), clientFactory); + this(config.getMetastoreUris(), config.isMetastoreLoadBalancingEnabled(), config.getMetastoreUsername(), clientFactory); } - public StaticHiveCluster(List metastoreUris, String metastoreUsername, HiveMetastoreClientFactory clientFactory) + public StaticHiveCluster(List metastoreUris, boolean metastoreLoadBalancingEnabled, String metastoreUsername, HiveMetastoreClientFactory clientFactory) { requireNonNull(metastoreUris, "metastoreUris is null"); + this.metastoreLoadBalancingEnabled = metastoreLoadBalancingEnabled; checkArgument(!metastoreUris.isEmpty(), "metastoreUris must specify at least one URI"); this.addresses = metastoreUris.stream() .map(StaticHiveCluster::checkMetastoreUri) @@ -62,16 +65,19 @@ public StaticHiveCluster(List metastoreUris, String metastoreUsername, Hive * connection succeeds or there are no more fallback metastores. */ @Override - public HiveMetastoreClient createMetastoreClient() + public HiveMetastoreClient createMetastoreClient(Optional token) throws TException { List metastores = new ArrayList<>(addresses); - Collections.shuffle(metastores.subList(1, metastores.size())); + if (metastoreLoadBalancingEnabled) { + Collections.shuffle(metastores); + } TException lastException = null; for (HostAndPort metastore : metastores) { try { - HiveMetastoreClient client = clientFactory.create(metastore); + HiveMetastoreClient client = clientFactory.create(metastore, token); + if (!isNullOrEmpty(metastoreUsername)) { client.setUGI(metastoreUsername); } diff --git a/presto-hive-metastore/src/main/java/com/facebook/presto/hive/metastore/thrift/StaticMetastoreConfig.java b/presto-hive-metastore/src/main/java/com/facebook/presto/hive/metastore/thrift/StaticMetastoreConfig.java index fd8c6d09b4d5e..f1f2f6d5d4394 100644 --- a/presto-hive-metastore/src/main/java/com/facebook/presto/hive/metastore/thrift/StaticMetastoreConfig.java +++ 
b/presto-hive-metastore/src/main/java/com/facebook/presto/hive/metastore/thrift/StaticMetastoreConfig.java @@ -31,6 +31,7 @@ public class StaticMetastoreConfig private List metastoreUris; private String metastoreUsername; + private boolean metastoreLoadBalancingEnabled; @NotNull public List getMetastoreUris() @@ -63,4 +64,17 @@ public StaticMetastoreConfig setMetastoreUsername(String metastoreUsername) this.metastoreUsername = metastoreUsername; return this; } + + public boolean isMetastoreLoadBalancingEnabled() + { + return metastoreLoadBalancingEnabled; + } + + @Config("hive.metastore.load-balancing-enabled") + @ConfigDescription("Enable load balancing between multiple Metastore instances") + public StaticMetastoreConfig setMetastoreLoadBalancingEnabled(boolean enabled) + { + this.metastoreLoadBalancingEnabled = enabled; + return this; + } } diff --git a/presto-hive-metastore/src/main/java/com/facebook/presto/hive/metastore/thrift/ThriftHiveMetastore.java b/presto-hive-metastore/src/main/java/com/facebook/presto/hive/metastore/thrift/ThriftHiveMetastore.java index cb1c2566f238b..0166f672eac64 100644 --- a/presto-hive-metastore/src/main/java/com/facebook/presto/hive/metastore/thrift/ThriftHiveMetastore.java +++ b/presto-hive-metastore/src/main/java/com/facebook/presto/hive/metastore/thrift/ThriftHiveMetastore.java @@ -18,6 +18,7 @@ import com.facebook.presto.hive.HiveBasicStatistics; import com.facebook.presto.hive.HiveType; import com.facebook.presto.hive.HiveViewNotSupportedException; +import com.facebook.presto.hive.MetastoreClientConfig; import com.facebook.presto.hive.PartitionNotFoundException; import com.facebook.presto.hive.RetryDriver; import com.facebook.presto.hive.SchemaAlreadyExistsException; @@ -25,6 +26,7 @@ import com.facebook.presto.hive.metastore.Column; import com.facebook.presto.hive.metastore.HiveColumnStatistics; import com.facebook.presto.hive.metastore.HivePrivilegeInfo; +import com.facebook.presto.hive.metastore.MetastoreContext; import 
com.facebook.presto.hive.metastore.MetastoreUtil; import com.facebook.presto.hive.metastore.PartitionStatistics; import com.facebook.presto.hive.metastore.PartitionWithStatistics; @@ -116,18 +118,33 @@ public class ThriftHiveMetastore private final ThriftHiveMetastoreStats stats; private final HiveCluster clientProvider; private final Function exceptionMapper; + private final boolean impersonationEnabled; @Inject - public ThriftHiveMetastore(HiveCluster hiveCluster) + public ThriftHiveMetastore(HiveCluster hiveCluster, MetastoreClientConfig config) { - this(hiveCluster, new ThriftHiveMetastoreStats(), identity()); + this( + hiveCluster, + new ThriftHiveMetastoreStats(), + identity(), + requireNonNull(config, "config is null").isMetastoreImpersonationEnabled()); } - public ThriftHiveMetastore(HiveCluster hiveCluster, ThriftHiveMetastoreStats stats, Function exceptionMapper) + public ThriftHiveMetastore( + HiveCluster hiveCluster, + ThriftHiveMetastoreStats stats, + Function exceptionMapper, + boolean impersonationEnabled) { this.clientProvider = requireNonNull(hiveCluster, "hiveCluster is null"); this.stats = requireNonNull(stats, "stats is null"); this.exceptionMapper = requireNonNull(exceptionMapper, "exceptionMapper is null"); + this.impersonationEnabled = impersonationEnabled; + } + + private static boolean isPrestoView(Table table) + { + return "true".equals(table.getParameters().get(PRESTO_VIEW_FLAG)); } @Managed @@ -138,16 +155,13 @@ public ThriftHiveMetastoreStats getStats() } @Override - public List getAllDatabases() + public List getAllDatabases(MetastoreContext context) { try { return retry() .stopOnIllegalExceptions() - .run("getAllDatabases", stats.getGetAllDatabases().wrap(() -> { - try (HiveMetastoreClient client = clientProvider.createMetastoreClient()) { - return client.getAllDatabases(); - } - })); + .run("getAllDatabases", stats.getGetAllDatabases().wrap(() -> + getMetastoreClientThenCall(context, HiveMetastoreClient::getAllDatabases))); } catch 
(TException e) { throw new PrestoException(HIVE_METASTORE_ERROR, e); @@ -158,17 +172,14 @@ public List getAllDatabases() } @Override - public Optional getDatabase(String databaseName) + public Optional getDatabase(MetastoreContext metastoreContext, String databaseName) { try { return retry() .stopOn(NoSuchObjectException.class) .stopOnIllegalExceptions() - .run("getDatabase", stats.getGetDatabase().wrap(() -> { - try (HiveMetastoreClient client = clientProvider.createMetastoreClient()) { - return Optional.of(client.getDatabase(databaseName)); - } - })); + .run("getDatabase", stats.getGetDatabase().wrap(() -> + getMetastoreClientThenCall(metastoreContext, client -> Optional.of(client.getDatabase(databaseName))))); } catch (NoSuchObjectException e) { return Optional.empty(); @@ -182,19 +193,14 @@ public Optional getDatabase(String databaseName) } @Override - public Optional> getAllTables(String databaseName) + public Optional> getAllTables(MetastoreContext metastoreContext, String databaseName) { - Callable> getAllTables = stats.getGetAllTables().wrap(() -> { - try (HiveMetastoreClient client = clientProvider.createMetastoreClient()) { - return client.getAllTables(databaseName); - } - }); + Callable> getAllTables = stats.getGetAllTables().wrap(() -> + getMetastoreClientThenCall(metastoreContext, client -> client.getAllTables(databaseName))); Callable getDatabase = stats.getGetDatabase().wrap(() -> { - try (HiveMetastoreClient client = clientProvider.createMetastoreClient()) { - client.getDatabase(databaseName); - return null; - } + getMetastoreClientThenCall(metastoreContext, client -> client.getDatabase(databaseName)); + return null; }); try { @@ -222,21 +228,20 @@ public Optional> getAllTables(String databaseName) } @Override - public Optional
getTable(String databaseName, String tableName) + public Optional
getTable(MetastoreContext metastoreContext, String databaseName, String tableName) { try { return retry() .stopOn(NoSuchObjectException.class, HiveViewNotSupportedException.class) .stopOnIllegalExceptions() - .run("getTable", stats.getGetTable().wrap(() -> { - try (HiveMetastoreClient client = clientProvider.createMetastoreClient()) { - Table table = client.getTable(databaseName, tableName); - if (table.getTableType().equals(TableType.VIRTUAL_VIEW.name()) && !isPrestoView(table)) { - throw new HiveViewNotSupportedException(new SchemaTableName(databaseName, tableName)); - } - return Optional.of(table); - } - })); + .run("getTable", stats.getGetTable().wrap(() -> + getMetastoreClientThenCall(metastoreContext, client -> { + Table table = client.getTable(databaseName, tableName); + if (table.getTableType().equals(TableType.VIRTUAL_VIEW.name()) && !isPrestoView(table)) { + throw new HiveViewNotSupportedException(new SchemaTableName(databaseName, tableName)); + } + return Optional.of(table); + }))); } catch (NoSuchObjectException e) { return Optional.empty(); @@ -250,40 +255,33 @@ public Optional
getTable(String databaseName, String tableName) } @Override - public Set getSupportedColumnStatistics(Type type) + public Set getSupportedColumnStatistics(MetastoreContext metastoreContext, Type type) { return MetastoreUtil.getSupportedColumnStatistics(type); } - private static boolean isPrestoView(Table table) - { - return "true".equals(table.getParameters().get(PRESTO_VIEW_FLAG)); - } - @Override - public PartitionStatistics getTableStatistics(String databaseName, String tableName) + public PartitionStatistics getTableStatistics(MetastoreContext metastoreContext, String databaseName, String tableName) { - Table table = getTable(databaseName, tableName) + Table table = getTable(metastoreContext, databaseName, tableName) .orElseThrow(() -> new TableNotFoundException(new SchemaTableName(databaseName, tableName))); List dataColumns = table.getSd().getCols().stream() .map(FieldSchema::getName) .collect(toImmutableList()); HiveBasicStatistics basicStatistics = getHiveBasicStatistics(table.getParameters()); - Map columnStatistics = getTableColumnStatistics(databaseName, tableName, dataColumns, basicStatistics.getRowCount()); + Map columnStatistics = getTableColumnStatistics(metastoreContext, databaseName, tableName, dataColumns, basicStatistics.getRowCount()); return new PartitionStatistics(basicStatistics, columnStatistics); } - private Map getTableColumnStatistics(String databaseName, String tableName, List columns, OptionalLong rowCount) + private Map getTableColumnStatistics(MetastoreContext metastoreContext, String databaseName, String tableName, List columns, OptionalLong rowCount) { try { return retry() .stopOn(NoSuchObjectException.class, HiveViewNotSupportedException.class) .stopOnIllegalExceptions() - .run("getTableColumnStatistics", stats.getGetTableColumnStatistics().wrap(() -> { - try (HiveMetastoreClient client = clientProvider.createMetastoreClient()) { - return groupStatisticsByColumn(client.getTableColumnStatistics(databaseName, tableName, columns), 
rowCount); - } - })); + .run("getTableColumnStatistics", stats.getGetTableColumnStatistics().wrap(() -> + getMetastoreClientThenCall(metastoreContext, client -> + groupStatisticsByColumn(client.getTableColumnStatistics(databaseName, tableName, columns), rowCount)))); } catch (NoSuchObjectException e) { throw new TableNotFoundException(new SchemaTableName(databaseName, tableName)); @@ -297,9 +295,9 @@ private Map getTableColumnStatistics(String databa } @Override - public Map getPartitionStatistics(String databaseName, String tableName, Set partitionNames) + public Map getPartitionStatistics(MetastoreContext metastoreContext, String databaseName, String tableName, Set partitionNames) { - Table table = getTable(databaseName, tableName) + Table table = getTable(metastoreContext, databaseName, tableName) .orElseThrow(() -> new TableNotFoundException(new SchemaTableName(databaseName, tableName))); List dataColumns = table.getSd().getCols().stream() .map(FieldSchema::getName) @@ -308,13 +306,14 @@ public Map getPartitionStatistics(String databaseNa .map(FieldSchema::getName) .collect(toImmutableList()); - Map partitionBasicStatistics = getPartitionsByNames(databaseName, tableName, ImmutableList.copyOf(partitionNames)).stream() + Map partitionBasicStatistics = getPartitionsByNames(metastoreContext, databaseName, tableName, ImmutableList.copyOf(partitionNames)).stream() .collect(toImmutableMap( partition -> makePartName(partitionColumns, partition.getValues()), partition -> getHiveBasicStatistics(partition.getParameters()))); Map partitionRowCounts = partitionBasicStatistics.entrySet().stream() .collect(toImmutableMap(Map.Entry::getKey, entry -> entry.getValue().getRowCount())); Map> partitionColumnStatistics = getPartitionColumnStatistics( + metastoreContext, databaseName, tableName, partitionNames, @@ -331,17 +330,15 @@ public Map getPartitionStatistics(String databaseNa } @Override - public Optional> getFields(String databaseName, String tableName) + public Optional> 
getFields(MetastoreContext metastoreContext, String databaseName, String tableName) { try { return retry() .stopOn(MetaException.class, UnknownTableException.class, UnknownDBException.class) .stopOnIllegalExceptions() - .run("getFields", stats.getGetFields().wrap(() -> { - try (HiveMetastoreClient client = clientProvider.createMetastoreClient()) { - return Optional.of(ImmutableList.copyOf(client.getFields(databaseName, tableName))); - } - })); + .run("getFields", stats.getGetFields().wrap(() -> + getMetastoreClientThenCall(metastoreContext, client -> + Optional.of(ImmutableList.copyOf(client.getFields(databaseName, tableName)))))); } catch (NoSuchObjectException e) { return Optional.empty(); @@ -355,30 +352,29 @@ public Optional> getFields(String databaseName, String tableNa } private Map> getPartitionColumnStatistics( + MetastoreContext metastoreContext, String databaseName, String tableName, Set partitionNames, List columnNames, Map partitionRowCounts) { - return getMetastorePartitionColumnStatistics(databaseName, tableName, partitionNames, columnNames).entrySet().stream() + return getMetastorePartitionColumnStatistics(metastoreContext, databaseName, tableName, partitionNames, columnNames).entrySet().stream() .filter(entry -> !entry.getValue().isEmpty()) .collect(toImmutableMap( Map.Entry::getKey, entry -> groupStatisticsByColumn(entry.getValue(), partitionRowCounts.getOrDefault(entry.getKey(), OptionalLong.empty())))); } - private Map> getMetastorePartitionColumnStatistics(String databaseName, String tableName, Set partitionNames, List columnNames) + private Map> getMetastorePartitionColumnStatistics(MetastoreContext metastoreContext, String databaseName, String tableName, Set partitionNames, List columnNames) { try { return retry() .stopOn(NoSuchObjectException.class, HiveViewNotSupportedException.class) .stopOnIllegalExceptions() - .run("getPartitionColumnStatistics", stats.getGetPartitionColumnStatistics().wrap(() -> { - try (HiveMetastoreClient client = 
clientProvider.createMetastoreClient()) { - return client.getPartitionColumnStatistics(databaseName, tableName, ImmutableList.copyOf(partitionNames), columnNames); - } - })); + .run("getPartitionColumnStatistics", stats.getGetPartitionColumnStatistics().wrap(() -> + getMetastoreClientThenCall(metastoreContext, client -> + client.getPartitionColumnStatistics(databaseName, tableName, ImmutableList.copyOf(partitionNames), columnNames)))); } catch (NoSuchObjectException e) { throw new TableNotFoundException(new SchemaTableName(databaseName, tableName)); @@ -398,17 +394,17 @@ private Map groupStatisticsByColumn(List update) + public synchronized void updateTableStatistics(MetastoreContext metastoreContext, String databaseName, String tableName, Function update) { - PartitionStatistics currentStatistics = getTableStatistics(databaseName, tableName); + PartitionStatistics currentStatistics = getTableStatistics(metastoreContext, databaseName, tableName); PartitionStatistics updatedStatistics = update.apply(currentStatistics); - Table originalTable = getTable(databaseName, tableName) + Table originalTable = getTable(metastoreContext, databaseName, tableName) .orElseThrow(() -> new TableNotFoundException(new SchemaTableName(databaseName, tableName))); Table modifiedTable = originalTable.deepCopy(); HiveBasicStatistics basicStatistics = updatedStatistics.getBasicStatistics(); modifiedTable.setParameters(updateStatisticsParameters(modifiedTable.getParameters(), basicStatistics)); - alterTable(databaseName, tableName, modifiedTable); + alterTable(metastoreContext, databaseName, tableName, modifiedTable); com.facebook.presto.hive.metastore.Table table = fromMetastoreApiTable(modifiedTable); OptionalLong rowCount = basicStatistics.getRowCount(); @@ -417,24 +413,23 @@ public synchronized void updateTableStatistics(String databaseName, String table .map(entry -> createMetastoreColumnStatistics(entry.getKey(), table.getColumn(entry.getKey()).get().getType(), entry.getValue(), 
rowCount)) .collect(toImmutableList()); if (!metastoreColumnStatistics.isEmpty()) { - setTableColumnStatistics(databaseName, tableName, metastoreColumnStatistics); + setTableColumnStatistics(metastoreContext, databaseName, tableName, metastoreColumnStatistics); } Set removedColumnStatistics = difference(currentStatistics.getColumnStatistics().keySet(), updatedStatistics.getColumnStatistics().keySet()); - removedColumnStatistics.forEach(column -> deleteTableColumnStatistics(databaseName, tableName, column)); + removedColumnStatistics.forEach(column -> deleteTableColumnStatistics(metastoreContext, databaseName, tableName, column)); } - private void setTableColumnStatistics(String databaseName, String tableName, List statistics) + private void setTableColumnStatistics(MetastoreContext metastoreContext, String databaseName, String tableName, List statistics) { try { retry() .stopOn(NoSuchObjectException.class, InvalidObjectException.class, MetaException.class, InvalidInputException.class) .stopOnIllegalExceptions() - .run("setTableColumnStatistics", stats.getCreateDatabase().wrap(() -> { - try (HiveMetastoreClient client = clientProvider.createMetastoreClient()) { - client.setTableColumnStatistics(databaseName, tableName, statistics); - } - return null; - })); + .run("setTableColumnStatistics", stats.getUpdateTableColumnStatistics().wrap(() -> + getMetastoreClientThenCall(metastoreContext, client -> { + client.setTableColumnStatistics(databaseName, tableName, statistics); + return null; + }))); } catch (NoSuchObjectException e) { throw new TableNotFoundException(new SchemaTableName(databaseName, tableName)); @@ -447,18 +442,17 @@ private void setTableColumnStatistics(String databaseName, String tableName, Lis } } - private void deleteTableColumnStatistics(String databaseName, String tableName, String columnName) + private void deleteTableColumnStatistics(MetastoreContext metastoreContext, String databaseName, String tableName, String columnName) { try { retry() 
.stopOn(NoSuchObjectException.class, InvalidObjectException.class, MetaException.class, InvalidInputException.class) .stopOnIllegalExceptions() - .run("deleteTableColumnStatistics", stats.getCreateDatabase().wrap(() -> { - try (HiveMetastoreClient client = clientProvider.createMetastoreClient()) { - client.deleteTableColumnStatistics(databaseName, tableName, columnName); - } - return null; - })); + .run("deleteTableColumnStatistics", stats.getUpdatePartitionColumnStatistics().wrap(() -> + getMetastoreClientThenCall(metastoreContext, client -> { + client.deleteTableColumnStatistics(databaseName, tableName, columnName); + return null; + }))); } catch (NoSuchObjectException e) { throw new TableNotFoundException(new SchemaTableName(databaseName, tableName)); @@ -472,13 +466,13 @@ private void deleteTableColumnStatistics(String databaseName, String tableName, } @Override - public synchronized void updatePartitionStatistics(String databaseName, String tableName, String partitionName, Function update) + public synchronized void updatePartitionStatistics(MetastoreContext metastoreContext, String databaseName, String tableName, String partitionName, Function update) { PartitionStatistics currentStatistics = requireNonNull( - getPartitionStatistics(databaseName, tableName, ImmutableSet.of(partitionName)).get(partitionName), "getPartitionStatistics() returned null"); + getPartitionStatistics(metastoreContext, databaseName, tableName, ImmutableSet.of(partitionName)).get(partitionName), "getPartitionStatistics() returned null"); PartitionStatistics updatedStatistics = update.apply(currentStatistics); - List partitions = getPartitionsByNames(databaseName, tableName, ImmutableList.of(partitionName)); + List partitions = getPartitionsByNames(metastoreContext, databaseName, tableName, ImmutableList.of(partitionName)); if (partitions.size() != 1) { throw new PrestoException(HIVE_METASTORE_ERROR, "Metastore returned multiple partitions for name: " + partitionName); } @@ -487,17 
+481,18 @@ public synchronized void updatePartitionStatistics(String databaseName, String t Partition modifiedPartition = originalPartition.deepCopy(); HiveBasicStatistics basicStatistics = updatedStatistics.getBasicStatistics(); modifiedPartition.setParameters(updateStatisticsParameters(modifiedPartition.getParameters(), basicStatistics)); - alterPartitionWithoutStatistics(databaseName, tableName, modifiedPartition); + alterPartitionWithoutStatistics(metastoreContext, databaseName, tableName, modifiedPartition); Map columns = modifiedPartition.getSd().getCols().stream() .collect(toImmutableMap(FieldSchema::getName, schema -> HiveType.valueOf(schema.getType()))); - setPartitionColumnStatistics(databaseName, tableName, partitionName, columns, updatedStatistics.getColumnStatistics(), basicStatistics.getRowCount()); + setPartitionColumnStatistics(metastoreContext, databaseName, tableName, partitionName, columns, updatedStatistics.getColumnStatistics(), basicStatistics.getRowCount()); Set removedStatistics = difference(currentStatistics.getColumnStatistics().keySet(), updatedStatistics.getColumnStatistics().keySet()); - removedStatistics.forEach(column -> deletePartitionColumnStatistics(databaseName, tableName, partitionName, column)); + removedStatistics.forEach(column -> deletePartitionColumnStatistics(metastoreContext, databaseName, tableName, partitionName, column)); } private void setPartitionColumnStatistics( + MetastoreContext metastoreContext, String databaseName, String tableName, String partitionName, @@ -510,22 +505,21 @@ private void setPartitionColumnStatistics( .map(entry -> createMetastoreColumnStatistics(entry.getKey(), columns.get(entry.getKey()), entry.getValue(), rowCount)) .collect(toImmutableList()); if (!metastoreColumnStatistics.isEmpty()) { - setPartitionColumnStatistics(databaseName, tableName, partitionName, metastoreColumnStatistics); + setPartitionColumnStatistics(metastoreContext, databaseName, tableName, partitionName, 
metastoreColumnStatistics); } } - private void setPartitionColumnStatistics(String databaseName, String tableName, String partitionName, List statistics) + private void setPartitionColumnStatistics(MetastoreContext metastoreContext, String databaseName, String tableName, String partitionName, List statistics) { try { retry() .stopOn(NoSuchObjectException.class, InvalidObjectException.class, MetaException.class, InvalidInputException.class) .stopOnIllegalExceptions() - .run("setPartitionColumnStatistics", stats.getCreateDatabase().wrap(() -> { - try (HiveMetastoreClient client = clientProvider.createMetastoreClient()) { - client.setPartitionColumnStatistics(databaseName, tableName, partitionName, statistics); - } - return null; - })); + .run("setPartitionColumnStatistics", stats.getUpdatePartitionColumnStatistics().wrap(() -> + getMetastoreClientThenCall(metastoreContext, client -> { + client.setPartitionColumnStatistics(databaseName, tableName, partitionName, statistics); + return null; + }))); } catch (NoSuchObjectException e) { throw new TableNotFoundException(new SchemaTableName(databaseName, tableName)); @@ -538,18 +532,17 @@ private void setPartitionColumnStatistics(String databaseName, String tableName, } } - private void deletePartitionColumnStatistics(String databaseName, String tableName, String partitionName, String columnName) + private void deletePartitionColumnStatistics(MetastoreContext metastoreContext, String databaseName, String tableName, String partitionName, String columnName) { try { retry() .stopOn(NoSuchObjectException.class, InvalidObjectException.class, MetaException.class, InvalidInputException.class) .stopOnIllegalExceptions() - .run("deletePartitionColumnStatistics", stats.getCreateDatabase().wrap(() -> { - try (HiveMetastoreClient client = clientProvider.createMetastoreClient()) { - client.deletePartitionColumnStatistics(databaseName, tableName, partitionName, columnName); - } - return null; - })); + 
.run("deletePartitionColumnStatistics", stats.getUpdatePartitionColumnStatistics().wrap(() -> + getMetastoreClientThenCall(metastoreContext, client -> { + client.deletePartitionColumnStatistics(databaseName, tableName, partitionName, columnName); + return null; + }))); } catch (NoSuchObjectException e) { throw new TableNotFoundException(new SchemaTableName(databaseName, tableName)); @@ -563,18 +556,17 @@ private void deletePartitionColumnStatistics(String databaseName, String tableNa } @Override - public void createRole(String role, String grantor) + public void createRole(MetastoreContext metastoreContext, String role, String grantor) { try { retry() .stopOn(MetaException.class) .stopOnIllegalExceptions() - .run("createRole", stats.getCreateRole().wrap(() -> { - try (HiveMetastoreClient client = clientProvider.createMetastoreClient()) { - client.createRole(role, grantor); - return null; - } - })); + .run("createRole", stats.getCreateRole().wrap(() -> + getMetastoreClientThenCall(metastoreContext, client -> { + client.createRole(role, grantor); + return null; + }))); } catch (TException e) { throw new PrestoException(HIVE_METASTORE_ERROR, e); @@ -585,18 +577,17 @@ public void createRole(String role, String grantor) } @Override - public void dropRole(String role) + public void dropRole(MetastoreContext metastoreContext, String role) { try { retry() .stopOn(MetaException.class) .stopOnIllegalExceptions() - .run("dropRole", stats.getDropRole().wrap(() -> { - try (HiveMetastoreClient client = clientProvider.createMetastoreClient()) { - client.dropRole(role); - return null; - } - })); + .run("dropRole", stats.getDropRole().wrap(() -> + getMetastoreClientThenCall(metastoreContext, client -> { + client.dropRole(role); + return null; + }))); } catch (TException e) { throw new PrestoException(HIVE_METASTORE_ERROR, e); @@ -607,17 +598,14 @@ public void dropRole(String role) } @Override - public Set listRoles() + public Set listRoles(MetastoreContext metastoreContext) { try { 
return retry() .stopOn(MetaException.class) .stopOnIllegalExceptions() - .run("listRoles", stats.getListRoles().wrap(() -> { - try (HiveMetastoreClient client = clientProvider.createMetastoreClient()) { - return ImmutableSet.copyOf(client.getRoleNames()); - } - })); + .run("listRoles", stats.getListRoles().wrap(() -> + getMetastoreClientThenCall(metastoreContext, client -> ImmutableSet.copyOf(client.getRoleNames())))); } catch (TException e) { throw new PrestoException(HIVE_METASTORE_ERROR, e); @@ -628,11 +616,12 @@ public Set listRoles() } @Override - public void grantRoles(Set roles, Set grantees, boolean withAdminOption, PrestoPrincipal grantor) + public void grantRoles(MetastoreContext metastoreContext, Set roles, Set grantees, boolean withAdminOption, PrestoPrincipal grantor) { for (PrestoPrincipal grantee : grantees) { for (String role : roles) { grantRole( + metastoreContext, role, grantee.getName(), fromPrestoPrincipalType(grantee.getType()), grantor.getName(), fromPrestoPrincipalType(grantor.getType()), @@ -641,18 +630,17 @@ public void grantRoles(Set roles, Set grantees, boolean } } - private void grantRole(String role, String granteeName, PrincipalType granteeType, String grantorName, PrincipalType grantorType, boolean grantOption) + private void grantRole(MetastoreContext metastoreContext, String role, String granteeName, PrincipalType granteeType, String grantorName, PrincipalType grantorType, boolean grantOption) { try { retry() .stopOn(MetaException.class) .stopOnIllegalExceptions() - .run("grantRole", stats.getGrantRole().wrap(() -> { - try (HiveMetastoreClient client = clientProvider.createMetastoreClient()) { - client.grantRole(role, granteeName, granteeType, grantorName, grantorType, grantOption); - return null; - } - })); + .run("grantRole", stats.getGrantRole().wrap(() -> + getMetastoreClientThenCall(metastoreContext, client -> { + client.grantRole(role, granteeName, granteeType, grantorName, grantorType, grantOption); + return null; + }))); } 
catch (TException e) { throw new PrestoException(HIVE_METASTORE_ERROR, e); @@ -663,11 +651,12 @@ private void grantRole(String role, String granteeName, PrincipalType granteeTyp } @Override - public void revokeRoles(Set roles, Set grantees, boolean adminOptionFor, PrestoPrincipal grantor) + public void revokeRoles(MetastoreContext metastoreContext, Set roles, Set grantees, boolean adminOptionFor, PrestoPrincipal grantor) { for (PrestoPrincipal grantee : grantees) { for (String role : roles) { revokeRole( + metastoreContext, role, grantee.getName(), fromPrestoPrincipalType(grantee.getType()), adminOptionFor); @@ -675,18 +664,17 @@ public void revokeRoles(Set roles, Set grantees, boolea } } - private void revokeRole(String role, String granteeName, PrincipalType granteeType, boolean grantOption) + private void revokeRole(MetastoreContext metastoreContext, String role, String granteeName, PrincipalType granteeType, boolean grantOption) { try { retry() .stopOn(MetaException.class) .stopOnIllegalExceptions() - .run("revokeRole", stats.getRevokeRole().wrap(() -> { - try (HiveMetastoreClient client = clientProvider.createMetastoreClient()) { - client.revokeRole(role, granteeName, granteeType, grantOption); - return null; - } - })); + .run("revokeRole", stats.getRevokeRole().wrap(() -> + getMetastoreClientThenCall(metastoreContext, client -> { + client.revokeRole(role, granteeName, granteeType, grantOption); + return null; + }))); } catch (TException e) { throw new PrestoException(HIVE_METASTORE_ERROR, e); @@ -697,17 +685,15 @@ private void revokeRole(String role, String granteeName, PrincipalType granteeTy } @Override - public Set listRoleGrants(PrestoPrincipal principal) + public Set listRoleGrants(MetastoreContext metastoreContext, PrestoPrincipal principal) { try { return retry() .stopOn(MetaException.class) .stopOnIllegalExceptions() - .run("listRoleGrants", stats.getListRoleGrants().wrap(() -> { - try (HiveMetastoreClient client = 
clientProvider.createMetastoreClient()) { - return fromRolePrincipalGrants(client.listRoleGrants(principal.getName(), fromPrestoPrincipalType(principal.getType()))); - } - })); + .run("listRoleGrants", stats.getListRoleGrants().wrap(() -> + getMetastoreClientThenCall(metastoreContext, client -> + fromRolePrincipalGrants(client.listRoleGrants(principal.getName(), fromPrestoPrincipalType(principal.getType())))))); } catch (TException e) { throw new PrestoException(HIVE_METASTORE_ERROR, e); @@ -718,18 +704,17 @@ public Set listRoleGrants(PrestoPrincipal principal) } @Override - public Optional> getAllViews(String databaseName) + public Optional> getAllViews(MetastoreContext metastoreContext, String databaseName) { try { return retry() .stopOn(UnknownDBException.class) .stopOnIllegalExceptions() - .run("getAllViews", stats.getGetAllViews().wrap(() -> { - try (HiveMetastoreClient client = clientProvider.createMetastoreClient()) { - String filter = HIVE_FILTER_FIELD_PARAMS + PRESTO_VIEW_FLAG + " = \"true\""; - return Optional.of(client.getTableNamesByFilter(databaseName, filter)); - } - })); + .run("getAllViews", stats.getGetAllViews().wrap(() -> + getMetastoreClientThenCall(metastoreContext, client -> { + String filter = HIVE_FILTER_FIELD_PARAMS + PRESTO_VIEW_FLAG + " = \"true\""; + return Optional.of(client.getTableNamesByFilter(databaseName, filter)); + }))); } catch (UnknownDBException e) { return Optional.empty(); @@ -743,18 +728,17 @@ public Optional> getAllViews(String databaseName) } @Override - public void createDatabase(Database database) + public void createDatabase(MetastoreContext metastoreContext, Database database) { try { retry() .stopOn(AlreadyExistsException.class, InvalidObjectException.class, MetaException.class) .stopOnIllegalExceptions() - .run("createDatabase", stats.getCreateDatabase().wrap(() -> { - try (HiveMetastoreClient client = clientProvider.createMetastoreClient()) { - client.createDatabase(database); - } - return null; - })); + 
.run("createDatabase", stats.getCreateDatabase().wrap(() -> + getMetastoreClientThenCall(metastoreContext, client -> { + client.createDatabase(database); + return null; + }))); } catch (AlreadyExistsException e) { throw new SchemaAlreadyExistsException(database.getName()); @@ -768,18 +752,17 @@ public void createDatabase(Database database) } @Override - public void dropDatabase(String databaseName) + public void dropDatabase(MetastoreContext metastoreContext, String databaseName) { try { retry() .stopOn(NoSuchObjectException.class, InvalidOperationException.class) .stopOnIllegalExceptions() - .run("dropDatabase", stats.getDropDatabase().wrap(() -> { - try (HiveMetastoreClient client = clientProvider.createMetastoreClient()) { - client.dropDatabase(databaseName, false, false); - } - return null; - })); + .run("dropDatabase", stats.getAlterDatabase().wrap(() -> + getMetastoreClientThenCall(metastoreContext, client -> { + client.dropDatabase(databaseName, false, false); + return null; + }))); } catch (NoSuchObjectException e) { throw new SchemaNotFoundException(databaseName); @@ -793,18 +776,17 @@ public void dropDatabase(String databaseName) } @Override - public void alterDatabase(String databaseName, Database database) + public void alterDatabase(MetastoreContext metastoreContext, String databaseName, Database database) { try { retry() .stopOn(NoSuchObjectException.class, MetaException.class) .stopOnIllegalExceptions() - .run("alterDatabase", stats.getAlterDatabase().wrap(() -> { - try (HiveMetastoreClient client = clientProvider.createMetastoreClient()) { - client.alterDatabase(databaseName, database); - } - return null; - })); + .run("alterDatabase", stats.getAlterDatabase().wrap(() -> + getMetastoreClientThenCall(metastoreContext, client -> { + client.alterDatabase(databaseName, database); + return null; + }))); } catch (NoSuchObjectException e) { throw new SchemaNotFoundException(databaseName); @@ -818,18 +800,17 @@ public void alterDatabase(String databaseName, 
Database database) } @Override - public void createTable(Table table) + public void createTable(MetastoreContext metastoreContext, Table table) { try { retry() .stopOn(AlreadyExistsException.class, InvalidObjectException.class, MetaException.class, NoSuchObjectException.class) .stopOnIllegalExceptions() - .run("createTable", stats.getCreateTable().wrap(() -> { - try (HiveMetastoreClient client = clientProvider.createMetastoreClient()) { - client.createTable(table); - } - return null; - })); + .run("createTable", stats.getCreateTable().wrap(() -> + getMetastoreClientThenCall(metastoreContext, client -> { + client.createTable(table); + return null; + }))); } catch (AlreadyExistsException e) { throw new TableAlreadyExistsException(new SchemaTableName(table.getDbName(), table.getTableName())); @@ -846,18 +827,17 @@ public void createTable(Table table) } @Override - public void dropTable(String databaseName, String tableName, boolean deleteData) + public void dropTable(MetastoreContext metastoreContext, String databaseName, String tableName, boolean deleteData) { try { retry() .stopOn(NoSuchObjectException.class) .stopOnIllegalExceptions() - .run("dropTable", stats.getDropTable().wrap(() -> { - try (HiveMetastoreClient client = clientProvider.createMetastoreClient()) { - client.dropTable(databaseName, tableName, deleteData); - } - return null; - })); + .run("dropTable", stats.getDropTable().wrap(() -> + getMetastoreClientThenCall(metastoreContext, client -> { + client.dropTable(databaseName, tableName, deleteData); + return null; + }))); } catch (NoSuchObjectException e) { throw new TableNotFoundException(new SchemaTableName(databaseName, tableName)); @@ -871,22 +851,21 @@ public void dropTable(String databaseName, String tableName, boolean deleteData) } @Override - public void alterTable(String databaseName, String tableName, Table table) + public void alterTable(MetastoreContext metastoreContext, String databaseName, String tableName, Table table) { try { retry() 
.stopOn(InvalidOperationException.class, MetaException.class) .stopOnIllegalExceptions() - .run("alterTable", stats.getAlterTable().wrap(() -> { - try (HiveMetastoreClient client = clientProvider.createMetastoreClient()) { - Optional
source = getTable(databaseName, tableName); - if (!source.isPresent()) { - throw new TableNotFoundException(new SchemaTableName(databaseName, tableName)); - } - client.alterTable(databaseName, tableName, table); - } - return null; - })); + .run("alterTable", stats.getAlterTable().wrap(() -> + getMetastoreClientThenCall(metastoreContext, client -> { + Optional
source = getTable(metastoreContext, databaseName, tableName); + if (!source.isPresent()) { + throw new TableNotFoundException(new SchemaTableName(databaseName, tableName)); + } + client.alterTable(databaseName, tableName, table); + return null; + }))); } catch (NoSuchObjectException e) { throw new TableNotFoundException(new SchemaTableName(databaseName, tableName)); @@ -900,17 +879,14 @@ public void alterTable(String databaseName, String tableName, Table table) } @Override - public Optional> getPartitionNames(String databaseName, String tableName) + public Optional> getPartitionNames(MetastoreContext metastoreContext, String databaseName, String tableName) { try { return retry() .stopOn(NoSuchObjectException.class) .stopOnIllegalExceptions() - .run("getPartitionNames", stats.getGetPartitionNames().wrap(() -> { - try (HiveMetastoreClient client = clientProvider.createMetastoreClient()) { - return Optional.of(client.getPartitionNames(databaseName, tableName)); - } - })); + .run("getPartitionNames", stats.getGetPartitionNames().wrap(() -> + getMetastoreClientThenCall(metastoreContext, client -> Optional.of(client.getPartitionNames(databaseName, tableName))))); } catch (NoSuchObjectException e) { return Optional.empty(); @@ -924,17 +900,14 @@ public Optional> getPartitionNames(String databaseName, String tabl } @Override - public Optional> getPartitionNamesByParts(String databaseName, String tableName, List parts) + public Optional> getPartitionNamesByParts(MetastoreContext metastoreContext, String databaseName, String tableName, List parts) { try { return retry() .stopOn(NoSuchObjectException.class) .stopOnIllegalExceptions() - .run("getPartitionNamesByParts", stats.getGetPartitionNamesPs().wrap(() -> { - try (HiveMetastoreClient client = clientProvider.createMetastoreClient()) { - return Optional.of(client.getPartitionNamesFiltered(databaseName, tableName, parts)); - } - })); + .run("getPartitionNamesByParts", stats.getGetPartitionNamesPs().wrap(() -> + 
getMetastoreClientThenCall(metastoreContext, client -> Optional.of(client.getPartitionNamesFiltered(databaseName, tableName, parts))))); } catch (NoSuchObjectException e) { return Optional.empty(); @@ -948,25 +921,25 @@ public Optional> getPartitionNamesByParts(String databaseName, Stri } @Override - public List getPartitionNamesByFilter(String databaseName, String tableName, Map partitionPredicates) + public List getPartitionNamesByFilter(MetastoreContext metastoreContext, String databaseName, String tableName, Map partitionPredicates) { List parts = convertPredicateToParts(partitionPredicates); - return getPartitionNamesByParts(databaseName, tableName, parts).orElse(ImmutableList.of()); + return getPartitionNamesByParts(metastoreContext, databaseName, tableName, parts).orElse(ImmutableList.of()); } @Override - public void addPartitions(String databaseName, String tableName, List partitionsWithStatistics) + public void addPartitions(MetastoreContext metastoreContext, String databaseName, String tableName, List partitionsWithStatistics) { List partitions = partitionsWithStatistics.stream() .map(ThriftMetastoreUtil::toMetastoreApiPartition) .collect(toImmutableList()); - addPartitionsWithoutStatistics(databaseName, tableName, partitions); + addPartitionsWithoutStatistics(metastoreContext, databaseName, tableName, partitions); for (PartitionWithStatistics partitionWithStatistics : partitionsWithStatistics) { - storePartitionColumnStatistics(databaseName, tableName, partitionWithStatistics.getPartitionName(), partitionWithStatistics); + storePartitionColumnStatistics(metastoreContext, databaseName, tableName, partitionWithStatistics.getPartitionName(), partitionWithStatistics); } } - private void addPartitionsWithoutStatistics(String databaseName, String tableName, List partitions) + private void addPartitionsWithoutStatistics(MetastoreContext metastoreContext, String databaseName, String tableName, List partitions) { if (partitions.isEmpty()) { return; @@ -975,16 
+948,15 @@ private void addPartitionsWithoutStatistics(String databaseName, String tableNam retry() .stopOn(AlreadyExistsException.class, InvalidObjectException.class, MetaException.class, NoSuchObjectException.class, PrestoException.class) .stopOnIllegalExceptions() - .run("addPartitions", stats.getAddPartitions().wrap(() -> { - try (HiveMetastoreClient client = clientProvider.createMetastoreClient()) { - int partitionsAdded = client.addPartitions(partitions); - if (partitionsAdded != partitions.size()) { - throw new PrestoException(HIVE_METASTORE_ERROR, - format("Hive metastore only added %s of %s partitions", partitionsAdded, partitions.size())); - } - return null; - } - })); + .run("addPartitions", stats.getAddPartitions().wrap(() -> + getMetastoreClientThenCall(metastoreContext, client -> { + int partitionsAdded = client.addPartitions(partitions); + if (partitionsAdded != partitions.size()) { + throw new PrestoException(HIVE_METASTORE_ERROR, + format("Hive metastore only added %s of %s partitions", partitionsAdded, partitions.size())); + } + return null; + }))); } catch (AlreadyExistsException e) { throw new PrestoException(ALREADY_EXISTS, format("One or more partitions already exist for table '%s.%s'", databaseName, tableName), e); @@ -1000,19 +972,42 @@ private void addPartitionsWithoutStatistics(String databaseName, String tableNam } } + private V getMetastoreClientThenCall(MetastoreContext metastoreContext, MetastoreCallable callable) + throws Exception + { + if (!impersonationEnabled) { + try (HiveMetastoreClient client = clientProvider.createMetastoreClient(Optional.empty())) { + return callable.call(client); + } + } + String token; + try (HiveMetastoreClient client = clientProvider.createMetastoreClient(Optional.empty())) { + token = client.getDelegationToken(metastoreContext.getUsername(), metastoreContext.getUsername()); + } + try (HiveMetastoreClient realClient = clientProvider.createMetastoreClient(Optional.of(token))) { + return 
callable.call(realClient); + } + } + + @FunctionalInterface + public interface MetastoreCallable + { + V call(HiveMetastoreClient client) + throws Exception; + } + @Override - public void dropPartition(String databaseName, String tableName, List parts, boolean deleteData) + public void dropPartition(MetastoreContext metastoreContext, String databaseName, String tableName, List parts, boolean deleteData) { try { retry() .stopOn(NoSuchObjectException.class, MetaException.class) .stopOnIllegalExceptions() - .run("dropPartition", stats.getDropPartition().wrap(() -> { - try (HiveMetastoreClient client = clientProvider.createMetastoreClient()) { - client.dropPartition(databaseName, tableName, parts, deleteData); - } - return null; - })); + .run("dropPartition", stats.getDropPartition().wrap(() -> + getMetastoreClientThenCall(metastoreContext, client -> { + client.dropPartition(databaseName, tableName, parts, deleteData); + return null; + }))); } catch (NoSuchObjectException e) { throw new PartitionNotFoundException(new SchemaTableName(databaseName, tableName), parts); @@ -1026,25 +1021,24 @@ public void dropPartition(String databaseName, String tableName, List pa } @Override - public void alterPartition(String databaseName, String tableName, PartitionWithStatistics partitionWithStatistics) + public void alterPartition(MetastoreContext metastoreContext, String databaseName, String tableName, PartitionWithStatistics partitionWithStatistics) { - alterPartitionWithoutStatistics(databaseName, tableName, toMetastoreApiPartition(partitionWithStatistics)); - storePartitionColumnStatistics(databaseName, tableName, partitionWithStatistics.getPartitionName(), partitionWithStatistics); - dropExtraColumnStatisticsAfterAlterPartition(databaseName, tableName, partitionWithStatistics); + alterPartitionWithoutStatistics(metastoreContext, databaseName, tableName, toMetastoreApiPartition(partitionWithStatistics)); + storePartitionColumnStatistics(metastoreContext, databaseName, tableName, 
partitionWithStatistics.getPartitionName(), partitionWithStatistics); + dropExtraColumnStatisticsAfterAlterPartition(metastoreContext, databaseName, tableName, partitionWithStatistics); } - private void alterPartitionWithoutStatistics(String databaseName, String tableName, Partition partition) + private void alterPartitionWithoutStatistics(MetastoreContext metastoreContext, String databaseName, String tableName, Partition partition) { try { retry() .stopOn(NoSuchObjectException.class, MetaException.class) .stopOnIllegalExceptions() - .run("alterPartition", stats.getAlterPartition().wrap(() -> { - try (HiveMetastoreClient client = clientProvider.createMetastoreClient()) { - client.alterPartition(databaseName, tableName, partition); - } - return null; - })); + .run("alterPartition", stats.getAlterPartition().wrap(() -> + getMetastoreClientThenCall(metastoreContext, client -> { + client.alterPartition(databaseName, tableName, partition); + return null; + }))); } catch (NoSuchObjectException e) { throw new PartitionNotFoundException(new SchemaTableName(databaseName, tableName), partition.getValues()); @@ -1057,7 +1051,7 @@ private void alterPartitionWithoutStatistics(String databaseName, String tableNa } } - private void storePartitionColumnStatistics(String databaseName, String tableName, String partitionName, PartitionWithStatistics partitionWithStatistics) + private void storePartitionColumnStatistics(MetastoreContext metastoreContext, String databaseName, String tableName, String partitionName, PartitionWithStatistics partitionWithStatistics) { PartitionStatistics statistics = partitionWithStatistics.getStatistics(); Map columnStatistics = statistics.getColumnStatistics(); @@ -1066,7 +1060,7 @@ private void storePartitionColumnStatistics(String databaseName, String tableNam } Map columnTypes = partitionWithStatistics.getPartition().getColumns().stream() .collect(toImmutableMap(Column::getName, Column::getType)); - setPartitionColumnStatistics(databaseName, 
tableName, partitionName, columnTypes, columnStatistics, statistics.getBasicStatistics().getRowCount()); + setPartitionColumnStatistics(metastoreContext, databaseName, tableName, partitionName, columnTypes, columnStatistics, statistics.getBasicStatistics().getRowCount()); } /* @@ -1078,6 +1072,7 @@ private void storePartitionColumnStatistics(String databaseName, String tableNam * if is needed to explicitly remove the statistics from the metastore for that columns. */ private void dropExtraColumnStatisticsAfterAlterPartition( + MetastoreContext metastoreContext, String databaseName, String tableName, PartitionWithStatistics partitionWithStatistics) @@ -1099,6 +1094,7 @@ private void dropExtraColumnStatisticsAfterAlterPartition( // when trying to remove any missing statistics the metastore throws NoSuchObjectException String partitionName = partitionWithStatistics.getPartitionName(); List statisticsToBeRemoved = getMetastorePartitionColumnStatistics( + metastoreContext, databaseName, tableName, ImmutableSet.of(partitionName), @@ -1106,23 +1102,20 @@ private void dropExtraColumnStatisticsAfterAlterPartition( .getOrDefault(partitionName, ImmutableList.of()); for (ColumnStatisticsObj statistics : statisticsToBeRemoved) { - deletePartitionColumnStatistics(databaseName, tableName, partitionName, statistics.getColName()); + deletePartitionColumnStatistics(metastoreContext, databaseName, tableName, partitionName, statistics.getColName()); } } @Override - public Optional getPartition(String databaseName, String tableName, List partitionValues) + public Optional getPartition(MetastoreContext metastoreContext, String databaseName, String tableName, List partitionValues) { requireNonNull(partitionValues, "partitionValues is null"); try { return retry() .stopOn(NoSuchObjectException.class) .stopOnIllegalExceptions() - .run("getPartition", stats.getGetPartition().wrap(() -> { - try (HiveMetastoreClient client = clientProvider.createMetastoreClient()) { - return 
Optional.of(client.getPartition(databaseName, tableName, partitionValues)); - } - })); + .run("getPartition", stats.getGetPartition().wrap(() -> + getMetastoreClientThenCall(metastoreContext, client -> Optional.of(client.getPartition(databaseName, tableName, partitionValues))))); } catch (NoSuchObjectException e) { return Optional.empty(); @@ -1136,7 +1129,7 @@ public Optional getPartition(String databaseName, String tableName, L } @Override - public List getPartitionsByNames(String databaseName, String tableName, List partitionNames) + public List getPartitionsByNames(MetastoreContext metastoreContext, String databaseName, String tableName, List partitionNames) { requireNonNull(partitionNames, "partitionNames is null"); checkArgument(!Iterables.isEmpty(partitionNames), "partitionNames is empty"); @@ -1145,11 +1138,8 @@ public List getPartitionsByNames(String databaseName, String tableNam return retry() .stopOn(NoSuchObjectException.class) .stopOnIllegalExceptions() - .run("getPartitionsByNames", stats.getGetPartitionsByNames().wrap(() -> { - try (HiveMetastoreClient client = clientProvider.createMetastoreClient()) { - return client.getPartitionsByNames(databaseName, tableName, partitionNames); - } - })); + .run("getPartitionsByNames", stats.getGetPartitionsByNames().wrap(() -> + getMetastoreClientThenCall(metastoreContext, client -> client.getPartitionsByNames(databaseName, tableName, partitionNames)))); } catch (NoSuchObjectException e) { // assume none of the partitions in the batch are available @@ -1164,7 +1154,7 @@ public List getPartitionsByNames(String databaseName, String tableNam } @Override - public void grantTablePrivileges(String databaseName, String tableName, PrestoPrincipal grantee, Set privileges) + public void grantTablePrivileges(MetastoreContext metastoreContext, String databaseName, String tableName, PrestoPrincipal grantee, Set privileges) { Set requestedPrivileges = privileges.stream() 
.map(ThriftMetastoreUtil::toMetastoreApiPrivilegeGrantInfo) @@ -1174,37 +1164,34 @@ public void grantTablePrivileges(String databaseName, String tableName, PrestoPr try { retry() .stopOnIllegalExceptions() - .run("grantTablePrivileges", stats.getGrantTablePrivileges().wrap(() -> { - try (HiveMetastoreClient metastoreClient = clientProvider.createMetastoreClient()) { - Set existingPrivileges = listTablePrivileges(databaseName, tableName, grantee); - - Set privilegesToGrant = new HashSet<>(requestedPrivileges); - Iterator iterator = privilegesToGrant.iterator(); - while (iterator.hasNext()) { - HivePrivilegeInfo requestedPrivilege = getOnlyElement(parsePrivilege(iterator.next(), Optional.empty())); - - for (HivePrivilegeInfo existingPrivilege : existingPrivileges) { - if ((requestedPrivilege.isContainedIn(existingPrivilege))) { - iterator.remove(); - } - else if (existingPrivilege.isContainedIn(requestedPrivilege)) { - throw new PrestoException(NOT_SUPPORTED, format( - "Granting %s WITH GRANT OPTION is not supported while %s possesses %s", - requestedPrivilege.getHivePrivilege().name(), - grantee, - requestedPrivilege.getHivePrivilege().name())); + .run("grantTablePrivileges", stats.getGrantTablePrivileges().wrap(() -> + getMetastoreClientThenCall(metastoreContext, client -> { + Set existingPrivileges = listTablePrivileges(metastoreContext, databaseName, tableName, grantee); + Set privilegesToGrant = new HashSet<>(requestedPrivileges); + Iterator iterator = privilegesToGrant.iterator(); + while (iterator.hasNext()) { + HivePrivilegeInfo requestedPrivilege = getOnlyElement(parsePrivilege(iterator.next(), Optional.empty())); + + for (HivePrivilegeInfo existingPrivilege : existingPrivileges) { + if ((requestedPrivilege.isContainedIn(existingPrivilege))) { + iterator.remove(); + } + else if (existingPrivilege.isContainedIn(requestedPrivilege)) { + throw new PrestoException(NOT_SUPPORTED, format( + "Granting %s WITH GRANT OPTION is not supported while %s possesses %s", + 
requestedPrivilege.getHivePrivilege().name(), + grantee, + requestedPrivilege.getHivePrivilege().name())); + } } } - } - if (privilegesToGrant.isEmpty()) { - return null; - } + if (privilegesToGrant.isEmpty()) { + return null; + } - metastoreClient.grantPrivileges(buildPrivilegeBag(databaseName, tableName, grantee, privilegesToGrant)); - } - return null; - })); + return client.grantPrivileges(buildPrivilegeBag(databaseName, tableName, grantee, privilegesToGrant)); + }))); } catch (TException e) { throw new PrestoException(HIVE_METASTORE_ERROR, e); @@ -1215,7 +1202,7 @@ else if (existingPrivilege.isContainedIn(requestedPrivilege)) { } @Override - public void revokeTablePrivileges(String databaseName, String tableName, PrestoPrincipal grantee, Set privileges) + public void revokeTablePrivileges(MetastoreContext metastoreContext, String databaseName, String tableName, PrestoPrincipal grantee, Set privileges) { Set requestedPrivileges = privileges.stream() .map(ThriftMetastoreUtil::toMetastoreApiPrivilegeGrantInfo) @@ -1225,24 +1212,22 @@ public void revokeTablePrivileges(String databaseName, String tableName, PrestoP try { retry() .stopOnIllegalExceptions() - .run("revokeTablePrivileges", stats.getRevokeTablePrivileges().wrap(() -> { - try (HiveMetastoreClient metastoreClient = clientProvider.createMetastoreClient()) { - Set existingHivePrivileges = listTablePrivileges(databaseName, tableName, grantee).stream() - .map(HivePrivilegeInfo::getHivePrivilege) - .collect(toSet()); - - Set privilegesToRevoke = requestedPrivileges.stream() - .filter(privilegeGrantInfo -> existingHivePrivileges.contains(getOnlyElement(parsePrivilege(privilegeGrantInfo, Optional.empty())).getHivePrivilege())) - .collect(toSet()); - - if (privilegesToRevoke.isEmpty()) { - return null; - } + .run("revokeTablePrivileges", stats.getRevokeTablePrivileges().wrap(() -> + getMetastoreClientThenCall(metastoreContext, client -> { + Set existingHivePrivileges = listTablePrivileges(metastoreContext, 
databaseName, tableName, grantee).stream() + .map(HivePrivilegeInfo::getHivePrivilege) + .collect(toSet()); + + Set privilegesToRevoke = requestedPrivileges.stream() + .filter(privilegeGrantInfo -> existingHivePrivileges.contains(getOnlyElement(parsePrivilege(privilegeGrantInfo, Optional.empty())).getHivePrivilege())) + .collect(toSet()); + + if (privilegesToRevoke.isEmpty()) { + return null; + } - metastoreClient.revokePrivileges(buildPrivilegeBag(databaseName, tableName, grantee, privilegesToRevoke)); - } - return null; - })); + return client.revokePrivileges(buildPrivilegeBag(databaseName, tableName, grantee, privilegesToRevoke)); + }))); } catch (TException e) { throw new PrestoException(HIVE_METASTORE_ERROR, e); @@ -1253,39 +1238,38 @@ public void revokeTablePrivileges(String databaseName, String tableName, PrestoP } @Override - public Set listTablePrivileges(String databaseName, String tableName, PrestoPrincipal principal) + public Set listTablePrivileges(MetastoreContext metastoreContext, String databaseName, String tableName, PrestoPrincipal principal) { try { return retry() .stopOnIllegalExceptions() - .run("getListPrivileges", stats.getListPrivileges().wrap(() -> { - try (HiveMetastoreClient client = clientProvider.createMetastoreClient()) { - Table table = client.getTable(databaseName, tableName); - ImmutableSet.Builder privileges = ImmutableSet.builder(); - List hiveObjectPrivilegeList; - // principal can be null when we want to list all privileges for admins - if (principal == null) { - hiveObjectPrivilegeList = client.listPrivileges( - null, - null, - new HiveObjectRef(TABLE, databaseName, tableName, null, null)); - } - else { - if (principal.getType() == USER && table.getOwner().equals(principal.getName())) { - privileges.add(new HivePrivilegeInfo(OWNERSHIP, true, principal, principal)); + .run("getListPrivileges", stats.getListPrivileges().wrap(() -> + getMetastoreClientThenCall(metastoreContext, client -> { + Table table = 
client.getTable(databaseName, tableName); + ImmutableSet.Builder privileges = ImmutableSet.builder(); + List hiveObjectPrivilegeList; + // principal can be null when we want to list all privileges for admins + if (principal == null) { + hiveObjectPrivilegeList = client.listPrivileges( + null, + null, + new HiveObjectRef(TABLE, databaseName, tableName, null, null)); } - hiveObjectPrivilegeList = client.listPrivileges( - principal.getName(), - fromPrestoPrincipalType(principal.getType()), - new HiveObjectRef(TABLE, databaseName, tableName, null, null)); - } - for (HiveObjectPrivilege hiveObjectPrivilege : hiveObjectPrivilegeList) { - PrestoPrincipal grantee = new PrestoPrincipal(fromMetastoreApiPrincipalType(hiveObjectPrivilege.getPrincipalType()), hiveObjectPrivilege.getPrincipalName()); - privileges.addAll(parsePrivilege(hiveObjectPrivilege.getGrantInfo(), Optional.of(grantee))); - } - return privileges.build(); - } - })); + else { + if (principal.getType() == USER && table.getOwner().equals(principal.getName())) { + privileges.add(new HivePrivilegeInfo(OWNERSHIP, true, principal, principal)); + } + hiveObjectPrivilegeList = client.listPrivileges( + principal.getName(), + fromPrestoPrincipalType(principal.getType()), + new HiveObjectRef(TABLE, databaseName, tableName, null, null)); + } + for (HiveObjectPrivilege hiveObjectPrivilege : hiveObjectPrivilegeList) { + PrestoPrincipal grantee = new PrestoPrincipal(fromMetastoreApiPrincipalType(hiveObjectPrivilege.getPrincipalType()), hiveObjectPrivilege.getPrincipalName()); + privileges.addAll(parsePrivilege(hiveObjectPrivilege.getGrantInfo(), Optional.of(grantee))); + } + return privileges.build(); + }))); } catch (TException e) { throw new PrestoException(HIVE_METASTORE_ERROR, e); diff --git a/presto-hive-metastore/src/main/java/com/facebook/presto/hive/metastore/thrift/ThriftHiveMetastoreClient.java b/presto-hive-metastore/src/main/java/com/facebook/presto/hive/metastore/thrift/ThriftHiveMetastoreClient.java index 
59021cf2d1edd..fddefc67da263 100644 --- a/presto-hive-metastore/src/main/java/com/facebook/presto/hive/metastore/thrift/ThriftHiveMetastoreClient.java +++ b/presto-hive-metastore/src/main/java/com/facebook/presto/hive/metastore/thrift/ThriftHiveMetastoreClient.java @@ -71,6 +71,13 @@ public void close() transport.close(); } + @Override + public String getDelegationToken(String owner, String renewer) + throws TException + { + return client.get_delegation_token(owner, renewer); + } + @Override public List getAllDatabases() throws TException diff --git a/presto-hive-metastore/src/main/java/com/facebook/presto/hive/metastore/thrift/ThriftHiveMetastoreStats.java b/presto-hive-metastore/src/main/java/com/facebook/presto/hive/metastore/thrift/ThriftHiveMetastoreStats.java index fa5f5cebd6192..a95f9290972ff 100644 --- a/presto-hive-metastore/src/main/java/com/facebook/presto/hive/metastore/thrift/ThriftHiveMetastoreStats.java +++ b/presto-hive-metastore/src/main/java/com/facebook/presto/hive/metastore/thrift/ThriftHiveMetastoreStats.java @@ -26,6 +26,8 @@ public class ThriftHiveMetastoreStats private final HiveMetastoreApiStats getFields = new HiveMetastoreApiStats(); private final HiveMetastoreApiStats getTableColumnStatistics = new HiveMetastoreApiStats(); private final HiveMetastoreApiStats getPartitionColumnStatistics = new HiveMetastoreApiStats(); + private final HiveMetastoreApiStats updateTableColumnStatistics = new HiveMetastoreApiStats(); + private final HiveMetastoreApiStats updatePartitionColumnStatistics = new HiveMetastoreApiStats(); private final HiveMetastoreApiStats getPartitionNames = new HiveMetastoreApiStats(); private final HiveMetastoreApiStats getPartitionNamesPs = new HiveMetastoreApiStats(); private final HiveMetastoreApiStats getPartition = new HiveMetastoreApiStats(); @@ -105,6 +107,20 @@ public HiveMetastoreApiStats getGetPartitionColumnStatistics() return getPartitionColumnStatistics; } + @Managed + @Nested + public HiveMetastoreApiStats 
getUpdateTableColumnStatistics() + { + return updateTableColumnStatistics; + } + + @Managed + @Nested + public HiveMetastoreApiStats getUpdatePartitionColumnStatistics() + { + return updatePartitionColumnStatistics; + } + @Managed @Nested public HiveMetastoreApiStats getGetPartitionNames() diff --git a/presto-hive-metastore/src/main/java/com/facebook/presto/hive/metastore/thrift/ThriftMetastoreModule.java b/presto-hive-metastore/src/main/java/com/facebook/presto/hive/metastore/thrift/ThriftMetastoreModule.java index 0da8535f2bf14..074aec9bccbbc 100644 --- a/presto-hive-metastore/src/main/java/com/facebook/presto/hive/metastore/thrift/ThriftMetastoreModule.java +++ b/presto-hive-metastore/src/main/java/com/facebook/presto/hive/metastore/thrift/ThriftMetastoreModule.java @@ -20,6 +20,7 @@ import com.facebook.presto.hive.metastore.CachingHiveMetastore; import com.facebook.presto.hive.metastore.ExtendedHiveMetastore; import com.facebook.presto.hive.metastore.RecordingHiveMetastore; +import com.facebook.presto.spi.ConnectorId; import com.google.inject.Binder; import com.google.inject.Scopes; @@ -46,6 +47,7 @@ protected void setup(Binder binder) configBinder(binder).bindConfig(StaticMetastoreConfig.class); binder.bind(HiveMetastore.class).to(ThriftHiveMetastore.class).in(Scopes.SINGLETON); + binder.bind(ConnectorId.class).toInstance(new ConnectorId(connectorId)); if (buildConfigObject(MetastoreClientConfig.class).getRecordingPath() != null) { binder.bind(ExtendedHiveMetastore.class) diff --git a/presto-hive-metastore/src/main/java/com/facebook/presto/hive/metastore/thrift/ThriftMetastoreUtil.java b/presto-hive-metastore/src/main/java/com/facebook/presto/hive/metastore/thrift/ThriftMetastoreUtil.java index 5d64b3665b94f..426a27b9267fa 100644 --- a/presto-hive-metastore/src/main/java/com/facebook/presto/hive/metastore/thrift/ThriftMetastoreUtil.java +++ b/presto-hive-metastore/src/main/java/com/facebook/presto/hive/metastore/thrift/ThriftMetastoreUtil.java @@ -20,6 +20,7 
@@ import com.facebook.presto.hive.metastore.Database; import com.facebook.presto.hive.metastore.HiveColumnStatistics; import com.facebook.presto.hive.metastore.HivePrivilegeInfo; +import com.facebook.presto.hive.metastore.MetastoreContext; import com.facebook.presto.hive.metastore.MetastoreUtil; import com.facebook.presto.hive.metastore.Partition; import com.facebook.presto.hive.metastore.PartitionWithStatistics; @@ -252,18 +253,18 @@ protected RoleGrant computeNext() }); } - public static boolean isRoleApplicable(SemiTransactionalHiveMetastore metastore, PrestoPrincipal principal, String role) + public static boolean isRoleApplicable(SemiTransactionalHiveMetastore metastore, ConnectorIdentity identity, PrestoPrincipal principal, String role) { if (principal.getType() == ROLE && principal.getName().equals(role)) { return true; } - return listApplicableRoles(metastore, principal) + return listApplicableRoles(metastore, identity, principal) .anyMatch(role::equals); } - public static Stream listApplicableRoles(SemiTransactionalHiveMetastore metastore, PrestoPrincipal principal) + public static Stream listApplicableRoles(SemiTransactionalHiveMetastore metastore, ConnectorIdentity identity, PrestoPrincipal principal) { - return listApplicableRoles(principal, metastore::listRoleGrants) + return listApplicableRoles(principal, (PrestoPrincipal p) -> metastore.listRoleGrants(new MetastoreContext(identity), p)) .map(RoleGrant::getRoleName); } @@ -271,28 +272,28 @@ public static Stream listEnabledPrincipals(SemiTransactionalHiv { return Stream.concat( Stream.of(new PrestoPrincipal(USER, identity.getUser())), - listEnabledRoles(identity, metastore::listRoleGrants) + listEnabledRoles(identity, (PrestoPrincipal p) -> metastore.listRoleGrants(new MetastoreContext(identity), p)) .map(role -> new PrestoPrincipal(ROLE, role))); } public static Stream listEnabledTablePrivileges(SemiTransactionalHiveMetastore metastore, String databaseName, String tableName, ConnectorIdentity 
identity) { - return listTablePrivileges(metastore, databaseName, tableName, listEnabledPrincipals(metastore, identity)); + return listTablePrivileges(identity, metastore, databaseName, tableName, listEnabledPrincipals(metastore, identity)); } - public static Stream listApplicableTablePrivileges(SemiTransactionalHiveMetastore metastore, String databaseName, String tableName, String user) + public static Stream listApplicableTablePrivileges(SemiTransactionalHiveMetastore metastore, ConnectorIdentity identity, String databaseName, String tableName, String user) { PrestoPrincipal userPrincipal = new PrestoPrincipal(USER, user); Stream principals = Stream.concat( Stream.of(userPrincipal), - listApplicableRoles(metastore, userPrincipal) + listApplicableRoles(metastore, identity, userPrincipal) .map(role -> new PrestoPrincipal(ROLE, role))); - return listTablePrivileges(metastore, databaseName, tableName, principals); + return listTablePrivileges(identity, metastore, databaseName, tableName, principals); } - private static Stream listTablePrivileges(SemiTransactionalHiveMetastore metastore, String databaseName, String tableName, Stream principals) + private static Stream listTablePrivileges(ConnectorIdentity identity, SemiTransactionalHiveMetastore metastore, String databaseName, String tableName, Stream principals) { - return principals.flatMap(principal -> metastore.listTablePrivileges(databaseName, tableName, principal).stream()); + return principals.flatMap(principal -> metastore.listTablePrivileges(new MetastoreContext(identity), databaseName, tableName, principal).stream()); } public static boolean isRoleEnabled(ConnectorIdentity identity, Function> listRoleGrants, String role) diff --git a/presto-hive-metastore/src/main/java/com/facebook/presto/hive/metastore/thrift/Transport.java b/presto-hive-metastore/src/main/java/com/facebook/presto/hive/metastore/thrift/Transport.java index f23c245ba85b1..3ab0dafd5480e 100644 --- 
a/presto-hive-metastore/src/main/java/com/facebook/presto/hive/metastore/thrift/Transport.java +++ b/presto-hive-metastore/src/main/java/com/facebook/presto/hive/metastore/thrift/Transport.java @@ -36,12 +36,13 @@ public static TTransport create( Optional sslContext, Optional socksProxy, int timeoutMillis, - HiveMetastoreAuthentication authentication) + HiveMetastoreAuthentication authentication, + Optional tokenString) throws TTransportException { try { TTransport rawTransport = createRaw(address, sslContext, socksProxy, timeoutMillis); - TTransport authenticatedTransport = authentication.authenticate(rawTransport, address.getHost()); + TTransport authenticatedTransport = authentication.authenticate(rawTransport, address.getHost(), tokenString); if (!authenticatedTransport.isOpen()) { authenticatedTransport.open(); } diff --git a/presto-hive-metastore/src/test/java/com/facebook/presto/hive/MockHiveMetastore.java b/presto-hive-metastore/src/test/java/com/facebook/presto/hive/MockHiveMetastore.java index 76f69767737ab..e7f4881bb7e80 100644 --- a/presto-hive-metastore/src/test/java/com/facebook/presto/hive/MockHiveMetastore.java +++ b/presto-hive-metastore/src/test/java/com/facebook/presto/hive/MockHiveMetastore.java @@ -15,6 +15,7 @@ import com.facebook.presto.common.predicate.Domain; import com.facebook.presto.hive.metastore.Column; +import com.facebook.presto.hive.metastore.MetastoreContext; import com.facebook.presto.hive.metastore.PartitionNameWithVersion; import com.facebook.presto.hive.metastore.thrift.ThriftHiveMetastore; import com.facebook.presto.spi.PrestoException; @@ -34,12 +35,12 @@ public class MockHiveMetastore public MockHiveMetastore(MockHiveCluster mockHiveCluster) { - super(mockHiveCluster); + super(mockHiveCluster, new MetastoreClientConfig()); this.clientProvider = requireNonNull(mockHiveCluster, "mockHiveCluster is null"); } @Override - public List getPartitionNamesWithVersionByFilter(String databaseName, String tableName, Map 
partitionPredicates) + public List getPartitionNamesWithVersionByFilter(MetastoreContext context, String databaseName, String tableName, Map partitionPredicates) { try { return clientProvider.createPartitionVersionSupportedMetastoreClient().getPartitionNamesWithVersionByFilter(databaseName, tableName, partitionPredicates); diff --git a/presto-hive-metastore/src/test/java/com/facebook/presto/hive/metastore/TestCachingHiveMetastore.java b/presto-hive-metastore/src/test/java/com/facebook/presto/hive/metastore/TestCachingHiveMetastore.java index 8addb1a9e13ff..89b1ce3e6e296 100644 --- a/presto-hive-metastore/src/test/java/com/facebook/presto/hive/metastore/TestCachingHiveMetastore.java +++ b/presto-hive-metastore/src/test/java/com/facebook/presto/hive/metastore/TestCachingHiveMetastore.java @@ -13,6 +13,7 @@ */ package com.facebook.presto.hive.metastore; +import com.facebook.presto.hive.MetastoreClientConfig; import com.facebook.presto.hive.MockHiveMetastore; import com.facebook.presto.hive.PartitionMutator; import com.facebook.presto.hive.metastore.CachingHiveMetastore.MetastoreCacheScope; @@ -40,6 +41,7 @@ import static com.facebook.presto.hive.metastore.thrift.MockHiveMetastoreClient.BAD_DATABASE; import static com.facebook.presto.hive.metastore.thrift.MockHiveMetastoreClient.PARTITION_VERSION; import static com.facebook.presto.hive.metastore.thrift.MockHiveMetastoreClient.TEST_DATABASE; +import static com.facebook.presto.hive.metastore.thrift.MockHiveMetastoreClient.TEST_METASTORE_CONTEXT; import static com.facebook.presto.hive.metastore.thrift.MockHiveMetastoreClient.TEST_PARTITION1; import static com.facebook.presto.hive.metastore.thrift.MockHiveMetastoreClient.TEST_PARTITION2; import static com.facebook.presto.hive.metastore.thrift.MockHiveMetastoreClient.TEST_ROLES; @@ -67,11 +69,12 @@ public void setUp() mockClient = new MockHiveMetastoreClient(); MockHiveCluster mockHiveCluster = new MockHiveCluster(mockClient); ListeningExecutorService executor = 
listeningDecorator(newCachedThreadPool(daemonThreadsNamed("test-%s"))); - ThriftHiveMetastore thriftHiveMetastore = new ThriftHiveMetastore(mockHiveCluster); + ThriftHiveMetastore thriftHiveMetastore = new ThriftHiveMetastore(mockHiveCluster, new MetastoreClientConfig()); PartitionMutator hivePartitionMutator = new HivePartitionMutator(); metastore = new CachingHiveMetastore( new BridgingHiveMetastore(thriftHiveMetastore, hivePartitionMutator), executor, + false, new Duration(5, TimeUnit.MINUTES), new Duration(1, TimeUnit.MINUTES), 1000, @@ -84,14 +87,14 @@ public void setUp() public void testGetAllDatabases() { assertEquals(mockClient.getAccessCount(), 0); - assertEquals(metastore.getAllDatabases(), ImmutableList.of(TEST_DATABASE)); + assertEquals(metastore.getAllDatabases(TEST_METASTORE_CONTEXT), ImmutableList.of(TEST_DATABASE)); assertEquals(mockClient.getAccessCount(), 1); - assertEquals(metastore.getAllDatabases(), ImmutableList.of(TEST_DATABASE)); + assertEquals(metastore.getAllDatabases(TEST_METASTORE_CONTEXT), ImmutableList.of(TEST_DATABASE)); assertEquals(mockClient.getAccessCount(), 1); metastore.flushCache(); - assertEquals(metastore.getAllDatabases(), ImmutableList.of(TEST_DATABASE)); + assertEquals(metastore.getAllDatabases(TEST_METASTORE_CONTEXT), ImmutableList.of(TEST_DATABASE)); assertEquals(mockClient.getAccessCount(), 2); } @@ -99,43 +102,44 @@ public void testGetAllDatabases() public void testGetAllTable() { assertEquals(mockClient.getAccessCount(), 0); - assertEquals(metastore.getAllTables(TEST_DATABASE).get(), ImmutableList.of(TEST_TABLE)); + assertEquals(metastore.getAllTables(TEST_METASTORE_CONTEXT, TEST_DATABASE).get(), ImmutableList.of(TEST_TABLE)); assertEquals(mockClient.getAccessCount(), 1); - assertEquals(metastore.getAllTables(TEST_DATABASE).get(), ImmutableList.of(TEST_TABLE)); + assertEquals(metastore.getAllTables(TEST_METASTORE_CONTEXT, TEST_DATABASE).get(), ImmutableList.of(TEST_TABLE)); assertEquals(mockClient.getAccessCount(), 
1); metastore.flushCache(); - assertEquals(metastore.getAllTables(TEST_DATABASE).get(), ImmutableList.of(TEST_TABLE)); + assertEquals(metastore.getAllTables(TEST_METASTORE_CONTEXT, TEST_DATABASE).get(), ImmutableList.of(TEST_TABLE)); assertEquals(mockClient.getAccessCount(), 2); } public void testInvalidDbGetAllTAbles() { - assertFalse(metastore.getAllTables(BAD_DATABASE).isPresent()); + assertFalse(metastore.getAllTables(TEST_METASTORE_CONTEXT, BAD_DATABASE).isPresent()); } @Test public void testGetTable() { assertEquals(mockClient.getAccessCount(), 0); - assertNotNull(metastore.getTable(TEST_DATABASE, TEST_TABLE)); + assertNotNull(metastore.getTable(TEST_METASTORE_CONTEXT, TEST_DATABASE, TEST_TABLE)); assertEquals(mockClient.getAccessCount(), 1); - assertNotNull(metastore.getTable(TEST_DATABASE, TEST_TABLE)); + assertNotNull(metastore.getTable(TEST_METASTORE_CONTEXT, TEST_DATABASE, TEST_TABLE)); assertEquals(mockClient.getAccessCount(), 1); metastore.flushCache(); - assertNotNull(metastore.getTable(TEST_DATABASE, TEST_TABLE)); + assertNotNull(metastore.getTable(TEST_METASTORE_CONTEXT, TEST_DATABASE, TEST_TABLE)); assertEquals(mockClient.getAccessCount(), 2); } public void testInvalidDbGetTable() { - assertFalse(metastore.getTable(BAD_DATABASE, TEST_TABLE).isPresent()); + assertFalse(metastore.getTable(TEST_METASTORE_CONTEXT, BAD_DATABASE, TEST_TABLE).isPresent()); assertEquals(stats.getGetTable().getThriftExceptions().getTotalCount(), 0); assertEquals(stats.getGetTable().getTotalFailures().getTotalCount(), 0); + assertNotNull(stats.getGetTable().getTime()); } @Test @@ -143,21 +147,21 @@ public void testGetPartitionNames() { ImmutableList expectedPartitions = ImmutableList.of(TEST_PARTITION1, TEST_PARTITION2); assertEquals(mockClient.getAccessCount(), 0); - assertEquals(metastore.getPartitionNames(TEST_DATABASE, TEST_TABLE).get(), expectedPartitions); + assertEquals(metastore.getPartitionNames(TEST_METASTORE_CONTEXT, TEST_DATABASE, TEST_TABLE).get(), 
expectedPartitions); assertEquals(mockClient.getAccessCount(), 1); - assertEquals(metastore.getPartitionNames(TEST_DATABASE, TEST_TABLE).get(), expectedPartitions); + assertEquals(metastore.getPartitionNames(TEST_METASTORE_CONTEXT, TEST_DATABASE, TEST_TABLE).get(), expectedPartitions); assertEquals(mockClient.getAccessCount(), 1); metastore.flushCache(); - assertEquals(metastore.getPartitionNames(TEST_DATABASE, TEST_TABLE).get(), expectedPartitions); + assertEquals(metastore.getPartitionNames(TEST_METASTORE_CONTEXT, TEST_DATABASE, TEST_TABLE).get(), expectedPartitions); assertEquals(mockClient.getAccessCount(), 2); } @Test public void testInvalidGetPartitionNames() { - assertEquals(metastore.getPartitionNames(BAD_DATABASE, TEST_TABLE).get(), ImmutableList.of()); + assertEquals(metastore.getPartitionNames(TEST_METASTORE_CONTEXT, BAD_DATABASE, TEST_TABLE).get(), ImmutableList.of()); } @Test @@ -166,14 +170,14 @@ public void testGetPartitionNamesByParts() ImmutableList expectedPartitions = ImmutableList.of(TEST_PARTITION1, TEST_PARTITION2); assertEquals(mockClient.getAccessCount(), 0); - assertEquals(metastore.getPartitionNamesByFilter(TEST_DATABASE, TEST_TABLE, ImmutableMap.of()), expectedPartitions); + assertEquals(metastore.getPartitionNamesByFilter(TEST_METASTORE_CONTEXT, TEST_DATABASE, TEST_TABLE, ImmutableMap.of()), expectedPartitions); assertEquals(mockClient.getAccessCount(), 1); - assertEquals(metastore.getPartitionNamesByFilter(TEST_DATABASE, TEST_TABLE, ImmutableMap.of()), expectedPartitions); + assertEquals(metastore.getPartitionNamesByFilter(TEST_METASTORE_CONTEXT, TEST_DATABASE, TEST_TABLE, ImmutableMap.of()), expectedPartitions); assertEquals(mockClient.getAccessCount(), 1); metastore.flushCache(); - assertEquals(metastore.getPartitionNamesByFilter(TEST_DATABASE, TEST_TABLE, ImmutableMap.of()), expectedPartitions); + assertEquals(metastore.getPartitionNamesByFilter(TEST_METASTORE_CONTEXT, TEST_DATABASE, TEST_TABLE, ImmutableMap.of()), 
expectedPartitions); assertEquals(mockClient.getAccessCount(), 2); } @@ -207,6 +211,7 @@ public void testCachingWithPartitionVersioning() CachingHiveMetastore partitionCachingEnabledmetastore = new CachingHiveMetastore( new BridgingHiveMetastore(mockHiveMetastore, mockPartitionMutator), executor, + false, new Duration(5, TimeUnit.MINUTES), new Duration(1, TimeUnit.MINUTES), 1000, @@ -214,27 +219,27 @@ public void testCachingWithPartitionVersioning() MetastoreCacheScope.PARTITION); assertEquals(mockClient.getAccessCount(), 0); - assertEquals(partitionCachingEnabledmetastore.getPartitionNamesByFilter(TEST_DATABASE, TEST_TABLE, ImmutableMap.of()), EXPECTED_PARTITIONS); + assertEquals(partitionCachingEnabledmetastore.getPartitionNamesByFilter(TEST_METASTORE_CONTEXT, TEST_DATABASE, TEST_TABLE, ImmutableMap.of()), EXPECTED_PARTITIONS); assertEquals(mockClient.getAccessCount(), 1); - assertEquals(partitionCachingEnabledmetastore.getPartitionNamesByFilter(TEST_DATABASE, TEST_TABLE, ImmutableMap.of()), EXPECTED_PARTITIONS); + assertEquals(partitionCachingEnabledmetastore.getPartitionNamesByFilter(TEST_METASTORE_CONTEXT, TEST_DATABASE, TEST_TABLE, ImmutableMap.of()), EXPECTED_PARTITIONS); // Assert that we did not hit cache assertEquals(mockClient.getAccessCount(), 2); // Select all of the available partitions and load them into the cache - assertEquals(partitionCachingEnabledmetastore.getPartitionsByNames(TEST_DATABASE, TEST_TABLE, ImmutableList.of(TEST_PARTITION1, TEST_PARTITION2)).size(), 2); + assertEquals(partitionCachingEnabledmetastore.getPartitionsByNames(TEST_METASTORE_CONTEXT, TEST_DATABASE, TEST_TABLE, ImmutableList.of(TEST_PARTITION1, TEST_PARTITION2)).size(), 2); assertEquals(mockClient.getAccessCount(), 3); // Now if we fetch any or both of them, they should hit the cache - assertEquals(partitionCachingEnabledmetastore.getPartitionsByNames(TEST_DATABASE, TEST_TABLE, ImmutableList.of(TEST_PARTITION1)).size(), 1); - 
assertEquals(partitionCachingEnabledmetastore.getPartitionsByNames(TEST_DATABASE, TEST_TABLE, ImmutableList.of(TEST_PARTITION2)).size(), 1); - assertEquals(partitionCachingEnabledmetastore.getPartitionsByNames(TEST_DATABASE, TEST_TABLE, ImmutableList.of(TEST_PARTITION1, TEST_PARTITION2)).size(), 2); + assertEquals(partitionCachingEnabledmetastore.getPartitionsByNames(TEST_METASTORE_CONTEXT, TEST_DATABASE, TEST_TABLE, ImmutableList.of(TEST_PARTITION1)).size(), 1); + assertEquals(partitionCachingEnabledmetastore.getPartitionsByNames(TEST_METASTORE_CONTEXT, TEST_DATABASE, TEST_TABLE, ImmutableList.of(TEST_PARTITION2)).size(), 1); + assertEquals(partitionCachingEnabledmetastore.getPartitionsByNames(TEST_METASTORE_CONTEXT, TEST_DATABASE, TEST_TABLE, ImmutableList.of(TEST_PARTITION1, TEST_PARTITION2)).size(), 2); assertEquals(mockClient.getAccessCount(), 3); // This call should NOT invalidate the partition cache because partition version is same as before - assertEquals(partitionCachingEnabledmetastore.getPartitionNamesByFilter(TEST_DATABASE, TEST_TABLE, ImmutableMap.of()), EXPECTED_PARTITIONS); + assertEquals(partitionCachingEnabledmetastore.getPartitionNamesByFilter(TEST_METASTORE_CONTEXT, TEST_DATABASE, TEST_TABLE, ImmutableMap.of()), EXPECTED_PARTITIONS); assertEquals(mockClient.getAccessCount(), 4); - assertEquals(partitionCachingEnabledmetastore.getPartitionsByNames(TEST_DATABASE, TEST_TABLE, ImmutableList.of(TEST_PARTITION1, TEST_PARTITION2)).size(), 2); + assertEquals(partitionCachingEnabledmetastore.getPartitionsByNames(TEST_METASTORE_CONTEXT, TEST_DATABASE, TEST_TABLE, ImmutableList.of(TEST_PARTITION1, TEST_PARTITION2)).size(), 2); // Assert that its a cache hit assertEquals(mockClient.getAccessCount(), 4); @@ -251,6 +256,7 @@ private void assertInvalidateCache(MockPartitionMutator partitionMutator) CachingHiveMetastore partitionCachingEnabledmetastore = new CachingHiveMetastore( new BridgingHiveMetastore(mockHiveMetastore, partitionMutator), executor, + false, 
new Duration(5, TimeUnit.MINUTES), new Duration(1, TimeUnit.MINUTES), 1000, @@ -259,9 +265,9 @@ private void assertInvalidateCache(MockPartitionMutator partitionMutator) int clientAccessCount = 0; for (int i = 0; i < 100; i++) { - assertEquals(partitionCachingEnabledmetastore.getPartitionNamesByFilter(TEST_DATABASE, TEST_TABLE, ImmutableMap.of()), EXPECTED_PARTITIONS); + assertEquals(partitionCachingEnabledmetastore.getPartitionNamesByFilter(TEST_METASTORE_CONTEXT, TEST_DATABASE, TEST_TABLE, ImmutableMap.of()), EXPECTED_PARTITIONS); assertEquals(mockClient.getAccessCount(), ++clientAccessCount); - assertEquals(partitionCachingEnabledmetastore.getPartitionsByNames(TEST_DATABASE, TEST_TABLE, ImmutableList.of(TEST_PARTITION1, TEST_PARTITION2)).size(), 2); + assertEquals(partitionCachingEnabledmetastore.getPartitionsByNames(TEST_METASTORE_CONTEXT, TEST_DATABASE, TEST_TABLE, ImmutableList.of(TEST_PARTITION1, TEST_PARTITION2)).size(), 2); // Assert that we did not hit cache assertEquals(mockClient.getAccessCount(), ++clientAccessCount); } @@ -269,35 +275,35 @@ private void assertInvalidateCache(MockPartitionMutator partitionMutator) public void testInvalidGetPartitionNamesByParts() { - assertTrue(metastore.getPartitionNamesByFilter(BAD_DATABASE, TEST_TABLE, ImmutableMap.of()).isEmpty()); + assertTrue(metastore.getPartitionNamesByFilter(TEST_METASTORE_CONTEXT, BAD_DATABASE, TEST_TABLE, ImmutableMap.of()).isEmpty()); } @Test public void testGetPartitionsByNames() { assertEquals(mockClient.getAccessCount(), 0); - metastore.getTable(TEST_DATABASE, TEST_TABLE); + metastore.getTable(TEST_METASTORE_CONTEXT, TEST_DATABASE, TEST_TABLE); assertEquals(mockClient.getAccessCount(), 1); // Select half of the available partitions and load them into the cache - assertEquals(metastore.getPartitionsByNames(TEST_DATABASE, TEST_TABLE, ImmutableList.of(TEST_PARTITION1)).size(), 1); + assertEquals(metastore.getPartitionsByNames(TEST_METASTORE_CONTEXT, TEST_DATABASE, TEST_TABLE, 
ImmutableList.of(TEST_PARTITION1)).size(), 1); assertEquals(mockClient.getAccessCount(), 2); // Now select all of the partitions - assertEquals(metastore.getPartitionsByNames(TEST_DATABASE, TEST_TABLE, ImmutableList.of(TEST_PARTITION1, TEST_PARTITION2)).size(), 2); + assertEquals(metastore.getPartitionsByNames(TEST_METASTORE_CONTEXT, TEST_DATABASE, TEST_TABLE, ImmutableList.of(TEST_PARTITION1, TEST_PARTITION2)).size(), 2); // There should be one more access to fetch the remaining partition assertEquals(mockClient.getAccessCount(), 3); // Now if we fetch any or both of them, they should not hit the client - assertEquals(metastore.getPartitionsByNames(TEST_DATABASE, TEST_TABLE, ImmutableList.of(TEST_PARTITION1)).size(), 1); - assertEquals(metastore.getPartitionsByNames(TEST_DATABASE, TEST_TABLE, ImmutableList.of(TEST_PARTITION2)).size(), 1); - assertEquals(metastore.getPartitionsByNames(TEST_DATABASE, TEST_TABLE, ImmutableList.of(TEST_PARTITION1, TEST_PARTITION2)).size(), 2); + assertEquals(metastore.getPartitionsByNames(TEST_METASTORE_CONTEXT, TEST_DATABASE, TEST_TABLE, ImmutableList.of(TEST_PARTITION1)).size(), 1); + assertEquals(metastore.getPartitionsByNames(TEST_METASTORE_CONTEXT, TEST_DATABASE, TEST_TABLE, ImmutableList.of(TEST_PARTITION2)).size(), 1); + assertEquals(metastore.getPartitionsByNames(TEST_METASTORE_CONTEXT, TEST_DATABASE, TEST_TABLE, ImmutableList.of(TEST_PARTITION1, TEST_PARTITION2)).size(), 2); assertEquals(mockClient.getAccessCount(), 3); metastore.flushCache(); // Fetching both should only result in one batched access - assertEquals(metastore.getPartitionsByNames(TEST_DATABASE, TEST_TABLE, ImmutableList.of(TEST_PARTITION1, TEST_PARTITION2)).size(), 2); + assertEquals(metastore.getPartitionsByNames(TEST_METASTORE_CONTEXT, TEST_DATABASE, TEST_TABLE, ImmutableList.of(TEST_PARTITION1, TEST_PARTITION2)).size(), 2); assertEquals(mockClient.getAccessCount(), 4); } @@ -307,31 +313,31 @@ public void testListRoles() { 
assertEquals(mockClient.getAccessCount(), 0); - assertEquals(metastore.listRoles(), TEST_ROLES); + assertEquals(metastore.listRoles(TEST_METASTORE_CONTEXT), TEST_ROLES); assertEquals(mockClient.getAccessCount(), 1); - assertEquals(metastore.listRoles(), TEST_ROLES); + assertEquals(metastore.listRoles(TEST_METASTORE_CONTEXT), TEST_ROLES); assertEquals(mockClient.getAccessCount(), 1); metastore.flushCache(); - assertEquals(metastore.listRoles(), TEST_ROLES); + assertEquals(metastore.listRoles(TEST_METASTORE_CONTEXT), TEST_ROLES); assertEquals(mockClient.getAccessCount(), 2); - metastore.createRole("role", "grantor"); + metastore.createRole(TEST_METASTORE_CONTEXT, "role", "grantor"); - assertEquals(metastore.listRoles(), TEST_ROLES); + assertEquals(metastore.listRoles(TEST_METASTORE_CONTEXT), TEST_ROLES); assertEquals(mockClient.getAccessCount(), 3); - metastore.dropRole("testrole"); + metastore.dropRole(TEST_METASTORE_CONTEXT, "testrole"); - assertEquals(metastore.listRoles(), TEST_ROLES); + assertEquals(metastore.listRoles(TEST_METASTORE_CONTEXT), TEST_ROLES); assertEquals(mockClient.getAccessCount(), 4); } public void testInvalidGetPartitionsByNames() { - Map> partitionsByNames = metastore.getPartitionsByNames(BAD_DATABASE, TEST_TABLE, ImmutableList.of(TEST_PARTITION1)); + Map> partitionsByNames = metastore.getPartitionsByNames(TEST_METASTORE_CONTEXT, BAD_DATABASE, TEST_TABLE, ImmutableList.of(TEST_PARTITION1)); assertEquals(partitionsByNames.size(), 1); Optional onlyElement = Iterables.getOnlyElement(partitionsByNames.values()); assertFalse(onlyElement.isPresent()); @@ -343,7 +349,7 @@ public void testNoCacheExceptions() // Throw exceptions on usage mockClient.setThrowException(true); try { - metastore.getAllDatabases(); + metastore.getAllDatabases(TEST_METASTORE_CONTEXT); } catch (RuntimeException ignored) { } @@ -351,7 +357,7 @@ public void testNoCacheExceptions() // Second try should hit the client again try { - metastore.getAllDatabases(); + 
metastore.getAllDatabases(TEST_METASTORE_CONTEXT); } catch (RuntimeException ignored) { } @@ -369,7 +375,7 @@ private MockHiveCluster(MockHiveMetastoreClient client) } @Override - public HiveMetastoreClient createMetastoreClient() + public HiveMetastoreClient createMetastoreClient(Optional token) { return client; } diff --git a/presto-hive-metastore/src/test/java/com/facebook/presto/hive/metastore/TestMetastoreClientConfig.java b/presto-hive-metastore/src/test/java/com/facebook/presto/hive/metastore/TestMetastoreClientConfig.java index 5c790bbc7413d..d486d6c0e4c32 100644 --- a/presto-hive-metastore/src/test/java/com/facebook/presto/hive/metastore/TestMetastoreClientConfig.java +++ b/presto-hive-metastore/src/test/java/com/facebook/presto/hive/metastore/TestMetastoreClientConfig.java @@ -43,7 +43,8 @@ public void testDefaults() .setRecordingDuration(new Duration(0, TimeUnit.MINUTES)) .setReplay(false) .setPartitionVersioningEnabled(false) - .setMetastoreCacheScope(MetastoreCacheScope.ALL)); + .setMetastoreCacheScope(MetastoreCacheScope.ALL) + .setMetastoreImpersonationEnabled(false)); } @Test @@ -64,6 +65,7 @@ public void testExplicitPropertyMappings() .put("hive.replay-metastore-recording", "true") .put("hive.partition-versioning-enabled", "true") .put("hive.metastore-cache-scope", "PARTITION") + .put("hive.metastore-impersonation-enabled", "true") .build(); MetastoreClientConfig expected = new MetastoreClientConfig() @@ -80,7 +82,8 @@ public void testExplicitPropertyMappings() .setRecordingDuration(new Duration(42, TimeUnit.SECONDS)) .setReplay(true) .setPartitionVersioningEnabled(true) - .setMetastoreCacheScope(MetastoreCacheScope.PARTITION); + .setMetastoreCacheScope(MetastoreCacheScope.PARTITION) + .setMetastoreImpersonationEnabled(true); ConfigAssertions.assertFullMapping(properties, expected); } diff --git a/presto-hive-metastore/src/test/java/com/facebook/presto/hive/metastore/TestRecordingHiveMetastore.java 
b/presto-hive-metastore/src/test/java/com/facebook/presto/hive/metastore/TestRecordingHiveMetastore.java index 99a0eecbb5634..e624992252009 100644 --- a/presto-hive-metastore/src/test/java/com/facebook/presto/hive/metastore/TestRecordingHiveMetastore.java +++ b/presto-hive-metastore/src/test/java/com/facebook/presto/hive/metastore/TestRecordingHiveMetastore.java @@ -125,7 +125,7 @@ public void testRecordingHiveMetastore() RecordingHiveMetastore recordingHiveMetastore = new RecordingHiveMetastore(new TestingHiveMetastore(), recordingHiveClientConfig); validateMetadata(recordingHiveMetastore); - recordingHiveMetastore.dropDatabase("other_database"); + recordingHiveMetastore.dropDatabase(new MetastoreContext("test_user"), "other_database"); recordingHiveMetastore.writeRecording(); MetastoreClientConfig replayingHiveClientConfig = recordingHiveClientConfig @@ -138,31 +138,31 @@ public void testRecordingHiveMetastore() private void validateMetadata(ExtendedHiveMetastore hiveMetastore) { - assertEquals(hiveMetastore.getDatabase("database"), Optional.of(DATABASE)); - assertEquals(hiveMetastore.getAllDatabases(), ImmutableList.of("database")); - assertEquals(hiveMetastore.getTable("database", "table"), Optional.of(TABLE)); - assertEquals(hiveMetastore.getSupportedColumnStatistics(createVarcharType(123)), ImmutableSet.of(MIN_VALUE, MAX_VALUE)); - assertEquals(hiveMetastore.getTableStatistics("database", "table"), PARTITION_STATISTICS); - assertEquals(hiveMetastore.getPartitionStatistics("database", "table", ImmutableSet.of("value")), ImmutableMap.of("value", PARTITION_STATISTICS)); - assertEquals(hiveMetastore.getAllTables("database"), Optional.of(ImmutableList.of("table"))); - assertEquals(hiveMetastore.getAllViews("database"), Optional.empty()); - assertEquals(hiveMetastore.getPartition("database", "table", ImmutableList.of("value")), Optional.of(PARTITION)); - assertEquals(hiveMetastore.getPartitionNames("database", "table"), Optional.of(ImmutableList.of("value"))); + 
assertEquals(hiveMetastore.getDatabase(new MetastoreContext("test_user"), "database"), Optional.of(DATABASE)); + assertEquals(hiveMetastore.getAllDatabases(new MetastoreContext("test_user")), ImmutableList.of("database")); + assertEquals(hiveMetastore.getTable(new MetastoreContext("test_user"), "database", "table"), Optional.of(TABLE)); + assertEquals(hiveMetastore.getSupportedColumnStatistics(new MetastoreContext("test_user"), createVarcharType(123)), ImmutableSet.of(MIN_VALUE, MAX_VALUE)); + assertEquals(hiveMetastore.getTableStatistics(new MetastoreContext("test_user"), "database", "table"), PARTITION_STATISTICS); + assertEquals(hiveMetastore.getPartitionStatistics(new MetastoreContext("test_user"), "database", "table", ImmutableSet.of("value")), ImmutableMap.of("value", PARTITION_STATISTICS)); + assertEquals(hiveMetastore.getAllTables(new MetastoreContext("test_user"), "database"), Optional.of(ImmutableList.of("table"))); + assertEquals(hiveMetastore.getAllViews(new MetastoreContext("test_user"), "database"), Optional.empty()); + assertEquals(hiveMetastore.getPartition(new MetastoreContext("test_user"), "database", "table", ImmutableList.of("value")), Optional.of(PARTITION)); + assertEquals(hiveMetastore.getPartitionNames(new MetastoreContext("test_user"), "database", "table"), Optional.of(ImmutableList.of("value"))); Map map = new HashMap<>(); Column column = new Column("column", HiveType.HIVE_STRING, Optional.empty()); map.put(column, Domain.singleValue(VARCHAR, utf8Slice("value"))); - assertEquals(hiveMetastore.getPartitionNamesByFilter("database", "table", map), ImmutableList.of("value")); - assertEquals(hiveMetastore.getPartitionsByNames("database", "table", ImmutableList.of("value")), ImmutableMap.of("value", Optional.of(PARTITION))); - assertEquals(hiveMetastore.listTablePrivileges("database", "table", new PrestoPrincipal(USER, "user")), ImmutableSet.of(PRIVILEGE_INFO)); - assertEquals(hiveMetastore.listRoles(), ImmutableSet.of("role")); - 
assertEquals(hiveMetastore.listRoleGrants(new PrestoPrincipal(USER, "user")), ImmutableSet.of(ROLE_GRANT)); + assertEquals(hiveMetastore.getPartitionNamesByFilter(new MetastoreContext("test_user"), "database", "table", map), ImmutableList.of("value")); + assertEquals(hiveMetastore.getPartitionsByNames(new MetastoreContext("test_user"), "database", "table", ImmutableList.of("value")), ImmutableMap.of("value", Optional.of(PARTITION))); + assertEquals(hiveMetastore.listTablePrivileges(new MetastoreContext("test_user"), "database", "table", new PrestoPrincipal(USER, "user")), ImmutableSet.of(PRIVILEGE_INFO)); + assertEquals(hiveMetastore.listRoles(new MetastoreContext("test_user")), ImmutableSet.of("role")); + assertEquals(hiveMetastore.listRoleGrants(new MetastoreContext("test_user"), new PrestoPrincipal(USER, "user")), ImmutableSet.of(ROLE_GRANT)); } private static class TestingHiveMetastore extends UnimplementedHiveMetastore { @Override - public Optional getDatabase(String databaseName) + public Optional getDatabase(MetastoreContext metastoreContext, String databaseName) { if (databaseName.equals("database")) { return Optional.of(DATABASE); @@ -172,13 +172,13 @@ public Optional getDatabase(String databaseName) } @Override - public List getAllDatabases() + public List getAllDatabases(MetastoreContext metastoreContext) { return ImmutableList.of("database"); } @Override - public Optional
getTable(String databaseName, String tableName) + public Optional
getTable(MetastoreContext metastoreContext, String databaseName, String tableName) { if (databaseName.equals("database") && tableName.equals("table")) { return Optional.of(TABLE); @@ -188,7 +188,7 @@ public Optional
getTable(String databaseName, String tableName) } @Override - public Set getSupportedColumnStatistics(Type type) + public Set getSupportedColumnStatistics(MetastoreContext metastoreContext, Type type) { if (type.equals(createVarcharType(123))) { return ImmutableSet.of(MIN_VALUE, MAX_VALUE); @@ -198,7 +198,7 @@ public Set getSupportedColumnStatistics(Type type) } @Override - public PartitionStatistics getTableStatistics(String databaseName, String tableName) + public PartitionStatistics getTableStatistics(MetastoreContext metastoreContext, String databaseName, String tableName) { if (databaseName.equals("database") && tableName.equals("table")) { return PARTITION_STATISTICS; @@ -208,7 +208,7 @@ public PartitionStatistics getTableStatistics(String databaseName, String tableN } @Override - public Map getPartitionStatistics(String databaseName, String tableName, Set partitionNames) + public Map getPartitionStatistics(MetastoreContext metastoreContext, String databaseName, String tableName, Set partitionNames) { if (databaseName.equals("database") && tableName.equals("table") && partitionNames.contains("value")) { return ImmutableMap.of("value", PARTITION_STATISTICS); @@ -218,7 +218,7 @@ public Map getPartitionStatistics(String databaseNa } @Override - public Optional> getAllTables(String databaseName) + public Optional> getAllTables(MetastoreContext metastoreContext, String databaseName) { if (databaseName.equals("database")) { return Optional.of(ImmutableList.of("table")); @@ -228,19 +228,19 @@ public Optional> getAllTables(String databaseName) } @Override - public Optional> getAllViews(String databaseName) + public Optional> getAllViews(MetastoreContext metastoreContext, String databaseName) { return Optional.empty(); } @Override - public void dropDatabase(String databaseName) + public void dropDatabase(MetastoreContext metastoreContext, String databaseName) { // noop for test purpose } @Override - public Optional getPartition(String databaseName, String tableName, 
List partitionValues) + public Optional getPartition(MetastoreContext metastoreContext, String databaseName, String tableName, List partitionValues) { if (databaseName.equals("database") && tableName.equals("table") && partitionValues.equals(ImmutableList.of("value"))) { return Optional.of(PARTITION); @@ -250,7 +250,7 @@ public Optional getPartition(String databaseName, String tableName, L } @Override - public Optional> getPartitionNames(String databaseName, String tableName) + public Optional> getPartitionNames(MetastoreContext metastoreContext, String databaseName, String tableName) { if (databaseName.equals("database") && tableName.equals("table")) { return Optional.of(ImmutableList.of("value")); @@ -261,6 +261,7 @@ public Optional> getPartitionNames(String databaseName, String tabl @Override public List getPartitionNamesByFilter( + MetastoreContext metastoreContext, String databaseName, String tableName, Map partitionPredicates) @@ -274,7 +275,7 @@ public List getPartitionNamesByFilter( } @Override - public Map> getPartitionsByNames(String databaseName, String tableName, List partitionNames) + public Map> getPartitionsByNames(MetastoreContext metastoreContext, String databaseName, String tableName, List partitionNames) { if (databaseName.equals("database") && tableName.equals("table") && partitionNames.contains("value")) { return ImmutableMap.of("value", Optional.of(PARTITION)); @@ -284,7 +285,7 @@ public Map> getPartitionsByNames(String databaseName } @Override - public Set listTablePrivileges(String database, String table, PrestoPrincipal prestoPrincipal) + public Set listTablePrivileges(MetastoreContext metastoreContext, String database, String table, PrestoPrincipal prestoPrincipal) { if (database.equals("database") && table.equals("table") && prestoPrincipal.getType() == USER && prestoPrincipal.getName().equals("user")) { return ImmutableSet.of(PRIVILEGE_INFO); @@ -294,13 +295,13 @@ public Set listTablePrivileges(String database, String table, } @Override 
- public Set listRoles() + public Set listRoles(MetastoreContext metastoreContext) { return ImmutableSet.of("role"); } @Override - public Set listRoleGrants(PrestoPrincipal principal) + public Set listRoleGrants(MetastoreContext metastoreContext, PrestoPrincipal principal) { return ImmutableSet.of(ROLE_GRANT); } diff --git a/presto-hive-metastore/src/test/java/com/facebook/presto/hive/metastore/UnimplementedHiveMetastore.java b/presto-hive-metastore/src/test/java/com/facebook/presto/hive/metastore/UnimplementedHiveMetastore.java index c434942311afe..3ebfebade8486 100644 --- a/presto-hive-metastore/src/test/java/com/facebook/presto/hive/metastore/UnimplementedHiveMetastore.java +++ b/presto-hive-metastore/src/test/java/com/facebook/presto/hive/metastore/UnimplementedHiveMetastore.java @@ -30,139 +30,140 @@ public class UnimplementedHiveMetastore implements ExtendedHiveMetastore { @Override - public Optional getDatabase(String databaseName) + public Optional getDatabase(MetastoreContext metastoreContext, String databaseName) { throw new UnsupportedOperationException(); } @Override - public List getAllDatabases() + public List getAllDatabases(MetastoreContext metastoreContext) { throw new UnsupportedOperationException(); } @Override - public Optional
getTable(String databaseName, String tableName) + public Optional
getTable(MetastoreContext metastoreContext, String databaseName, String tableName) { throw new UnsupportedOperationException(); } @Override - public Set getSupportedColumnStatistics(Type type) + public Set getSupportedColumnStatistics(MetastoreContext metastoreContext, Type type) { throw new UnsupportedOperationException(); } @Override - public PartitionStatistics getTableStatistics(String databaseName, String tableName) + public PartitionStatistics getTableStatistics(MetastoreContext metastoreContext, String databaseName, String tableName) { throw new UnsupportedOperationException(); } @Override - public Map getPartitionStatistics(String databaseName, String tableName, Set partitionNames) + public Map getPartitionStatistics(MetastoreContext metastoreContext, String databaseName, String tableName, Set partitionNames) { throw new UnsupportedOperationException(); } @Override - public void updateTableStatistics(String databaseName, String tableName, Function update) + public void updateTableStatistics(MetastoreContext metastoreContext, String databaseName, String tableName, Function update) { throw new UnsupportedOperationException(); } @Override - public void updatePartitionStatistics(String databaseName, String tableName, String partitionName, Function update) + public void updatePartitionStatistics(MetastoreContext metastoreContext, String databaseName, String tableName, String partitionName, Function update) { throw new UnsupportedOperationException(); } @Override - public Optional> getAllTables(String databaseName) + public Optional> getAllTables(MetastoreContext metastoreContext, String databaseName) { throw new UnsupportedOperationException(); } @Override - public Optional> getAllViews(String databaseName) + public Optional> getAllViews(MetastoreContext metastoreContext, String databaseName) { throw new UnsupportedOperationException(); } @Override - public void createDatabase(Database database) + public void createDatabase(MetastoreContext metastoreContext, 
Database database) { throw new UnsupportedOperationException(); } @Override - public void dropDatabase(String databaseName) + public void dropDatabase(MetastoreContext metastoreContext, String databaseName) { throw new UnsupportedOperationException(); } @Override - public void renameDatabase(String databaseName, String newDatabaseName) + public void renameDatabase(MetastoreContext metastoreContext, String databaseName, String newDatabaseName) { throw new UnsupportedOperationException(); } @Override - public void createTable(Table table, PrincipalPrivileges principalPrivileges) + public void createTable(MetastoreContext metastoreContext, Table table, PrincipalPrivileges principalPrivileges) { throw new UnsupportedOperationException(); } @Override - public void dropTable(String databaseName, String tableName, boolean deleteData) + public void dropTable(MetastoreContext metastoreContext, String databaseName, String tableName, boolean deleteData) { throw new UnsupportedOperationException(); } @Override - public void replaceTable(String databaseName, String tableName, Table newTable, PrincipalPrivileges principalPrivileges) + public void replaceTable(MetastoreContext metastoreContext, String databaseName, String tableName, Table newTable, PrincipalPrivileges principalPrivileges) { throw new UnsupportedOperationException(); } @Override - public void renameTable(String databaseName, String tableName, String newDatabaseName, String newTableName) + public void renameTable(MetastoreContext metastoreContext, String databaseName, String tableName, String newDatabaseName, String newTableName) { throw new UnsupportedOperationException(); } @Override - public void addColumn(String databaseName, String tableName, String columnName, HiveType columnType, String columnComment) + public void addColumn(MetastoreContext metastoreContext, String databaseName, String tableName, String columnName, HiveType columnType, String columnComment) { throw new UnsupportedOperationException(); } 
@Override - public void renameColumn(String databaseName, String tableName, String oldColumnName, String newColumnName) + public void renameColumn(MetastoreContext metastoreContext, String databaseName, String tableName, String oldColumnName, String newColumnName) { throw new UnsupportedOperationException(); } @Override - public void dropColumn(String databaseName, String tableName, String columnName) + public void dropColumn(MetastoreContext metastoreContext, String databaseName, String tableName, String columnName) { throw new UnsupportedOperationException(); } @Override - public Optional getPartition(String databaseName, String tableName, List partitionValues) + public Optional getPartition(MetastoreContext metastoreContext, String databaseName, String tableName, List partitionValues) { throw new UnsupportedOperationException(); } @Override - public Optional> getPartitionNames(String databaseName, String tableName) + public Optional> getPartitionNames(MetastoreContext metastoreContext, String databaseName, String tableName) { throw new UnsupportedOperationException(); } @Override public List getPartitionNamesByFilter( + MetastoreContext metastoreContext, String databaseName, String tableName, Map partitionPredicates) @@ -172,6 +173,7 @@ public List getPartitionNamesByFilter( @Override public List getPartitionNamesWithVersionByFilter( + MetastoreContext metastoreContext, String databaseName, String tableName, Map partitionPredicates) @@ -180,79 +182,79 @@ public List getPartitionNamesWithVersionByFilter( } @Override - public Map> getPartitionsByNames(String databaseName, String tableName, List partitionNames) + public Map> getPartitionsByNames(MetastoreContext metastoreContext, String databaseName, String tableName, List partitionNames) { throw new UnsupportedOperationException(); } @Override - public void addPartitions(String databaseName, String tableName, List partitions) + public void addPartitions(MetastoreContext metastoreContext, String databaseName, 
String tableName, List partitions) { throw new UnsupportedOperationException(); } @Override - public void dropPartition(String databaseName, String tableName, List parts, boolean deleteData) + public void dropPartition(MetastoreContext metastoreContext, String databaseName, String tableName, List parts, boolean deleteData) { throw new UnsupportedOperationException(); } @Override - public void alterPartition(String databaseName, String tableName, PartitionWithStatistics partition) + public void alterPartition(MetastoreContext metastoreContext, String databaseName, String tableName, PartitionWithStatistics partition) { throw new UnsupportedOperationException(); } @Override - public void createRole(String role, String grantor) + public void createRole(MetastoreContext metastoreContext, String role, String grantor) { throw new UnsupportedOperationException(); } @Override - public void dropRole(String role) + public void dropRole(MetastoreContext metastoreContext, String role) { throw new UnsupportedOperationException(); } @Override - public Set listRoles() + public Set listRoles(MetastoreContext metastoreContext) { throw new UnsupportedOperationException(); } @Override - public void grantTablePrivileges(String databaseName, String tableName, PrestoPrincipal grantee, Set privileges) + public void grantTablePrivileges(MetastoreContext metastoreContext, String databaseName, String tableName, PrestoPrincipal grantee, Set privileges) { throw new UnsupportedOperationException(); } @Override - public void revokeTablePrivileges(String databaseName, String tableName, PrestoPrincipal grantee, Set privileges) + public void revokeTablePrivileges(MetastoreContext metastoreContext, String databaseName, String tableName, PrestoPrincipal grantee, Set privileges) { throw new UnsupportedOperationException(); } @Override - public Set listTablePrivileges(String databaseName, String tableName, PrestoPrincipal principal) + public Set listTablePrivileges(MetastoreContext metastoreContext, 
String databaseName, String tableName, PrestoPrincipal principal) { throw new UnsupportedOperationException(); } @Override - public void grantRoles(Set roles, Set grantees, boolean withAdminOption, PrestoPrincipal grantor) + public void grantRoles(MetastoreContext metastoreContext, Set roles, Set grantees, boolean withAdminOption, PrestoPrincipal grantor) { throw new UnsupportedOperationException(); } @Override - public void revokeRoles(Set roles, Set grantees, boolean adminOptionFor, PrestoPrincipal grantor) + public void revokeRoles(MetastoreContext metastoreContext, Set roles, Set grantees, boolean adminOptionFor, PrestoPrincipal grantor) { throw new UnsupportedOperationException(); } @Override - public Set listRoleGrants(PrestoPrincipal principal) + public Set listRoleGrants(MetastoreContext metastoreContext, PrestoPrincipal principal) { throw new UnsupportedOperationException(); } diff --git a/presto-hive-metastore/src/test/java/com/facebook/presto/hive/metastore/thrift/InMemoryHiveMetastore.java b/presto-hive-metastore/src/test/java/com/facebook/presto/hive/metastore/thrift/InMemoryHiveMetastore.java index 37bf8c4216a85..ee23f4997c018 100644 --- a/presto-hive-metastore/src/test/java/com/facebook/presto/hive/metastore/thrift/InMemoryHiveMetastore.java +++ b/presto-hive-metastore/src/test/java/com/facebook/presto/hive/metastore/thrift/InMemoryHiveMetastore.java @@ -19,6 +19,7 @@ import com.facebook.presto.hive.TableAlreadyExistsException; import com.facebook.presto.hive.metastore.Column; import com.facebook.presto.hive.metastore.HivePrivilegeInfo; +import com.facebook.presto.hive.metastore.MetastoreContext; import com.facebook.presto.hive.metastore.MetastoreUtil; import com.facebook.presto.hive.metastore.PartitionStatistics; import com.facebook.presto.hive.metastore.PartitionWithStatistics; @@ -99,7 +100,7 @@ public InMemoryHiveMetastore(File baseDirectory) } @Override - public synchronized void createDatabase(Database database) + public synchronized void 
createDatabase(MetastoreContext metastoreContext, Database database) { requireNonNull(database, "database is null"); @@ -124,19 +125,19 @@ public synchronized void createDatabase(Database database) } @Override - public synchronized void dropDatabase(String databaseName) + public synchronized void dropDatabase(MetastoreContext metastoreContext, String databaseName) { if (!databases.containsKey(databaseName)) { throw new SchemaNotFoundException(databaseName); } - if (!getAllTables(databaseName).orElse(ImmutableList.of()).isEmpty()) { + if (!getAllTables(metastoreContext, databaseName).orElse(ImmutableList.of()).isEmpty()) { throw new PrestoException(SCHEMA_NOT_EMPTY, "Schema not empty: " + databaseName); } databases.remove(databaseName); } @Override - public synchronized void alterDatabase(String databaseName, Database newDatabase) + public synchronized void alterDatabase(MetastoreContext metastoreContext, String databaseName, Database newDatabase) { String newDatabaseName = newDatabase.getName(); @@ -163,13 +164,13 @@ public synchronized void alterDatabase(String databaseName, Database newDatabase } @Override - public synchronized List getAllDatabases() + public synchronized List getAllDatabases(MetastoreContext metastoreContext) { return ImmutableList.copyOf(databases.keySet()); } @Override - public synchronized void createTable(Table table) + public synchronized void createTable(MetastoreContext metastoreContext, Table table) { TableType tableType = TableType.valueOf(table.getTableType()); checkArgument(EnumSet.of(MANAGED_TABLE, EXTERNAL_TABLE, VIRTUAL_VIEW).contains(tableType), "Invalid table type: %s", tableType); @@ -203,9 +204,9 @@ public synchronized void createTable(Table table) } @Override - public synchronized void dropTable(String databaseName, String tableName, boolean deleteData) + public synchronized void dropTable(MetastoreContext metastoreContext, String databaseName, String tableName, boolean deleteData) { - List locations = listAllDataPaths(this, 
databaseName, tableName); + List locations = listAllDataPaths(metastoreContext, this, databaseName, tableName); SchemaTableName schemaTableName = new SchemaTableName(databaseName, tableName); Table table = relations.remove(schemaTableName); @@ -227,10 +228,10 @@ public synchronized void dropTable(String databaseName, String tableName, boolea } } - private static List listAllDataPaths(HiveMetastore metastore, String schemaName, String tableName) + private static List listAllDataPaths(MetastoreContext metastoreContext, HiveMetastore metastore, String schemaName, String tableName) { ImmutableList.Builder locations = ImmutableList.builder(); - Table table = metastore.getTable(schemaName, tableName).get(); + Table table = metastore.getTable(metastoreContext, schemaName, tableName).get(); if (table.getSd().getLocation() != null) { // For unpartitioned table, there should be nothing directly under this directory. // But including this location in the set makes the directory content assert more @@ -238,9 +239,9 @@ private static List listAllDataPaths(HiveMetastore metastore, String sch locations.add(table.getSd().getLocation()); } - Optional> partitionNames = metastore.getPartitionNames(schemaName, tableName); + Optional> partitionNames = metastore.getPartitionNames(metastoreContext, schemaName, tableName); if (partitionNames.isPresent()) { - metastore.getPartitionsByNames(schemaName, tableName, partitionNames.get()).stream() + metastore.getPartitionsByNames(metastoreContext, schemaName, tableName, partitionNames.get()).stream() .map(partition -> partition.getSd().getLocation()) .filter(location -> !location.startsWith(table.getSd().getLocation())) .forEach(locations::add); @@ -250,7 +251,7 @@ private static List listAllDataPaths(HiveMetastore metastore, String sch } @Override - public synchronized void alterTable(String databaseName, String tableName, Table newTable) + public synchronized void alterTable(MetastoreContext metastoreContext, String databaseName, String 
tableName, Table newTable) { SchemaTableName oldName = new SchemaTableName(databaseName, tableName); SchemaTableName newName = new SchemaTableName(newTable.getDbName(), newTable.getTableName()); @@ -276,7 +277,7 @@ public synchronized void alterTable(String databaseName, String tableName, Table } @Override - public synchronized Optional> getAllTables(String databaseName) + public synchronized Optional> getAllTables(MetastoreContext metastoreContext, String databaseName) { ImmutableList.Builder tables = ImmutableList.builder(); for (SchemaTableName schemaTableName : this.relations.keySet()) { @@ -288,7 +289,7 @@ public synchronized Optional> getAllTables(String databaseName) } @Override - public synchronized Optional> getAllViews(String databaseName) + public synchronized Optional> getAllViews(MetastoreContext metastoreContext, String databaseName) { ImmutableList.Builder tables = ImmutableList.builder(); for (SchemaTableName schemaTableName : this.views.keySet()) { @@ -300,13 +301,13 @@ public synchronized Optional> getAllViews(String databaseName) } @Override - public synchronized Optional getDatabase(String databaseName) + public synchronized Optional getDatabase(MetastoreContext metastoreContext, String databaseName) { return Optional.ofNullable(databases.get(databaseName)); } @Override - public synchronized void addPartitions(String databaseName, String tableName, List partitionsWithStatistics) + public synchronized void addPartitions(MetastoreContext metastoreContext, String databaseName, String tableName, List partitionsWithStatistics) { for (PartitionWithStatistics partitionWithStatistics : partitionsWithStatistics) { Partition partition = toMetastoreApiPartition(partitionWithStatistics.getPartition()); @@ -320,14 +321,14 @@ public synchronized void addPartitions(String databaseName, String tableName, Li } @Override - public synchronized void dropPartition(String databaseName, String tableName, List parts, boolean deleteData) + public synchronized void 
dropPartition(MetastoreContext metastoreContext, String databaseName, String tableName, List parts, boolean deleteData) { partitions.entrySet().removeIf(entry -> entry.getKey().matches(databaseName, tableName) && entry.getValue().getValues().equals(parts)); } @Override - public synchronized void alterPartition(String databaseName, String tableName, PartitionWithStatistics partitionWithStatistics) + public synchronized void alterPartition(MetastoreContext metastoreContext, String databaseName, String tableName, PartitionWithStatistics partitionWithStatistics) { Partition partition = toMetastoreApiPartition(partitionWithStatistics.getPartition()); if (partition.getParameters() == null) { @@ -339,7 +340,7 @@ public synchronized void alterPartition(String databaseName, String tableName, P } @Override - public synchronized Optional> getPartitionNames(String databaseName, String tableName) + public synchronized Optional> getPartitionNames(MetastoreContext metastoreContext, String databaseName, String tableName) { return Optional.of(ImmutableList.copyOf(partitions.entrySet().stream() .filter(entry -> entry.getKey().matches(databaseName, tableName)) @@ -348,7 +349,7 @@ public synchronized Optional> getPartitionNames(String databaseName } @Override - public synchronized Optional getPartition(String databaseName, String tableName, List partitionValues) + public synchronized Optional getPartition(MetastoreContext metastoreContext, String databaseName, String tableName, List partitionValues) { PartitionName name = PartitionName.partition(databaseName, tableName, partitionValues); Partition partition = partitions.get(name); @@ -359,7 +360,7 @@ public synchronized Optional getPartition(String databaseName, String } @Override - public synchronized Optional> getPartitionNamesByParts(String databaseName, String tableName, List parts) + public synchronized Optional> getPartitionNamesByParts(MetastoreContext metastoreContext, String databaseName, String tableName, List parts) { 
return Optional.of(partitions.entrySet().stream() .filter(entry -> partitionMatches(entry.getValue(), databaseName, tableName, parts)) @@ -368,10 +369,10 @@ public synchronized Optional> getPartitionNamesByParts(String datab } @Override - public List getPartitionNamesByFilter(String databaseName, String tableName, Map partitionPredicates) + public List getPartitionNamesByFilter(MetastoreContext metastoreContext, String databaseName, String tableName, Map partitionPredicates) { List parts = convertPredicateToParts(partitionPredicates); - return getPartitionNamesByParts(databaseName, tableName, parts).orElse(ImmutableList.of()); + return getPartitionNamesByParts(metastoreContext, databaseName, tableName, parts).orElse(ImmutableList.of()); } private static boolean partitionMatches(Partition partition, String databaseName, String tableName, List parts) @@ -394,7 +395,7 @@ private static boolean partitionMatches(Partition partition, String databaseName } @Override - public synchronized List getPartitionsByNames(String databaseName, String tableName, List partitionNames) + public synchronized List getPartitionsByNames(MetastoreContext metastoreContext, String databaseName, String tableName, List partitionNames) { ImmutableList.Builder builder = ImmutableList.builder(); for (String name : partitionNames) { @@ -409,20 +410,20 @@ public synchronized List getPartitionsByNames(String databaseName, St } @Override - public synchronized Optional
getTable(String databaseName, String tableName) + public synchronized Optional
getTable(MetastoreContext metastoreContext, String databaseName, String tableName) { SchemaTableName schemaTableName = new SchemaTableName(databaseName, tableName); return Optional.ofNullable(relations.get(schemaTableName)); } @Override - public Set getSupportedColumnStatistics(Type type) + public Set getSupportedColumnStatistics(MetastoreContext metastoreContext, Type type) { return MetastoreUtil.getSupportedColumnStatistics(type); } @Override - public synchronized PartitionStatistics getTableStatistics(String databaseName, String tableName) + public synchronized PartitionStatistics getTableStatistics(MetastoreContext metastoreContext, String databaseName, String tableName) { SchemaTableName schemaTableName = new SchemaTableName(databaseName, tableName); PartitionStatistics statistics = columnStatistics.get(schemaTableName); @@ -433,7 +434,7 @@ public synchronized PartitionStatistics getTableStatistics(String databaseName, } @Override - public synchronized Map getPartitionStatistics(String databaseName, String tableName, Set partitionNames) + public synchronized Map getPartitionStatistics(MetastoreContext metastoreContext, String databaseName, String tableName, Set partitionNames) { ImmutableMap.Builder result = ImmutableMap.builder(); for (String partitionName : partitionNames) { @@ -448,68 +449,68 @@ public synchronized Map getPartitionStatistics(Stri } @Override - public synchronized void updateTableStatistics(String databaseName, String tableName, Function update) + public synchronized void updateTableStatistics(MetastoreContext metastoreContext, String databaseName, String tableName, Function update) { - columnStatistics.put(new SchemaTableName(databaseName, tableName), update.apply(getTableStatistics(databaseName, tableName))); + columnStatistics.put(new SchemaTableName(databaseName, tableName), update.apply(getTableStatistics(metastoreContext, databaseName, tableName))); } @Override - public synchronized void updatePartitionStatistics(String databaseName, 
String tableName, String partitionName, Function update) + public synchronized void updatePartitionStatistics(MetastoreContext metastoreContext, String databaseName, String tableName, String partitionName, Function update) { PartitionName partitionKey = PartitionName.partition(databaseName, tableName, partitionName); - partitionColumnStatistics.put(partitionKey, update.apply(getPartitionStatistics(databaseName, tableName, ImmutableSet.of(partitionName)).get(partitionName))); + partitionColumnStatistics.put(partitionKey, update.apply(getPartitionStatistics(metastoreContext, databaseName, tableName, ImmutableSet.of(partitionName)).get(partitionName))); } @Override - public void createRole(String role, String grantor) + public void createRole(MetastoreContext metastoreContext, String role, String grantor) { throw new UnsupportedOperationException(); } @Override - public void dropRole(String role) + public void dropRole(MetastoreContext metastoreContext, String role) { throw new UnsupportedOperationException(); } @Override - public Set listRoles() + public Set listRoles(MetastoreContext metastoreContext) { throw new UnsupportedOperationException(); } @Override - public void grantRoles(Set roles, Set grantees, boolean withAdminOption, PrestoPrincipal grantor) + public void grantRoles(MetastoreContext metastoreContext, Set roles, Set grantees, boolean withAdminOption, PrestoPrincipal grantor) { throw new UnsupportedOperationException(); } @Override - public void revokeRoles(Set roles, Set grantees, boolean adminOptionFor, PrestoPrincipal grantor) + public void revokeRoles(MetastoreContext metastoreContext, Set roles, Set grantees, boolean adminOptionFor, PrestoPrincipal grantor) { throw new UnsupportedOperationException(); } @Override - public Set listRoleGrants(PrestoPrincipal principal) + public Set listRoleGrants(MetastoreContext metastoreContext, PrestoPrincipal principal) { throw new UnsupportedOperationException(); } @Override - public Set 
listTablePrivileges(String databaseName, String tableName, PrestoPrincipal principal) + public Set listTablePrivileges(MetastoreContext metastoreContext, String databaseName, String tableName, PrestoPrincipal principal) { throw new UnsupportedOperationException(); } @Override - public void grantTablePrivileges(String databaseName, String tableName, PrestoPrincipal grantee, Set privileges) + public void grantTablePrivileges(MetastoreContext metastoreContext, String databaseName, String tableName, PrestoPrincipal grantee, Set privileges) { throw new UnsupportedOperationException(); } @Override - public void revokeTablePrivileges(String databaseName, String tableName, PrestoPrincipal grantee, Set privileges) + public void revokeTablePrivileges(MetastoreContext metastoreContext, String databaseName, String tableName, PrestoPrincipal grantee, Set privileges) { throw new UnsupportedOperationException(); } diff --git a/presto-hive-metastore/src/test/java/com/facebook/presto/hive/metastore/thrift/MockHiveMetastoreClient.java b/presto-hive-metastore/src/test/java/com/facebook/presto/hive/metastore/thrift/MockHiveMetastoreClient.java index 56a88f0baf0a0..21df78e3f73ec 100644 --- a/presto-hive-metastore/src/test/java/com/facebook/presto/hive/metastore/thrift/MockHiveMetastoreClient.java +++ b/presto-hive-metastore/src/test/java/com/facebook/presto/hive/metastore/thrift/MockHiveMetastoreClient.java @@ -15,6 +15,7 @@ import com.facebook.presto.common.predicate.Domain; import com.facebook.presto.hive.metastore.Column; +import com.facebook.presto.hive.metastore.MetastoreContext; import com.facebook.presto.hive.metastore.PartitionNameWithVersion; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; @@ -52,6 +53,8 @@ public class MockHiveMetastoreClient public static final String TEST_DATABASE = "testdb"; public static final String BAD_DATABASE = "baddb"; public static final String TEST_TABLE = "testtbl"; + public static final String 
TEST_TOKEN = "token"; + public static final MetastoreContext TEST_METASTORE_CONTEXT = new MetastoreContext("test_user"); public static final String TEST_PARTITION1 = "key=testpartition1"; public static final String TEST_PARTITION2 = "key=testpartition2"; public static final List TEST_PARTITION_VALUES1 = ImmutableList.of("testpartition1"); @@ -90,6 +93,12 @@ public List getAllDatabases() return ImmutableList.of(TEST_DATABASE); } + @Override + public String getDelegationToken(String owner, String renewer) + { + return TEST_TOKEN; + } + @Override public List getAllTables(String dbName) { diff --git a/presto-hive-metastore/src/test/java/com/facebook/presto/hive/metastore/thrift/MockHiveMetastoreClientFactory.java b/presto-hive-metastore/src/test/java/com/facebook/presto/hive/metastore/thrift/MockHiveMetastoreClientFactory.java index 216ff776e9e3e..5ff6e10b0b7c0 100644 --- a/presto-hive-metastore/src/test/java/com/facebook/presto/hive/metastore/thrift/MockHiveMetastoreClientFactory.java +++ b/presto-hive-metastore/src/test/java/com/facebook/presto/hive/metastore/thrift/MockHiveMetastoreClientFactory.java @@ -37,7 +37,7 @@ public MockHiveMetastoreClientFactory(Optional socksProxy, Duration } @Override - public HiveMetastoreClient create(HostAndPort address) + public HiveMetastoreClient create(HostAndPort address, Optional token) throws TTransportException { checkState(!clients.isEmpty(), "mock not given enough clients"); diff --git a/presto-hive-metastore/src/test/java/com/facebook/presto/hive/metastore/thrift/TestStaticHiveCluster.java b/presto-hive-metastore/src/test/java/com/facebook/presto/hive/metastore/thrift/TestStaticHiveCluster.java index da5f947fdd787..f8fbdc75f1f57 100644 --- a/presto-hive-metastore/src/test/java/com/facebook/presto/hive/metastore/thrift/TestStaticHiveCluster.java +++ b/presto-hive-metastore/src/test/java/com/facebook/presto/hive/metastore/thrift/TestStaticHiveCluster.java @@ -51,7 +51,7 @@ public void testDefaultHiveMetastore() throws 
TException { HiveCluster cluster = createHiveCluster(CONFIG_WITH_FALLBACK, singletonList(DEFAULT_CLIENT)); - assertEquals(cluster.createMetastoreClient(), DEFAULT_CLIENT); + assertEquals(cluster.createMetastoreClient(Optional.empty()), DEFAULT_CLIENT); } @Test @@ -59,7 +59,7 @@ public void testFallbackHiveMetastore() throws TException { HiveCluster cluster = createHiveCluster(CONFIG_WITH_FALLBACK, asList(null, null, FALLBACK_CLIENT)); - assertEquals(cluster.createMetastoreClient(), FALLBACK_CLIENT); + assertEquals(cluster.createMetastoreClient(Optional.empty()), FALLBACK_CLIENT); } @Test @@ -81,7 +81,7 @@ public void testFallbackHiveMetastoreWithHiveUser() throws TException { HiveCluster cluster = createHiveCluster(CONFIG_WITH_FALLBACK_WITH_USER, asList(null, null, FALLBACK_CLIENT)); - assertEquals(cluster.createMetastoreClient(), FALLBACK_CLIENT); + assertEquals(cluster.createMetastoreClient(Optional.empty()), FALLBACK_CLIENT); } @Test @@ -94,7 +94,7 @@ public void testMetastoreFailedWithoutFallbackWithHiveUser() private static void assertCreateClientFails(HiveCluster cluster, String message) { try { - cluster.createMetastoreClient(); + cluster.createMetastoreClient(Optional.empty()); fail("expected exception"); } catch (TException e) { diff --git a/presto-hive-metastore/src/test/java/com/facebook/presto/hive/metastore/thrift/TestStaticMetastoreConfig.java b/presto-hive-metastore/src/test/java/com/facebook/presto/hive/metastore/thrift/TestStaticMetastoreConfig.java index f76c2b3c611fb..24dab88b37769 100644 --- a/presto-hive-metastore/src/test/java/com/facebook/presto/hive/metastore/thrift/TestStaticMetastoreConfig.java +++ b/presto-hive-metastore/src/test/java/com/facebook/presto/hive/metastore/thrift/TestStaticMetastoreConfig.java @@ -32,7 +32,8 @@ public void testDefaults() { assertRecordedDefaults(recordDefaults(StaticMetastoreConfig.class) .setMetastoreUris(null) - .setMetastoreUsername(null)); + .setMetastoreUsername(null) + 
.setMetastoreLoadBalancingEnabled(false)); } @Test @@ -41,11 +42,13 @@ public void testExplicitPropertyMappingsSingleMetastore() Map properties = new ImmutableMap.Builder() .put("hive.metastore.uri", "thrift://localhost:9083") .put("hive.metastore.username", "presto") + .put("hive.metastore.load-balancing-enabled", "true") .build(); StaticMetastoreConfig expected = new StaticMetastoreConfig() .setMetastoreUris("thrift://localhost:9083") - .setMetastoreUsername("presto"); + .setMetastoreUsername("presto") + .setMetastoreLoadBalancingEnabled(true); assertFullMapping(properties, expected); assertEquals(expected.getMetastoreUris(), ImmutableList.of(URI.create("thrift://localhost:9083"))); @@ -58,11 +61,13 @@ public void testExplicitPropertyMappingsMultipleMetastores() Map properties = new ImmutableMap.Builder() .put("hive.metastore.uri", "thrift://localhost:9083,thrift://192.0.2.3:8932") .put("hive.metastore.username", "presto") + .put("hive.metastore.load-balancing-enabled", "true") .build(); StaticMetastoreConfig expected = new StaticMetastoreConfig() .setMetastoreUris("thrift://localhost:9083,thrift://192.0.2.3:8932") - .setMetastoreUsername("presto"); + .setMetastoreUsername("presto") + .setMetastoreLoadBalancingEnabled(true); assertFullMapping(properties, expected); assertEquals(expected.getMetastoreUris(), ImmutableList.of( diff --git a/presto-hive-metastore/src/test/java/com/facebook/presto/hive/metastore/thrift/TestingHiveCluster.java b/presto-hive-metastore/src/test/java/com/facebook/presto/hive/metastore/thrift/TestingHiveCluster.java index 570f66c9cb1e3..a303568962ed3 100644 --- a/presto-hive-metastore/src/test/java/com/facebook/presto/hive/metastore/thrift/TestingHiveCluster.java +++ b/presto-hive-metastore/src/test/java/com/facebook/presto/hive/metastore/thrift/TestingHiveCluster.java @@ -19,6 +19,7 @@ import org.apache.thrift.TException; import java.util.Objects; +import java.util.Optional; import static java.util.Objects.requireNonNull; @@ -35,10 +36,10 
@@ public TestingHiveCluster(MetastoreClientConfig metastoreClientConfig, String ho } @Override - public HiveMetastoreClient createMetastoreClient() + public HiveMetastoreClient createMetastoreClient(Optional token) throws TException { - return new HiveMetastoreClientFactory(metastoreClientConfig, new NoHiveMetastoreAuthentication()).create(address); + return new HiveMetastoreClientFactory(metastoreClientConfig, new NoHiveMetastoreAuthentication()).create(address, token); } @Override diff --git a/presto-hive/src/main/java/com/facebook/presto/hive/CreateEmptyPartitionProcedure.java b/presto-hive/src/main/java/com/facebook/presto/hive/CreateEmptyPartitionProcedure.java index 76ddcb13a5afe..8ab7c9ee5f2b7 100644 --- a/presto-hive/src/main/java/com/facebook/presto/hive/CreateEmptyPartitionProcedure.java +++ b/presto-hive/src/main/java/com/facebook/presto/hive/CreateEmptyPartitionProcedure.java @@ -17,6 +17,7 @@ import com.facebook.presto.hive.LocationService.WriteInfo; import com.facebook.presto.hive.PartitionUpdate.UpdateMode; import com.facebook.presto.hive.metastore.ExtendedHiveMetastore; +import com.facebook.presto.hive.metastore.MetastoreContext; import com.facebook.presto.spi.ConnectorSession; import com.facebook.presto.spi.PrestoException; import com.facebook.presto.spi.classloader.ThreadContextClassLoader; @@ -108,7 +109,7 @@ private void doCreateEmptyPartition(ConnectorSession session, String schema, Str .map(String.class::cast) .collect(toImmutableList()); - if (metastore.getPartition(schema, table, partitionStringValues).isPresent()) { + if (metastore.getPartition(new MetastoreContext(session.getIdentity()), schema, table, partitionStringValues).isPresent()) { throw new PrestoException(ALREADY_EXISTS, "Partition already exists"); } String partitionName = FileUtils.makePartName(actualPartitionColumnNames, partitionStringValues); diff --git a/presto-hive/src/main/java/com/facebook/presto/hive/HiveMetadata.java 
b/presto-hive/src/main/java/com/facebook/presto/hive/HiveMetadata.java index 973181ea3296c..4670226e0e689 100644 --- a/presto-hive/src/main/java/com/facebook/presto/hive/HiveMetadata.java +++ b/presto-hive/src/main/java/com/facebook/presto/hive/HiveMetadata.java @@ -31,6 +31,7 @@ import com.facebook.presto.hive.metastore.HiveColumnStatistics; import com.facebook.presto.hive.metastore.HivePrivilegeInfo; import com.facebook.presto.hive.metastore.HivePrivilegeInfo.HivePrivilege; +import com.facebook.presto.hive.metastore.MetastoreContext; import com.facebook.presto.hive.metastore.MetastoreUtil; import com.facebook.presto.hive.metastore.Partition; import com.facebook.presto.hive.metastore.PartitionStatistics; @@ -81,6 +82,7 @@ import com.facebook.presto.spi.relation.RowExpression; import com.facebook.presto.spi.relation.RowExpressionService; import com.facebook.presto.spi.relation.SpecialFormExpression; +import com.facebook.presto.spi.security.ConnectorIdentity; import com.facebook.presto.spi.security.GrantInfo; import com.facebook.presto.spi.security.PrestoPrincipal; import com.facebook.presto.spi.security.Privilege; @@ -447,14 +449,14 @@ public SemiTransactionalHiveMetastore getMetastore() @Override public List listSchemaNames(ConnectorSession session) { - return metastore.getAllDatabases(); + return metastore.getAllDatabases(new MetastoreContext(session.getIdentity())); } @Override public HiveTableHandle getTableHandle(ConnectorSession session, SchemaTableName tableName) { requireNonNull(tableName, "tableName is null"); - Optional
table = metastore.getTable(tableName.getSchemaName(), tableName.getTableName()); + Optional
table = metastore.getTable(new MetastoreContext(session.getIdentity()), tableName.getSchemaName(), tableName.getTableName()); if (!table.isPresent()) { return null; } @@ -479,7 +481,7 @@ public ConnectorTableHandle getTableHandleForStatisticsCollection(ConnectorSessi return null; } Optional>> partitionValuesList = getPartitionList(analyzeProperties); - ConnectorTableMetadata tableMetadata = getTableMetadata(handle.getSchemaTableName()); + ConnectorTableMetadata tableMetadata = getTableMetadata(session, handle.getSchemaTableName()); handle = handle.withAnalyzePartitionValues(partitionValuesList); List partitionedBy = getPartitionedBy(tableMetadata.getProperties()); @@ -497,14 +499,14 @@ public Optional getSystemTable(ConnectorSession session, SchemaTabl return getPartitionsSystemTable(session, tableName, SystemTableHandler.PARTITIONS.getSourceTableName(tableName)); } if (SystemTableHandler.PROPERTIES.matches(tableName)) { - return getPropertiesSystemTable(tableName, SystemTableHandler.PROPERTIES.getSourceTableName(tableName)); + return getPropertiesSystemTable(session, tableName, SystemTableHandler.PROPERTIES.getSourceTableName(tableName)); } return Optional.empty(); } - private Optional getPropertiesSystemTable(SchemaTableName tableName, SchemaTableName sourceTableName) + private Optional getPropertiesSystemTable(ConnectorSession session, SchemaTableName tableName, SchemaTableName sourceTableName) { - Optional
table = metastore.getTable(sourceTableName.getSchemaName(), sourceTableName.getTableName()); + Optional
table = metastore.getTable(new MetastoreContext(session.getIdentity()), sourceTableName.getSchemaName(), sourceTableName.getTableName()); if (!table.isPresent() || table.get().getTableType().equals(VIRTUAL_VIEW)) { throw new TableNotFoundException(tableName); } @@ -528,7 +530,7 @@ private Optional getPartitionsSystemTable(ConnectorSession session, return Optional.empty(); } - List partitionColumns = getPartitionColumns(sourceTableName); + List partitionColumns = getPartitionColumns(session.getIdentity(), sourceTableName); if (partitionColumns.isEmpty()) { return Optional.empty(); } @@ -570,9 +572,9 @@ private Optional getPartitionsSystemTable(ConnectorSession session, })); } - private List getPartitionColumns(SchemaTableName tableName) + private List getPartitionColumns(ConnectorIdentity identity, SchemaTableName tableName) { - Table sourceTable = metastore.getTable(tableName.getSchemaName(), tableName.getTableName()).get(); + Table sourceTable = metastore.getTable(new MetastoreContext(identity), tableName.getSchemaName(), tableName.getTableName()).get(); return getPartitionKeyColumnHandles(sourceTable); } @@ -581,12 +583,12 @@ public ConnectorTableMetadata getTableMetadata(ConnectorSession session, Connect { requireNonNull(tableHandle, "tableHandle is null"); SchemaTableName tableName = schemaTableName(tableHandle); - return getTableMetadata(tableName); + return getTableMetadata(session, tableName); } - private ConnectorTableMetadata getTableMetadata(SchemaTableName tableName) + private ConnectorTableMetadata getTableMetadata(ConnectorSession session, SchemaTableName tableName) { - Optional
table = metastore.getTable(tableName.getSchemaName(), tableName.getTableName()); + Optional
table = metastore.getTable(new MetastoreContext(session.getIdentity()), tableName.getSchemaName(), tableName.getTableName()); if (!table.isPresent() || table.get().getTableType().equals(VIRTUAL_VIEW)) { throw new TableNotFoundException(tableName); } @@ -724,7 +726,7 @@ public List listTables(ConnectorSession session, String schemaN { ImmutableList.Builder tableNames = ImmutableList.builder(); for (String schemaName : listSchemas(session, schemaNameOrNull)) { - for (String tableName : metastore.getAllTables(schemaName).orElse(emptyList())) { + for (String tableName : metastore.getAllTables(new MetastoreContext(session.getIdentity()), schemaName).orElse(emptyList())) { tableNames.add(new SchemaTableName(schemaName, tableName)); } } @@ -743,7 +745,7 @@ private List listSchemas(ConnectorSession session, String schemaNameOrNu public Map getColumnHandles(ConnectorSession session, ConnectorTableHandle tableHandle) { SchemaTableName tableName = schemaTableName(tableHandle); - Optional
table = metastore.getTable(tableName.getSchemaName(), tableName.getTableName()); + Optional
table = metastore.getTable(new MetastoreContext(session.getIdentity()), tableName.getSchemaName(), tableName.getTableName()); if (!table.isPresent()) { throw new TableNotFoundException(tableName); } @@ -762,7 +764,7 @@ public Map> listTableColumns(ConnectorSess ImmutableMap.Builder> columns = ImmutableMap.builder(); for (SchemaTableName tableName : listTables(session, prefix)) { try { - columns.put(tableName, getTableMetadata(tableName).getColumns()); + columns.put(tableName, getTableMetadata(session, tableName).getColumns()); } catch (HiveViewNotSupportedException e) { // view is not supported @@ -881,7 +883,7 @@ public void createSchema(ConnectorSession session, String schemaName, Map table = metastore.getTable(schemaName, tableName); + Optional
table = metastore.getTable(new MetastoreContext(session.getIdentity()), schemaName, tableName); if (!table.isPresent()) { throw new TableNotFoundException(new SchemaTableName(schemaName, tableName)); @@ -1391,7 +1393,7 @@ private void failIfAvroSchemaIsSet(HiveTableHandle handle) public void renameTable(ConnectorSession session, ConnectorTableHandle tableHandle, SchemaTableName newTableName) { HiveTableHandle handle = (HiveTableHandle) tableHandle; - metastore.renameTable(handle.getSchemaName(), handle.getTableName(), newTableName.getSchemaName(), newTableName.getTableName()); + metastore.renameTable(new MetastoreContext(session.getIdentity()), handle.getSchemaName(), handle.getTableName(), newTableName.getSchemaName(), newTableName.getTableName()); } @Override @@ -1400,7 +1402,7 @@ public void dropTable(ConnectorSession session, ConnectorTableHandle tableHandle HiveTableHandle handle = (HiveTableHandle) tableHandle; SchemaTableName tableName = schemaTableName(tableHandle); - Optional
target = metastore.getTable(handle.getSchemaName(), handle.getTableName()); + Optional
target = metastore.getTable(new MetastoreContext(session.getIdentity()), handle.getSchemaName(), handle.getTableName()); if (!target.isPresent()) { throw new TableNotFoundException(tableName); } @@ -1417,7 +1419,7 @@ public ConnectorTableHandle beginStatisticsCollection(ConnectorSession session, HiveTableHandle handle = (HiveTableHandle) tableHandle; SchemaTableName tableName = handle.getSchemaTableName(); - metastore.getTable(tableName.getSchemaName(), tableName.getTableName()) + metastore.getTable(new MetastoreContext(session.getIdentity()), tableName.getSchemaName(), tableName.getTableName()) .orElseThrow(() -> new TableNotFoundException(tableName)); return handle; } @@ -1427,7 +1429,7 @@ public void finishStatisticsCollection(ConnectorSession session, ConnectorTableH { HiveTableHandle handle = (HiveTableHandle) tableHandle; SchemaTableName tableName = handle.getSchemaTableName(); - Table table = metastore.getTable(tableName.getSchemaName(), tableName.getTableName()) + Table table = metastore.getTable(new MetastoreContext(session.getIdentity()), tableName.getSchemaName(), tableName.getTableName()) .orElseThrow(() -> new TableNotFoundException(handle.getSchemaTableName())); List partitionColumns = table.getPartitionColumns(); @@ -1443,7 +1445,7 @@ public void finishStatisticsCollection(ConnectorSession session, ConnectorTableH if (partitionColumns.isEmpty()) { // commit analyze to unpartitioned table - metastore.setTableStatistics(table, createPartitionStatistics(session, columnTypes, computedStatisticsMap.get(ImmutableList.of()))); + metastore.setTableStatistics(new MetastoreContext(session.getIdentity()), table, createPartitionStatistics(session, columnTypes, computedStatisticsMap.get(ImmutableList.of()))); } else { List> partitionValuesList; @@ -1451,7 +1453,7 @@ public void finishStatisticsCollection(ConnectorSession session, ConnectorTableH partitionValuesList = handle.getAnalyzePartitionValues().get(); } else { - partitionValuesList = 
metastore.getPartitionNames(handle.getSchemaName(), handle.getTableName()) + partitionValuesList = metastore.getPartitionNames(new MetastoreContext(session.getIdentity()), handle.getSchemaName(), handle.getTableName()) .orElseThrow(() -> new TableNotFoundException(((HiveTableHandle) tableHandle).getSchemaTableName())) .stream() .map(MetastoreUtil::toPartitionValues) @@ -1462,7 +1464,7 @@ public void finishStatisticsCollection(ConnectorSession session, ConnectorTableH Map> columnStatisticTypes = hiveColumnHandles.stream() .filter(columnHandle -> !partitionColumnNames.contains(columnHandle.getName())) .filter(column -> !column.isHidden()) - .collect(toImmutableMap(HiveColumnHandle::getName, column -> ImmutableSet.copyOf(metastore.getSupportedColumnStatistics(typeManager.getType(column.getTypeSignature()))))); + .collect(toImmutableMap(HiveColumnHandle::getName, column -> ImmutableSet.copyOf(metastore.getSupportedColumnStatistics(new MetastoreContext(session.getIdentity()), typeManager.getType(column.getTypeSignature()))))); Supplier emptyPartitionStatistics = Suppliers.memoize(() -> createEmptyPartitionStatistics(columnTypes, columnStatisticTypes)); int usedComputedStatistics = 0; @@ -1477,7 +1479,7 @@ public void finishStatisticsCollection(ConnectorSession session, ConnectorTableH } } verify(usedComputedStatistics == computedStatistics.size(), "All computed statistics must be used"); - metastore.setPartitionStatistics(table, partitionStatistics.build()); + metastore.setPartitionStatistics(new MetastoreContext(session.getIdentity()), table, partitionStatistics.build()); } } @@ -1543,7 +1545,7 @@ public HiveOutputTableHandle beginCreateTable(ConnectorSession session, Connecto tableName, columnHandles, session.getQueryId(), - metastore.generatePageSinkMetadata(schemaTableName), + metastore.generatePageSinkMetadata(new MetastoreContext(session.getIdentity()), schemaTableName), locationHandle, tableStorageFormat, partitionStorageFormat, @@ -1559,6 +1561,7 @@ public 
HiveOutputTableHandle beginCreateTable(ConnectorSession session, Connecto WriteInfo writeInfo = locationService.getQueryWriteInfo(locationHandle); metastore.declareIntentionToWrite( context, + new MetastoreContext(session.getIdentity()), writeInfo.getWriteMode(), writeInfo.getWritePath(), writeInfo.getTempPath(), @@ -1799,7 +1802,7 @@ public HiveInsertTableHandle beginInsert(ConnectorSession session, ConnectorTabl verifyJvmTimeZone(); SchemaTableName tableName = schemaTableName(tableHandle); - Optional
table = metastore.getTable(tableName.getSchemaName(), tableName.getTableName()); + Optional
table = metastore.getTable(new MetastoreContext(session.getIdentity()), tableName.getSchemaName(), tableName.getTableName()); if (!table.isPresent()) { throw new TableNotFoundException(tableName); } @@ -1853,7 +1856,7 @@ public HiveInsertTableHandle beginInsert(ConnectorSession session, ConnectorTabl tableName.getTableName(), handles, session.getQueryId(), - metastore.generatePageSinkMetadata(tableName), + metastore.generatePageSinkMetadata(new MetastoreContext(session.getIdentity()), tableName), locationHandle, table.get().getStorage().getBucketProperty(), decodePreferredOrderingColumnsFromStorage(table.get().getStorage()), @@ -1866,6 +1869,7 @@ public HiveInsertTableHandle beginInsert(ConnectorSession session, ConnectorTabl WriteInfo writeInfo = locationService.getQueryWriteInfo(locationHandle); metastore.declareIntentionToWrite( new HdfsContext(session, tableName.getSchemaName(), tableName.getTableName(), table.get().getStorage().getLocation(), false), + new MetastoreContext(session.getIdentity()), writeInfo.getWriteMode(), writeInfo.getWritePath(), writeInfo.getTempPath(), @@ -1896,7 +1900,7 @@ public Optional finishInsert(ConnectorSession session, HiveStorageFormat tableStorageFormat = handle.getTableStorageFormat(); partitionUpdates = PartitionUpdate.mergePartitionUpdates(partitionUpdates); - Optional
table = metastore.getTable(handle.getSchemaName(), handle.getTableName()); + Optional
table = metastore.getTable(new MetastoreContext(session.getIdentity()), handle.getSchemaName(), handle.getTableName()); if (!table.isPresent()) { throw new TableNotFoundException(new SchemaTableName(handle.getSchemaName(), handle.getTableName())); } @@ -1941,7 +1945,7 @@ public Optional finishInsert(ConnectorSession session, .collect(toImmutableMap(HiveColumnHandle::getName, column -> column.getHiveType().getType(typeManager))); Map, ComputedStatistics> partitionComputedStatistics = createComputedStatisticsToPartitionMap(computedStatistics, partitionedBy, columnTypes); - Set existingPartitions = getExistingPartitionNames(handle.getSchemaName(), handle.getTableName(), partitionUpdates); + Set existingPartitions = getExistingPartitionNames(session.getIdentity(), handle.getSchemaName(), handle.getTableName(), partitionUpdates); for (PartitionUpdate partitionUpdate : partitionUpdates) { if (partitionUpdate.getName().isEmpty()) { @@ -2093,7 +2097,7 @@ private Map getColumnStatistics(Map .orElse(ImmutableMap.of()); } - private Set getExistingPartitionNames(String databaseName, String tableName, List partitionUpdates) + private Set getExistingPartitionNames(ConnectorIdentity identity, String databaseName, String tableName, List partitionUpdates) { ImmutableSet.Builder existingPartitions = ImmutableSet.builder(); ImmutableSet.Builder potentiallyNewPartitions = ImmutableSet.builder(); @@ -2114,7 +2118,7 @@ private Set getExistingPartitionNames(String databaseName, String tableN // try to load potentially new partitions in batches to check if any of them exist Lists.partition(ImmutableList.copyOf(potentiallyNewPartitions.build()), maxPartitionBatchSize).stream() - .flatMap(partitionNames -> metastore.getPartitionsByNames(databaseName, tableName, partitionNames).entrySet().stream() + .flatMap(partitionNames -> metastore.getPartitionsByNames(new MetastoreContext(identity), databaseName, tableName, partitionNames).entrySet().stream() .filter(entry -> 
entry.getValue().isPresent()) .map(Map.Entry::getKey)) .forEach(existingPartitions::add); @@ -2172,13 +2176,13 @@ public void createView(ConnectorSession session, ConnectorTableMetadata viewMeta Table table = tableBuilder.build(); PrincipalPrivileges principalPrivileges = buildInitialPrivilegeSet(session.getUser()); - Optional
existing = metastore.getTable(viewName.getSchemaName(), viewName.getTableName()); + Optional
existing = metastore.getTable(new MetastoreContext(session.getIdentity()), viewName.getSchemaName(), viewName.getTableName()); if (existing.isPresent()) { if (!replace || !MetastoreUtil.isPrestoView(existing.get())) { throw new ViewAlreadyExistsException(viewName); } - metastore.replaceView(viewName.getSchemaName(), viewName.getTableName(), table, principalPrivileges); + metastore.replaceView(new MetastoreContext(session.getIdentity()), viewName.getSchemaName(), viewName.getTableName(), table, principalPrivileges); return; } @@ -2214,7 +2218,7 @@ public List listViews(ConnectorSession session, String schemaNa { ImmutableList.Builder tableNames = ImmutableList.builder(); for (String schemaName : listSchemas(session, schemaNameOrNull)) { - for (String tableName : metastore.getAllViews(schemaName).orElse(emptyList())) { + for (String tableName : metastore.getAllViews(new MetastoreContext(session.getIdentity()), schemaName).orElse(emptyList())) { tableNames.add(new SchemaTableName(schemaName, tableName)); } } @@ -2234,7 +2238,7 @@ public Map getViews(ConnectorSession s } for (SchemaTableName schemaTableName : tableNames) { - Optional
table = metastore.getTable(schemaTableName.getSchemaName(), schemaTableName.getTableName()); + Optional
table = metastore.getTable(new MetastoreContext(session.getIdentity()), schemaTableName.getSchemaName(), schemaTableName.getTableName()); if (table.isPresent() && MetastoreUtil.isPrestoView(table.get())) { views.put(schemaTableName, new ConnectorViewDefinition( schemaTableName, @@ -2251,7 +2255,7 @@ public Optional getMaterializedView(Connect { requireNonNull(viewName, "viewName is null"); - Optional
table = metastore.getTable(viewName.getSchemaName(), viewName.getTableName()); + Optional
table = metastore.getTable(new MetastoreContext(session.getIdentity()), viewName.getSchemaName(), viewName.getTableName()); if (table.isPresent() && MetastoreUtil.isPrestoMaterializedView(table.get())) { try { @@ -2284,7 +2288,7 @@ public void createMaterializedView(ConnectorSession session, ConnectorTableMetad .build(); List
baseTables = viewDefinition.getBaseTables().stream() - .map(baseTableName -> metastore.getTable(baseTableName.getSchemaName(), baseTableName.getTableName()) + .map(baseTableName -> metastore.getTable(new MetastoreContext(session.getIdentity()), baseTableName.getSchemaName(), baseTableName.getTableName()) .orElseThrow(() -> new TableNotFoundException(baseTableName))) .collect(toImmutableList()); @@ -2342,7 +2346,7 @@ public OptionalLong metadataDelete(ConnectorSession session, ConnectorTableHandl HiveTableHandle handle = (HiveTableHandle) tableHandle; HiveTableLayoutHandle layoutHandle = (HiveTableLayoutHandle) tableLayoutHandle; - Optional
table = metastore.getTable(handle.getSchemaName(), handle.getTableName()); + Optional
table = metastore.getTable(new MetastoreContext(session.getIdentity()), handle.getSchemaName(), handle.getTableName()); if (!table.isPresent()) { throw new TableNotFoundException(handle.getSchemaTableName()); } @@ -2469,6 +2473,7 @@ public List getTableLayouts(ConnectorSession session TupleDomain domainPredicate = hivePartitionResult.getEffectivePredicate().transform(HiveMetadata::toSubfield); Table table = metastore.getTable( + new MetastoreContext(session.getIdentity()), handle.getSchemaTableName().getSchemaName(), handle.getSchemaTableName().getTableName()) .orElseThrow(() -> new TableNotFoundException(handle.getSchemaTableName())); @@ -2543,7 +2548,7 @@ public ConnectorTableLayout getTableLayout(ConnectorSession session, ConnectorTa Optional tablePartitioning = Optional.empty(); SchemaTableName tableName = hiveLayoutHandle.getSchemaTableName(); - Table table = metastore.getTable(tableName.getSchemaName(), tableName.getTableName()) + Table table = metastore.getTable(new MetastoreContext(session.getIdentity()), tableName.getSchemaName(), tableName.getTableName()) .orElseThrow(() -> new TableNotFoundException(tableName)); // never ignore table bucketing for temporary tables as those are created such explicitly by the engine request boolean bucketExecutionEnabled = table.getTableType().equals(TEMPORARY_TABLE) || isBucketExecutionEnabled(session); @@ -2811,7 +2816,7 @@ public Optional getInsertLayout(ConnectorSession sessio { HiveTableHandle hiveTableHandle = (HiveTableHandle) tableHandle; SchemaTableName tableName = hiveTableHandle.getSchemaTableName(); - Table table = metastore.getTable(tableName.getSchemaName(), tableName.getTableName()) + Table table = metastore.getTable(new MetastoreContext(session.getIdentity()), tableName.getSchemaName(), tableName.getTableName()) .orElseThrow(() -> new TableNotFoundException(tableName)); Optional hiveBucketHandle = getHiveBucketHandle(table); @@ -2857,7 +2862,7 @@ public Optional getPreferredShuffleLayoutForInsert(Conn { 
HiveTableHandle hiveTableHandle = (HiveTableHandle) tableHandle; SchemaTableName tableName = hiveTableHandle.getSchemaTableName(); - Table table = metastore.getTable(tableName.getSchemaName(), tableName.getTableName()) + Table table = metastore.getTable(new MetastoreContext(session.getIdentity()), tableName.getSchemaName(), tableName.getTableName()) .orElseThrow(() -> new TableNotFoundException(tableName)); Optional hiveBucketHandle = getHiveBucketHandle(table); @@ -2958,23 +2963,23 @@ public TableStatisticsMetadata getStatisticsCollectionMetadataForWrite(Connector return TableStatisticsMetadata.empty(); } List partitionedBy = firstNonNull(getPartitionedBy(tableMetadata.getProperties()), ImmutableList.of()); - Optional
table = metastore.getTable(tableMetadata.getTable().getSchemaName(), tableMetadata.getTable().getTableName()); - return getStatisticsCollectionMetadata(tableMetadata.getColumns(), partitionedBy, false, table.isPresent() && table.get().getTableType() == TEMPORARY_TABLE); + Optional
table = metastore.getTable(new MetastoreContext(session.getIdentity()), tableMetadata.getTable().getSchemaName(), tableMetadata.getTable().getTableName()); + return getStatisticsCollectionMetadata(session, tableMetadata.getColumns(), partitionedBy, false, table.isPresent() && table.get().getTableType() == TEMPORARY_TABLE); } @Override public TableStatisticsMetadata getStatisticsCollectionMetadata(ConnectorSession session, ConnectorTableMetadata tableMetadata) { List partitionedBy = firstNonNull(getPartitionedBy(tableMetadata.getProperties()), ImmutableList.of()); - return getStatisticsCollectionMetadata(tableMetadata.getColumns(), partitionedBy, true, false); + return getStatisticsCollectionMetadata(session, tableMetadata.getColumns(), partitionedBy, true, false); } - private TableStatisticsMetadata getStatisticsCollectionMetadata(List columns, List partitionedBy, boolean includeRowCount, boolean isTemporaryTable) + private TableStatisticsMetadata getStatisticsCollectionMetadata(ConnectorSession session, List columns, List partitionedBy, boolean includeRowCount, boolean isTemporaryTable) { Set columnStatistics = columns.stream() .filter(column -> !partitionedBy.contains(column.getName())) .filter(column -> !column.isHidden()) - .map(isTemporaryTable ? this::getColumnStatisticMetadataForTemporaryTable : this::getColumnStatisticMetadata) + .map(meta -> isTemporaryTable ? 
this.getColumnStatisticMetadataForTemporaryTable(meta) : this.getColumnStatisticMetadata(session, meta)) .flatMap(List::stream) .collect(toImmutableSet()); @@ -2982,9 +2987,9 @@ private TableStatisticsMetadata getStatisticsCollectionMetadata(List getColumnStatisticMetadata(ColumnMetadata columnMetadata) + private List getColumnStatisticMetadata(ConnectorSession session, ColumnMetadata columnMetadata) { - return getColumnStatisticMetadata(columnMetadata.getName(), metastore.getSupportedColumnStatistics(columnMetadata.getType())); + return getColumnStatisticMetadata(columnMetadata.getName(), metastore.getSupportedColumnStatistics(new MetastoreContext(session.getIdentity()), columnMetadata.getType())); } private List getColumnStatisticMetadataForTemporaryTable(ColumnMetadata columnMetadata) @@ -3005,51 +3010,51 @@ public void createRole(ConnectorSession session, String role, Optional listRoles(ConnectorSession session) { - return ImmutableSet.copyOf(metastore.listRoles()); + return ImmutableSet.copyOf(metastore.listRoles(new MetastoreContext(session.getIdentity()))); } @Override public Set listRoleGrants(ConnectorSession session, PrestoPrincipal principal) { - return ImmutableSet.copyOf(metastore.listRoleGrants(principal)); + return ImmutableSet.copyOf(metastore.listRoleGrants(new MetastoreContext(session.getIdentity()), principal)); } @Override public void grantRoles(ConnectorSession session, Set roles, Set grantees, boolean withAdminOption, Optional grantor) { - metastore.grantRoles(roles, grantees, withAdminOption, grantor.orElse(new PrestoPrincipal(USER, session.getUser()))); + metastore.grantRoles(new MetastoreContext(session.getIdentity()), roles, grantees, withAdminOption, grantor.orElse(new PrestoPrincipal(USER, session.getUser()))); } @Override public void revokeRoles(ConnectorSession session, Set roles, Set grantees, boolean adminOptionFor, Optional grantor) { - metastore.revokeRoles(roles, grantees, adminOptionFor, grantor.orElse(new PrestoPrincipal(USER, 
session.getUser()))); + metastore.revokeRoles(new MetastoreContext(session.getIdentity()), roles, grantees, adminOptionFor, grantor.orElse(new PrestoPrincipal(USER, session.getUser()))); } @Override public Set listApplicableRoles(ConnectorSession session, PrestoPrincipal principal) { - return ThriftMetastoreUtil.listApplicableRoles(principal, metastore::listRoleGrants) + return ThriftMetastoreUtil.listApplicableRoles(principal, (PrestoPrincipal p) -> metastore.listRoleGrants(new MetastoreContext(session.getIdentity()), p)) .collect(toImmutableSet()); } @Override public Set listEnabledRoles(ConnectorSession session) { - return ThriftMetastoreUtil.listEnabledRoles(session.getIdentity(), metastore::listRoleGrants) + return ThriftMetastoreUtil.listEnabledRoles(session.getIdentity(), (PrestoPrincipal p) -> metastore.listRoleGrants(new MetastoreContext(session.getIdentity()), p)) .collect(toImmutableSet()); } @@ -3063,7 +3068,7 @@ public void grantTablePrivileges(ConnectorSession session, SchemaTableName schem .map(privilege -> new HivePrivilegeInfo(toHivePrivilege(privilege), grantOption, new PrestoPrincipal(USER, session.getUser()), new PrestoPrincipal(USER, session.getUser()))) .collect(toSet()); - metastore.grantTablePrivileges(schemaName, tableName, grantee, hivePrivilegeInfos); + metastore.grantTablePrivileges(new MetastoreContext(session.getIdentity()), schemaName, tableName, grantee, hivePrivilegeInfos); } @Override @@ -3076,7 +3081,7 @@ public void revokeTablePrivileges(ConnectorSession session, SchemaTableName sche .map(privilege -> new HivePrivilegeInfo(toHivePrivilege(privilege), grantOption, new PrestoPrincipal(USER, session.getUser()), new PrestoPrincipal(USER, session.getUser()))) .collect(toSet()); - metastore.revokeTablePrivileges(schemaName, tableName, grantee, hivePrivilegeInfos); + metastore.revokeTablePrivileges(new MetastoreContext(session.getIdentity()), schemaName, tableName, grantee, hivePrivilegeInfos); } @Override @@ -3088,11 +3093,11 @@ public 
List listTablePrivileges(ConnectorSession session, SchemaTable ImmutableList.Builder result = ImmutableList.builder(); for (SchemaTableName tableName : listTables(session, schemaTablePrefix)) { if (isAdminRoleSet) { - result.addAll(buildGrants(tableName, null)); + result.addAll(buildGrants(session, tableName, null)); } else { for (PrestoPrincipal grantee : principals) { - result.addAll(buildGrants(tableName, grantee)); + result.addAll(buildGrants(session, tableName, grantee)); } } } @@ -3137,10 +3142,10 @@ public void doMetadataUpdateCleanup(QueryId queryId) hiveFileRenamer.cleanup(queryId); } - private List buildGrants(SchemaTableName tableName, PrestoPrincipal principal) + private List buildGrants(ConnectorSession session, SchemaTableName tableName, PrestoPrincipal principal) { ImmutableList.Builder result = ImmutableList.builder(); - Set hivePrivileges = metastore.listTablePrivileges(tableName.getSchemaName(), tableName.getTableName(), principal); + Set hivePrivileges = metastore.listTablePrivileges(new MetastoreContext(session.getIdentity()), tableName.getSchemaName(), tableName.getTableName(), principal); for (HivePrivilegeInfo hivePrivilege : hivePrivileges) { Set prestoPrivileges = hivePrivilege.toPrivilegeInfo(); for (PrivilegeInfo prestoPrivilege : prestoPrivileges) { diff --git a/presto-hive/src/main/java/com/facebook/presto/hive/HiveMetadataFactory.java b/presto-hive/src/main/java/com/facebook/presto/hive/HiveMetadataFactory.java index 34dbfd3cae015..c69d289df6e89 100644 --- a/presto-hive/src/main/java/com/facebook/presto/hive/HiveMetadataFactory.java +++ b/presto-hive/src/main/java/com/facebook/presto/hive/HiveMetadataFactory.java @@ -45,6 +45,7 @@ public class HiveMetadataFactory private final boolean createsOfNonManagedTablesEnabled; private final int maxPartitionBatchSize; private final long perTransactionCacheMaximumSize; + private final boolean metastoreImpersonationEnabled; private final ExtendedHiveMetastore metastore; private final 
HdfsEnvironment hdfsEnvironment; private final HivePartitionManager partitionManager; @@ -104,6 +105,7 @@ public HiveMetadataFactory( hiveClientConfig.isUndoMetastoreOperationsEnabled(), hiveClientConfig.getMaxPartitionBatchSize(), metastoreClientConfig.getPerTransactionMetastoreCacheMaximumSize(), + metastoreClientConfig.isMetastoreImpersonationEnabled(), typeManager, locationService, functionResolution, @@ -135,6 +137,7 @@ public HiveMetadataFactory( boolean undoMetastoreOperationsEnabled, int maxPartitionBatchSize, long perTransactionCacheMaximumSize, + boolean metastoreImpersonationEnabled, TypeManager typeManager, LocationService locationService, StandardFunctionResolution functionResolution, @@ -160,7 +163,7 @@ public HiveMetadataFactory( this.undoMetastoreOperationsEnabled = undoMetastoreOperationsEnabled; this.maxPartitionBatchSize = maxPartitionBatchSize; this.perTransactionCacheMaximumSize = perTransactionCacheMaximumSize; - + this.metastoreImpersonationEnabled = metastoreImpersonationEnabled; this.metastore = requireNonNull(metastore, "metastore is null"); this.hdfsEnvironment = requireNonNull(hdfsEnvironment, "hdfsEnvironment is null"); this.partitionManager = requireNonNull(partitionManager, "partitionManager is null"); @@ -195,7 +198,7 @@ public HiveMetadata get() { SemiTransactionalHiveMetastore metastore = new SemiTransactionalHiveMetastore( hdfsEnvironment, - CachingHiveMetastore.memoizeMetastore(this.metastore, perTransactionCacheMaximumSize), // per-transaction cache + CachingHiveMetastore.memoizeMetastore(this.metastore, metastoreImpersonationEnabled, perTransactionCacheMaximumSize), // per-transaction cache fileRenameExecutor, skipDeletionForAlter, skipTargetCleanupOnRollback, diff --git a/presto-hive/src/main/java/com/facebook/presto/hive/HivePageSinkProvider.java b/presto-hive/src/main/java/com/facebook/presto/hive/HivePageSinkProvider.java index 7984aec278e32..e391edcfb92c3 100644 --- 
a/presto-hive/src/main/java/com/facebook/presto/hive/HivePageSinkProvider.java +++ b/presto-hive/src/main/java/com/facebook/presto/hive/HivePageSinkProvider.java @@ -18,6 +18,7 @@ import com.facebook.presto.common.type.TypeManager; import com.facebook.presto.hive.metastore.ExtendedHiveMetastore; import com.facebook.presto.hive.metastore.HivePageSinkMetadataProvider; +import com.facebook.presto.hive.metastore.MetastoreContext; import com.facebook.presto.hive.metastore.SortingColumn; import com.facebook.presto.spi.ConnectorInsertTableHandle; import com.facebook.presto.spi.ConnectorOutputTableHandle; @@ -72,6 +73,7 @@ public class HivePageSinkProvider private final HiveWriterStats hiveWriterStats; private final OrcFileWriterFactory orcFileWriterFactory; private final long perTransactionMetastoreCacheMaximumSize; + private final boolean metastoreImpersonationEnabled; @Inject public HivePageSinkProvider( @@ -112,6 +114,7 @@ public HivePageSinkProvider( this.hiveWriterStats = requireNonNull(hiveWriterStats, "stats is null"); this.orcFileWriterFactory = requireNonNull(orcFileWriterFactory, "orcFileWriterFactory is null"); this.perTransactionMetastoreCacheMaximumSize = metastoreClientConfig.getPerTransactionMetastoreCacheMaximumSize(); + this.metastoreImpersonationEnabled = metastoreClientConfig.isMetastoreImpersonationEnabled(); } @Override @@ -162,7 +165,7 @@ private ConnectorPageSink createPageSink(HiveWritableTableHandle handle, boolean handle.getFilePrefix(), // The scope of metastore cache is within a single HivePageSink object // TODO: Extend metastore cache scope to the entire transaction - new HivePageSinkMetadataProvider(handle.getPageSinkMetadata(), memoizeMetastore(metastore, perTransactionMetastoreCacheMaximumSize)), + new HivePageSinkMetadataProvider(handle.getPageSinkMetadata(), memoizeMetastore(metastore, metastoreImpersonationEnabled, perTransactionMetastoreCacheMaximumSize), new MetastoreContext(session.getIdentity())), typeManager, hdfsEnvironment, 
pageSorter, diff --git a/presto-hive/src/main/java/com/facebook/presto/hive/HivePartitionManager.java b/presto-hive/src/main/java/com/facebook/presto/hive/HivePartitionManager.java index 4c853f9d42fa1..74c3e8f514a63 100644 --- a/presto-hive/src/main/java/com/facebook/presto/hive/HivePartitionManager.java +++ b/presto-hive/src/main/java/com/facebook/presto/hive/HivePartitionManager.java @@ -22,6 +22,7 @@ import com.facebook.presto.common.type.VarcharType; import com.facebook.presto.hive.HiveBucketing.HiveBucketFilter; import com.facebook.presto.hive.metastore.Column; +import com.facebook.presto.hive.metastore.MetastoreContext; import com.facebook.presto.hive.metastore.SemiTransactionalHiveMetastore; import com.facebook.presto.hive.metastore.Table; import com.facebook.presto.spi.ColumnHandle; @@ -115,7 +116,7 @@ public Iterable getPartitionsIterator( TupleDomain effectivePredicateColumnHandles = constraint.getSummary(); SchemaTableName tableName = hiveTableHandle.getSchemaTableName(); - Table table = getTable(metastore, tableName, isOfflineDataDebugModeEnabled(session)); + Table table = getTable(session, metastore, tableName, isOfflineDataDebugModeEnabled(session)); List partitionColumns = getPartitionKeyColumnHandles(table); @@ -133,7 +134,7 @@ public Iterable getPartitionsIterator( } else { return () -> { - List filteredPartitionNames = getFilteredPartitionNames(metastore, tableName, effectivePredicate); + List filteredPartitionNames = getFilteredPartitionNames(session, metastore, tableName, effectivePredicate); return filteredPartitionNames.stream() // Apply extra filters which could not be done by getFilteredPartitionNames .map(partitionName -> parseValuesAndFilterPartition(tableName, partitionName, partitionColumns, partitionTypes, constraint)) @@ -188,7 +189,7 @@ public HivePartitionResult getPartitions(SemiTransactionalHiveMetastore metastor TupleDomain effectivePredicate = constraint.getSummary(); SchemaTableName tableName = 
hiveTableHandle.getSchemaTableName(); - Table table = getTable(metastore, tableName, isOfflineDataDebugModeEnabled(session)); + Table table = getTable(session, metastore, tableName, isOfflineDataDebugModeEnabled(session)); List partitionColumns = getPartitionKeyColumnHandles(table); @@ -312,7 +313,7 @@ public HivePartitionResult getPartitions(SemiTransactionalHiveMetastore metastor HiveTableHandle hiveTableHandle = (HiveTableHandle) tableHandle; SchemaTableName tableName = hiveTableHandle.getSchemaTableName(); - Table table = getTable(metastore, tableName, isOfflineDataDebugModeEnabled(session)); + Table table = getTable(session, metastore, tableName, isOfflineDataDebugModeEnabled(session)); List partitionColumns = getPartitionKeyColumnHandles(table); List partitionColumnTypes = partitionColumns.stream() @@ -363,9 +364,9 @@ private Optional parseValuesAndFilterPartition( return Optional.of(partition); } - private Table getTable(SemiTransactionalHiveMetastore metastore, SchemaTableName tableName, boolean offlineDataDebugModeEnabled) + private Table getTable(ConnectorSession session, SemiTransactionalHiveMetastore metastore, SchemaTableName tableName, boolean offlineDataDebugModeEnabled) { - Optional
target = metastore.getTable(tableName.getSchemaName(), tableName.getTableName()); + Optional
target = metastore.getTable(new MetastoreContext(session.getIdentity()), tableName.getSchemaName(), tableName.getTableName()); if (!target.isPresent()) { throw new TableNotFoundException(tableName); } @@ -378,14 +379,14 @@ private Table getTable(SemiTransactionalHiveMetastore metastore, SchemaTableName return table; } - private List getFilteredPartitionNames(SemiTransactionalHiveMetastore metastore, SchemaTableName tableName, Map partitionPredicates) + private List getFilteredPartitionNames(ConnectorSession session, SemiTransactionalHiveMetastore metastore, SchemaTableName tableName, Map partitionPredicates) { if (partitionPredicates.isEmpty()) { return ImmutableList.of(); } // fetch the partition names - return metastore.getPartitionNamesByFilter(tableName.getSchemaName(), tableName.getTableName(), partitionPredicates) + return metastore.getPartitionNamesByFilter(new MetastoreContext(session.getIdentity()), tableName.getSchemaName(), tableName.getTableName(), partitionPredicates) .orElseThrow(() -> new TableNotFoundException(tableName)); } diff --git a/presto-hive/src/main/java/com/facebook/presto/hive/HiveSplitManager.java b/presto-hive/src/main/java/com/facebook/presto/hive/HiveSplitManager.java index 133c91067faba..da26482bb5ed9 100644 --- a/presto-hive/src/main/java/com/facebook/presto/hive/HiveSplitManager.java +++ b/presto-hive/src/main/java/com/facebook/presto/hive/HiveSplitManager.java @@ -29,6 +29,7 @@ import com.facebook.presto.hive.metastore.DoubleStatistics; import com.facebook.presto.hive.metastore.HiveColumnStatistics; import com.facebook.presto.hive.metastore.IntegerStatistics; +import com.facebook.presto.hive.metastore.MetastoreContext; import com.facebook.presto.hive.metastore.Partition; import com.facebook.presto.hive.metastore.PartitionStatistics; import com.facebook.presto.hive.metastore.SemiTransactionalHiveMetastore; @@ -216,7 +217,7 @@ public ConnectorSplitSource getSplits( throw new PrestoException(HIVE_TRANSACTION_NOT_FOUND, 
format("Transaction not found: %s", transaction)); } SemiTransactionalHiveMetastore metastore = metadata.getMetastore(); - Table table = metastore.getTable(tableName.getSchemaName(), tableName.getTableName()) + Table table = metastore.getTable(new MetastoreContext(session.getIdentity()), tableName.getSchemaName(), tableName.getTableName()) .orElseThrow(() -> new TableNotFoundException(tableName)); if (!isOfflineDataDebugModeEnabled(session)) { @@ -527,13 +528,16 @@ private Map getPartitionSplitInfo( Map predicateColumns, Optional> domains) { + MetastoreContext metastoreContext = new MetastoreContext(session.getIdentity()); Map> partitions = metastore.getPartitionsByNames( + metastoreContext, tableName.getSchemaName(), tableName.getTableName(), Lists.transform(partitionBatch, HivePartition::getPartitionId)); Map partitionStatistics = ImmutableMap.of(); if (domains.isPresent() && isPartitionStatisticsBasedOptimizationEnabled(session)) { partitionStatistics = metastore.getPartitionStatistics( + metastoreContext, tableName.getSchemaName(), tableName.getTableName(), partitionBatch.stream() diff --git a/presto-hive/src/main/java/com/facebook/presto/hive/HiveWriteUtils.java b/presto-hive/src/main/java/com/facebook/presto/hive/HiveWriteUtils.java index abb30142f0dcf..654b04ec80d5e 100644 --- a/presto-hive/src/main/java/com/facebook/presto/hive/HiveWriteUtils.java +++ b/presto-hive/src/main/java/com/facebook/presto/hive/HiveWriteUtils.java @@ -30,6 +30,7 @@ import com.facebook.presto.common.type.VarcharType; import com.facebook.presto.hive.RecordFileWriter.ExtendedRecordWriter; import com.facebook.presto.hive.metastore.Database; +import com.facebook.presto.hive.metastore.MetastoreContext; import com.facebook.presto.hive.metastore.Partition; import com.facebook.presto.hive.metastore.PrestoTableType; import com.facebook.presto.hive.metastore.SemiTransactionalHiveMetastore; @@ -40,6 +41,7 @@ import com.facebook.presto.spi.PrestoException; import 
com.facebook.presto.spi.SchemaNotFoundException; import com.facebook.presto.spi.SchemaTableName; +import com.facebook.presto.spi.security.ConnectorIdentity; import com.google.common.collect.ImmutableList; import com.google.common.primitives.Shorts; import com.google.common.primitives.SignedBytes; @@ -351,7 +353,7 @@ private static void checkWritable( public static Path getTableDefaultLocation(ConnectorSession session, SemiTransactionalHiveMetastore metastore, HdfsEnvironment hdfsEnvironment, String schemaName, String tableName) { - Optional location = getDatabase(metastore, schemaName).getLocation(); + Optional location = getDatabase(session.getIdentity(), metastore, schemaName).getLocation(); if (!location.isPresent() || location.get().isEmpty()) { throw new PrestoException(HIVE_DATABASE_LOCATION_ERROR, format("Database '%s' location is not set", schemaName)); } @@ -370,9 +372,9 @@ public static Path getTableDefaultLocation(ConnectorSession session, SemiTransac return new Path(databasePath, tableName); } - private static Database getDatabase(SemiTransactionalHiveMetastore metastore, String database) + private static Database getDatabase(ConnectorIdentity identity, SemiTransactionalHiveMetastore metastore, String database) { - return metastore.getDatabase(database).orElseThrow(() -> new SchemaNotFoundException(database)); + return metastore.getDatabase(new MetastoreContext(identity), database).orElseThrow(() -> new SchemaNotFoundException(database)); } public static boolean isS3FileSystem(HdfsContext context, HdfsEnvironment hdfsEnvironment, Path path) diff --git a/presto-hive/src/main/java/com/facebook/presto/hive/SyncPartitionMetadataProcedure.java b/presto-hive/src/main/java/com/facebook/presto/hive/SyncPartitionMetadataProcedure.java index 848dc83712eca..d096958f06303 100644 --- a/presto-hive/src/main/java/com/facebook/presto/hive/SyncPartitionMetadataProcedure.java +++ b/presto-hive/src/main/java/com/facebook/presto/hive/SyncPartitionMetadataProcedure.java @@ 
-14,6 +14,7 @@ package com.facebook.presto.hive; import com.facebook.presto.hive.metastore.Column; +import com.facebook.presto.hive.metastore.MetastoreContext; import com.facebook.presto.hive.metastore.Partition; import com.facebook.presto.hive.metastore.PartitionStatistics; import com.facebook.presto.hive.metastore.SemiTransactionalHiveMetastore; @@ -111,7 +112,7 @@ private void doSyncPartitionMetadata(ConnectorSession session, String schemaName SemiTransactionalHiveMetastore metastore = hiveMetadataFactory.get().getMetastore(); SchemaTableName schemaTableName = new SchemaTableName(schemaName, tableName); - Table table = metastore.getTable(schemaName, tableName) + Table table = metastore.getTable(new MetastoreContext(session.getIdentity()), schemaName, tableName) .orElseThrow(() -> new TableNotFoundException(schemaTableName)); if (table.getPartitionColumns().isEmpty()) { throw new PrestoException(INVALID_PROCEDURE_ARGUMENT, "Table is not partitioned: " + schemaTableName); @@ -124,7 +125,7 @@ private void doSyncPartitionMetadata(ConnectorSession session, String schemaName try { FileSystem fileSystem = hdfsEnvironment.getFileSystem(context, tableLocation); - List partitionsInMetastore = metastore.getPartitionNames(schemaName, tableName) + List partitionsInMetastore = metastore.getPartitionNames(new MetastoreContext(session.getIdentity()), schemaName, tableName) .orElseThrow(() -> new TableNotFoundException(schemaTableName)); List partitionsInFileSystem = listDirectory(fileSystem, fileSystem.getFileStatus(tableLocation), table.getPartitionColumns(), table.getPartitionColumns().size(), caseSensitive).stream() .map(fileStatus -> fileStatus.getPath().toUri()) diff --git a/presto-hive/src/main/java/com/facebook/presto/hive/authentication/KerberosHiveMetastoreAuthentication.java b/presto-hive/src/main/java/com/facebook/presto/hive/authentication/KerberosHiveMetastoreAuthentication.java index 793a836b203e4..582e664be0612 100644 --- 
a/presto-hive/src/main/java/com/facebook/presto/hive/authentication/KerberosHiveMetastoreAuthentication.java +++ b/presto-hive/src/main/java/com/facebook/presto/hive/authentication/KerberosHiveMetastoreAuthentication.java @@ -16,26 +16,43 @@ import com.facebook.presto.hive.ForHiveMetastore; import com.facebook.presto.hive.HiveClientConfig; import com.google.common.collect.ImmutableMap; +import org.apache.hadoop.hive.metastore.security.DelegationTokenIdentifier; import org.apache.hadoop.hive.thrift.client.TUGIAssumingTransport; import org.apache.hadoop.security.SaslRpcServer; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.security.token.Token; +import org.apache.hadoop.security.token.TokenIdentifier; import org.apache.thrift.transport.TSaslClientTransport; import org.apache.thrift.transport.TTransport; import javax.inject.Inject; -import javax.security.sasl.Sasl; +import javax.security.auth.callback.Callback; +import javax.security.auth.callback.CallbackHandler; +import javax.security.auth.callback.NameCallback; +import javax.security.auth.callback.PasswordCallback; +import javax.security.auth.callback.UnsupportedCallbackException; +import javax.security.sasl.RealmCallback; +import javax.security.sasl.RealmChoiceCallback; import java.io.IOException; import java.io.UncheckedIOException; +import java.util.Base64; import java.util.Map; +import java.util.Optional; import static com.google.common.base.Preconditions.checkState; import static java.util.Objects.requireNonNull; +import static javax.security.sasl.Sasl.QOP; +import static javax.security.sasl.Sasl.SERVER_AUTH; import static org.apache.hadoop.security.SaslRpcServer.AuthMethod.KERBEROS; +import static org.apache.hadoop.security.SaslRpcServer.AuthMethod.TOKEN; +import static org.apache.hadoop.security.SaslRpcServer.SASL_DEFAULT_REALM; import static org.apache.hadoop.security.SecurityUtil.getServerPrincipal; public class KerberosHiveMetastoreAuthentication implements 
HiveMetastoreAuthentication { + private static final Map SASL_PROPERTIES = ImmutableMap.of(QOP, "auth", SERVER_AUTH, "true"); private final String hiveMetastoreServicePrincipal; private final HadoopAuthentication authentication; private final boolean hdfsWireEncryptionEnabled; @@ -57,7 +74,82 @@ public KerberosHiveMetastoreAuthentication(String hiveMetastoreServicePrincipal, } @Override - public TTransport authenticate(TTransport rawTransport, String hiveMetastoreHost) + public TTransport authenticate(TTransport rawTransport, String hiveMetastoreHost, Optional tokenString) + { + return tokenString.map(s -> authenticateWithToken(rawTransport, s)).orElseGet(() -> authenticateWithHost(rawTransport, hiveMetastoreHost)); + } + + private TTransport authenticateWithToken(TTransport rawTransport, String tokenString) + { + try { + Token token = new Token(); + token.decodeFromUrlString(tokenString); + + TTransport saslTransport = new TSaslClientTransport( + TOKEN.getMechanismName(), + null, + null, + SASL_DEFAULT_REALM, + SASL_PROPERTIES, + new SaslClientCallbackHandler(token), + rawTransport); + return new TUGIAssumingTransport(saslTransport, UserGroupInformation.getCurrentUser()); + } + catch (IOException ex) { + throw new UncheckedIOException(ex); + } + } + + private static class SaslClientCallbackHandler + implements CallbackHandler + { + private final String userName; + private final char[] userPassword; + + public SaslClientCallbackHandler(Token token) + { + this.userName = encodeIdentifier(token.getIdentifier()); + this.userPassword = encodePassword(token.getPassword()); + } + + @Override + public void handle(Callback[] callbacks) + throws UnsupportedCallbackException + { + for (Callback callback : callbacks) { + if (callback instanceof RealmChoiceCallback) { + continue; + } + else if (callback instanceof NameCallback) { + NameCallback nameCallback = (NameCallback) callback; + nameCallback.setName(userName); + } + else if (callback instanceof PasswordCallback) { + 
PasswordCallback passwordCallback = (PasswordCallback) callback; + passwordCallback.setPassword(userPassword); + } + else if (callback instanceof RealmCallback) { + RealmCallback realmCallback = (RealmCallback) callback; + realmCallback.setText(realmCallback.getDefaultText()); + } + else { + throw new UnsupportedCallbackException(callback, "Unrecognized SASL client callback"); + } + } + } + + private static String encodeIdentifier(byte[] identifier) + { + return Base64.getEncoder().encodeToString(identifier); + } + + private static char[] encodePassword(byte[] password) + { + return Base64.getEncoder().encodeToString(password).toCharArray(); + } + } + + private TTransport authenticateWithHost(TTransport rawTransport, String hiveMetastoreHost) { try { String serverPrincipal = getServerPrincipal(hiveMetastoreServicePrincipal, hiveMetastoreHost); @@ -66,8 +158,8 @@ public TTransport authenticate(TTransport rawTransport, String hiveMetastoreHost "Kerberos principal name does NOT have the expected hostname part: %s", serverPrincipal); Map saslProps = ImmutableMap.of( - Sasl.QOP, hdfsWireEncryptionEnabled ? "auth-conf" : "auth", - Sasl.SERVER_AUTH, "true"); + QOP, hdfsWireEncryptionEnabled ? 
"auth-conf" : "auth", + SERVER_AUTH, "true"); TTransport saslTransport = new TSaslClientTransport( KERBEROS.getMechanismName(), diff --git a/presto-hive/src/main/java/com/facebook/presto/hive/authentication/UserGroupInformationUtils.java b/presto-hive/src/main/java/com/facebook/presto/hive/authentication/UserGroupInformationUtils.java index 5656fbc736683..3c8f91dc44ed6 100644 --- a/presto-hive/src/main/java/com/facebook/presto/hive/authentication/UserGroupInformationUtils.java +++ b/presto-hive/src/main/java/com/facebook/presto/hive/authentication/UserGroupInformationUtils.java @@ -17,11 +17,11 @@ import java.security.PrivilegedAction; -final class UserGroupInformationUtils +public final class UserGroupInformationUtils { private UserGroupInformationUtils() {} - static R executeActionInDoAs(UserGroupInformation userGroupInformation, GenericExceptionAction action) + public static R executeActionInDoAs(UserGroupInformation userGroupInformation, GenericExceptionAction action) throws E { return userGroupInformation.doAs((PrivilegedAction>) () -> { diff --git a/presto-hive/src/main/java/com/facebook/presto/hive/rule/HiveFilterPushdown.java b/presto-hive/src/main/java/com/facebook/presto/hive/rule/HiveFilterPushdown.java index d26cbd34e2376..07d807ecc95d7 100644 --- a/presto-hive/src/main/java/com/facebook/presto/hive/rule/HiveFilterPushdown.java +++ b/presto-hive/src/main/java/com/facebook/presto/hive/rule/HiveFilterPushdown.java @@ -32,6 +32,7 @@ import com.facebook.presto.hive.HiveTransactionManager; import com.facebook.presto.hive.SubfieldExtractor; import com.facebook.presto.hive.metastore.Column; +import com.facebook.presto.hive.metastore.MetastoreContext; import com.facebook.presto.hive.metastore.SemiTransactionalHiveMetastore; import com.facebook.presto.hive.metastore.Table; import com.facebook.presto.spi.ColumnHandle; @@ -261,7 +262,7 @@ public static ConnectorPushdownFilterResult pushdownFilter( RowExpression remainingExpression = 
extractStaticConjuncts(conjuncts, logicalRowExpressions); remainingExpression = removeNestedDynamicFilters(remainingExpression); - Table table = metastore.getTable(tableName.getSchemaName(), tableName.getTableName()) + Table table = metastore.getTable(new MetastoreContext(session.getIdentity()), tableName.getSchemaName(), tableName.getTableName()) .orElseThrow(() -> new TableNotFoundException(tableName)); return new ConnectorPushdownFilterResult( metadata.getTableLayout( diff --git a/presto-hive/src/main/java/com/facebook/presto/hive/security/LegacyAccessControl.java b/presto-hive/src/main/java/com/facebook/presto/hive/security/LegacyAccessControl.java index 0a36da6ea86aa..a47b555d0fefa 100644 --- a/presto-hive/src/main/java/com/facebook/presto/hive/security/LegacyAccessControl.java +++ b/presto-hive/src/main/java/com/facebook/presto/hive/security/LegacyAccessControl.java @@ -15,6 +15,7 @@ import com.facebook.presto.hive.HiveTransactionManager; import com.facebook.presto.hive.TransactionalMetadata; +import com.facebook.presto.hive.metastore.MetastoreContext; import com.facebook.presto.hive.metastore.Table; import com.facebook.presto.spi.SchemaTableName; import com.facebook.presto.spi.connector.ConnectorAccessControl; @@ -100,7 +101,7 @@ public void checkCanDropTable(ConnectorTransactionHandle transaction, ConnectorI } TransactionalMetadata metadata = hiveTransactionManager.get(transaction); - Optional
target = metadata.getMetastore().getTable(tableName.getSchemaName(), tableName.getTableName()); + Optional
target = metadata.getMetastore().getTable(new MetastoreContext(identity), tableName.getSchemaName(), tableName.getTableName()); if (!target.isPresent()) { denyDropTable(tableName.toString(), "Table not found"); diff --git a/presto-hive/src/main/java/com/facebook/presto/hive/security/SqlStandardAccessControl.java b/presto-hive/src/main/java/com/facebook/presto/hive/security/SqlStandardAccessControl.java index b622bf006a755..85c180f16383b 100644 --- a/presto-hive/src/main/java/com/facebook/presto/hive/security/SqlStandardAccessControl.java +++ b/presto-hive/src/main/java/com/facebook/presto/hive/security/SqlStandardAccessControl.java @@ -17,6 +17,7 @@ import com.facebook.presto.hive.HiveTransactionManager; import com.facebook.presto.hive.TransactionalMetadata; import com.facebook.presto.hive.metastore.Database; +import com.facebook.presto.hive.metastore.MetastoreContext; import com.facebook.presto.hive.metastore.SemiTransactionalHiveMetastore; import com.facebook.presto.spi.SchemaTableName; import com.facebook.presto.spi.connector.ConnectorAccessControl; @@ -319,7 +320,7 @@ public void checkCanRevokeRoles(ConnectorTransactionHandle transactionHandle, Co public void checkCanSetRole(ConnectorTransactionHandle transaction, ConnectorIdentity identity, AccessControlContext context, String role, String catalogName) { SemiTransactionalHiveMetastore metastore = getMetastore(transaction); - if (!isRoleApplicable(metastore, new PrestoPrincipal(USER, identity.getUser()), role)) { + if (!isRoleApplicable(metastore, identity, new PrestoPrincipal(USER, identity.getUser()), role)) { denySetRole(role); } } @@ -345,7 +346,7 @@ public void checkCanShowRoleGrants(ConnectorTransactionHandle transactionHandle, private boolean isAdmin(ConnectorTransactionHandle transaction, ConnectorIdentity identity) { SemiTransactionalHiveMetastore metastore = getMetastore(transaction); - return isRoleEnabled(identity, metastore::listRoleGrants, ADMIN_ROLE_NAME); + return isRoleEnabled(identity, 
(PrestoPrincipal p) -> metastore.listRoleGrants(new MetastoreContext(identity), p), ADMIN_ROLE_NAME); } private boolean isDatabaseOwner(ConnectorTransactionHandle transaction, ConnectorIdentity identity, String databaseName) @@ -360,7 +361,7 @@ private boolean isDatabaseOwner(ConnectorTransactionHandle transaction, Connecto } SemiTransactionalHiveMetastore metastore = getMetastore(transaction); - Optional databaseMetadata = metastore.getDatabase(databaseName); + Optional databaseMetadata = metastore.getDatabase(new MetastoreContext(identity), databaseName); if (!databaseMetadata.isPresent()) { return false; } @@ -371,7 +372,7 @@ private boolean isDatabaseOwner(ConnectorTransactionHandle transaction, Connecto if (database.getOwnerType() == USER && identity.getUser().equals(database.getOwnerName())) { return true; } - if (database.getOwnerType() == ROLE && isRoleEnabled(identity, metastore::listRoleGrants, database.getOwnerName())) { + if (database.getOwnerType() == ROLE && isRoleEnabled(identity, (PrestoPrincipal p) -> metastore.listRoleGrants(new MetastoreContext(identity), p), database.getOwnerName())) { return true; } return false; @@ -416,6 +417,7 @@ private boolean hasGrantOptionForPrivilege(ConnectorTransactionHandle transactio SemiTransactionalHiveMetastore metastore = getMetastore(transaction); return listApplicableTablePrivileges( metastore, + identity, tableName.getSchemaName(), tableName.getTableName(), identity.getUser()) @@ -429,7 +431,8 @@ private boolean hasAdminOptionForRoles(ConnectorTransactionHandle transaction, C } SemiTransactionalHiveMetastore metastore = getMetastore(transaction); - Set rolesWithGrantOption = listApplicableRoles(new PrestoPrincipal(USER, identity.getUser()), metastore::listRoleGrants) + Set rolesWithGrantOption = listApplicableRoles(new PrestoPrincipal(USER, identity.getUser()), (PrestoPrincipal p) -> metastore.listRoleGrants(new MetastoreContext(identity), p)) + .filter(RoleGrant::isGrantable) .map(RoleGrant::getRoleName) 
.collect(toSet()); diff --git a/presto-hive/src/main/java/com/facebook/presto/hive/statistics/MetastoreHiveStatisticsProvider.java b/presto-hive/src/main/java/com/facebook/presto/hive/statistics/MetastoreHiveStatisticsProvider.java index 83746c04c3b18..93095e298f59f 100644 --- a/presto-hive/src/main/java/com/facebook/presto/hive/statistics/MetastoreHiveStatisticsProvider.java +++ b/presto-hive/src/main/java/com/facebook/presto/hive/statistics/MetastoreHiveStatisticsProvider.java @@ -27,6 +27,7 @@ import com.facebook.presto.hive.metastore.DoubleStatistics; import com.facebook.presto.hive.metastore.HiveColumnStatistics; import com.facebook.presto.hive.metastore.IntegerStatistics; +import com.facebook.presto.hive.metastore.MetastoreContext; import com.facebook.presto.hive.metastore.PartitionStatistics; import com.facebook.presto.hive.metastore.SemiTransactionalHiveMetastore; import com.facebook.presto.spi.ColumnHandle; @@ -99,7 +100,7 @@ public class MetastoreHiveStatisticsProvider public MetastoreHiveStatisticsProvider(SemiTransactionalHiveMetastore metastore) { requireNonNull(metastore, "metastore is null"); - this.statisticsProvider = (table, hivePartitions) -> getPartitionsStatistics(metastore, table, hivePartitions); + this.statisticsProvider = (session, table, hivePartitions) -> getPartitionsStatistics(session, metastore, table, hivePartitions); } @VisibleForTesting @@ -108,20 +109,21 @@ public MetastoreHiveStatisticsProvider(SemiTransactionalHiveMetastore metastore) this.statisticsProvider = requireNonNull(statisticsProvider, "statisticsProvider is null"); } - private static Map getPartitionsStatistics(SemiTransactionalHiveMetastore metastore, SchemaTableName table, List hivePartitions) + private static Map getPartitionsStatistics(ConnectorSession session, SemiTransactionalHiveMetastore metastore, SchemaTableName table, List hivePartitions) { if (hivePartitions.isEmpty()) { return ImmutableMap.of(); } boolean unpartitioned = 
hivePartitions.stream().anyMatch(partition -> partition.getPartitionId().equals(UNPARTITIONED_ID)); + MetastoreContext metastoreContext = new MetastoreContext(session.getIdentity()); if (unpartitioned) { checkArgument(hivePartitions.size() == 1, "expected only one hive partition"); - return ImmutableMap.of(UNPARTITIONED_ID, metastore.getTableStatistics(table.getSchemaName(), table.getTableName())); + return ImmutableMap.of(UNPARTITIONED_ID, metastore.getTableStatistics(metastoreContext, table.getSchemaName(), table.getTableName())); } Set partitionNames = hivePartitions.stream() .map(HivePartition::getPartitionId) .collect(toImmutableSet()); - return metastore.getPartitionStatistics(table.getSchemaName(), table.getTableName(), partitionNames); + return metastore.getPartitionStatistics(metastoreContext, table.getSchemaName(), table.getTableName(), partitionNames); } @Override @@ -141,7 +143,7 @@ public TableStatistics getTableStatistics( int sampleSize = getPartitionStatisticsSampleSize(session); List partitionsSample = getPartitionsSample(partitions, sampleSize); try { - Map statisticsSample = statisticsProvider.getPartitionsStatistics(table, partitionsSample); + Map statisticsSample = statisticsProvider.getPartitionsStatistics(session, table, partitionsSample); validatePartitionStatistics(table, statisticsSample); return getTableStatistics(columns, columnTypes, partitions, statisticsSample); } @@ -868,6 +870,6 @@ private static Optional createDecimalRange(DecimalStatistics statis @VisibleForTesting interface PartitionsStatisticsProvider { - Map getPartitionsStatistics(SchemaTableName table, List hivePartitions); + Map getPartitionsStatistics(ConnectorSession session, SchemaTableName table, List hivePartitions); } } diff --git a/presto-hive/src/test/java/com/facebook/presto/hive/AbstractTestHiveClient.java b/presto-hive/src/test/java/com/facebook/presto/hive/AbstractTestHiveClient.java index 91c2bac037346..098fa264263c6 100644 --- 
a/presto-hive/src/test/java/com/facebook/presto/hive/AbstractTestHiveClient.java +++ b/presto-hive/src/test/java/com/facebook/presto/hive/AbstractTestHiveClient.java @@ -47,6 +47,7 @@ import com.facebook.presto.hive.metastore.HivePartitionMutator; import com.facebook.presto.hive.metastore.HivePrivilegeInfo; import com.facebook.presto.hive.metastore.HivePrivilegeInfo.HivePrivilege; +import com.facebook.presto.hive.metastore.MetastoreContext; import com.facebook.presto.hive.metastore.Partition; import com.facebook.presto.hive.metastore.PartitionStatistics; import com.facebook.presto.hive.metastore.PartitionWithStatistics; @@ -919,8 +920,9 @@ protected final void setup(String host, int port, String databaseName, String ti HiveCluster hiveCluster = new TestingHiveCluster(metastoreClientConfig, host, port); ExtendedHiveMetastore metastore = new CachingHiveMetastore( - new BridgingHiveMetastore(new ThriftHiveMetastore(hiveCluster), new HivePartitionMutator()), + new BridgingHiveMetastore(new ThriftHiveMetastore(hiveCluster, metastoreClientConfig), new HivePartitionMutator()), executor, + false, Duration.valueOf("1m"), Duration.valueOf("15s"), 10000, @@ -954,6 +956,7 @@ protected final void setup(String databaseName, HiveClientConfig hiveClientConfi true, getHiveClientConfig().getMaxPartitionBatchSize(), getHiveClientConfig().getMaxPartitionsPerScan(), + false, FUNCTION_AND_TYPE_MANAGER, locationService, FUNCTION_RESOLUTION, @@ -1396,7 +1399,7 @@ protected void doTestMismatchSchemaTable( try (Transaction transaction = newTransaction()) { ConnectorSession session = newSession(); PrincipalPrivileges principalPrivileges = testingPrincipalPrivilege(session); - Table oldTable = transaction.getMetastore().getTable(schemaName, tableName).get(); + Table oldTable = transaction.getMetastore().getTable(new MetastoreContext(session.getIdentity()), schemaName, tableName).get(); HiveTypeTranslator hiveTypeTranslator = new HiveTypeTranslator(); List dataColumns = tableAfter.stream() 
.filter(columnMetadata -> !columnMetadata.getName().equals("ds")) @@ -1404,9 +1407,7 @@ protected void doTestMismatchSchemaTable( .collect(toList()); Table.Builder newTable = Table.builder(oldTable) .setDataColumns(dataColumns); - - transaction.getMetastore().replaceView(schemaName, tableName, newTable.build(), principalPrivileges); - + transaction.getMetastore().replaceView(new MetastoreContext(session.getIdentity()), schemaName, tableName, newTable.build(), principalPrivileges); transaction.commit(); } @@ -2640,7 +2641,7 @@ private void assertEmptyFile(HiveStorageFormat format) List columnHandles = filterNonHiddenColumnHandles(metadata.getColumnHandles(session, tableHandle).values()); Table table = transaction.getMetastore() - .getTable(tableName.getSchemaName(), tableName.getTableName()) + .getTable(new MetastoreContext(session.getIdentity()), tableName.getSchemaName(), tableName.getTableName()) .orElseThrow(AssertionError::new); // verify directory is empty @@ -2812,7 +2813,7 @@ public void testTableCreationIgnoreExisting() Table table = createSimpleTable(schemaTableName, columns, session, targetPath, "q1"); transaction.getMetastore() .createTable(session, table, privileges, Optional.empty(), false, EMPTY_TABLE_STATISTICS); - Optional
tableHandle = transaction.getMetastore().getTable(schemaName, tableName); + Optional
tableHandle = transaction.getMetastore().getTable(new MetastoreContext(session.getIdentity()), schemaName, tableName); assertTrue(tableHandle.isPresent()); transaction.commit(); } @@ -3409,29 +3410,29 @@ public void testUpdateTableColumnStatisticsEmptyOptionalFields() protected void testUpdateTableStatistics(SchemaTableName tableName, PartitionStatistics initialStatistics, PartitionStatistics... statistics) { ExtendedHiveMetastore metastoreClient = getMetastoreClient(); - assertThat(metastoreClient.getTableStatistics(tableName.getSchemaName(), tableName.getTableName())) + assertThat(metastoreClient.getTableStatistics(new MetastoreContext("test_user"), tableName.getSchemaName(), tableName.getTableName())) .isEqualTo(initialStatistics); AtomicReference expectedStatistics = new AtomicReference<>(initialStatistics); for (PartitionStatistics partitionStatistics : statistics) { - metastoreClient.updateTableStatistics(tableName.getSchemaName(), tableName.getTableName(), actualStatistics -> { + metastoreClient.updateTableStatistics(new MetastoreContext("test_user"), tableName.getSchemaName(), tableName.getTableName(), actualStatistics -> { assertThat(actualStatistics).isEqualTo(expectedStatistics.get()); return partitionStatistics; }); - assertThat(metastoreClient.getTableStatistics(tableName.getSchemaName(), tableName.getTableName())) + assertThat(metastoreClient.getTableStatistics(new MetastoreContext("test_user"), tableName.getSchemaName(), tableName.getTableName())) .isEqualTo(partitionStatistics); expectedStatistics.set(partitionStatistics); } - assertThat(metastoreClient.getTableStatistics(tableName.getSchemaName(), tableName.getTableName())) + assertThat(metastoreClient.getTableStatistics(new MetastoreContext("test_user"), tableName.getSchemaName(), tableName.getTableName())) .isEqualTo(expectedStatistics.get()); - metastoreClient.updateTableStatistics(tableName.getSchemaName(), tableName.getTableName(), actualStatistics -> { + 
metastoreClient.updateTableStatistics(new MetastoreContext("test_user"), tableName.getSchemaName(), tableName.getTableName(), actualStatistics -> { assertThat(actualStatistics).isEqualTo(expectedStatistics.get()); return initialStatistics; }); - assertThat(metastoreClient.getTableStatistics(tableName.getSchemaName(), tableName.getTableName())) + assertThat(metastoreClient.getTableStatistics(new MetastoreContext("test_user"), tableName.getSchemaName(), tableName.getTableName())) .isEqualTo(initialStatistics); } @@ -3556,7 +3557,7 @@ protected void createDummyPartitionedTable(SchemaTableName tableName, List new TableNotFoundException(tableName)); List firstPartitionValues = ImmutableList.of("2016-01-01"); @@ -3569,9 +3570,9 @@ protected void createDummyPartitionedTable(SchemaTableName tableName, List new PartitionWithStatistics(createDummyPartition(table, partitionName), partitionName, PartitionStatistics.empty())) .collect(toImmutableList()); - metastoreClient.addPartitions(tableName.getSchemaName(), tableName.getTableName(), partitions); - metastoreClient.updatePartitionStatistics(tableName.getSchemaName(), tableName.getTableName(), firstPartitionName, currentStatistics -> EMPTY_TABLE_STATISTICS); - metastoreClient.updatePartitionStatistics(tableName.getSchemaName(), tableName.getTableName(), secondPartitionName, currentStatistics -> EMPTY_TABLE_STATISTICS); + metastoreClient.addPartitions(new MetastoreContext("test_user"), tableName.getSchemaName(), tableName.getTableName(), partitions); + metastoreClient.updatePartitionStatistics(new MetastoreContext("test_user"), tableName.getSchemaName(), tableName.getTableName(), firstPartitionName, currentStatistics -> EMPTY_TABLE_STATISTICS); + metastoreClient.updatePartitionStatistics(new MetastoreContext("test_user"), tableName.getSchemaName(), tableName.getTableName(), secondPartitionName, currentStatistics -> EMPTY_TABLE_STATISTICS); } protected void testUpdatePartitionStatistics( @@ -3586,7 +3587,7 @@ protected void 
testUpdatePartitionStatistics( String secondPartitionName = "ds=2016-01-02"; ExtendedHiveMetastore metastoreClient = getMetastoreClient(); - assertThat(metastoreClient.getPartitionStatistics(tableName.getSchemaName(), tableName.getTableName(), ImmutableSet.of(firstPartitionName, secondPartitionName))) + assertThat(metastoreClient.getPartitionStatistics(new MetastoreContext("test_user"), tableName.getSchemaName(), tableName.getTableName(), ImmutableSet.of(firstPartitionName, secondPartitionName))) .isEqualTo(ImmutableMap.of(firstPartitionName, initialStatistics, secondPartitionName, initialStatistics)); AtomicReference expectedStatisticsPartition1 = new AtomicReference<>(initialStatistics); @@ -3595,31 +3596,31 @@ protected void testUpdatePartitionStatistics( for (int i = 0; i < firstPartitionStatistics.size(); i++) { PartitionStatistics statisticsPartition1 = firstPartitionStatistics.get(i); PartitionStatistics statisticsPartition2 = secondPartitionStatistics.get(i); - metastoreClient.updatePartitionStatistics(tableName.getSchemaName(), tableName.getTableName(), firstPartitionName, actualStatistics -> { + metastoreClient.updatePartitionStatistics(new MetastoreContext("test_user"), tableName.getSchemaName(), tableName.getTableName(), firstPartitionName, actualStatistics -> { assertThat(actualStatistics).isEqualTo(expectedStatisticsPartition1.get()); return statisticsPartition1; }); - metastoreClient.updatePartitionStatistics(tableName.getSchemaName(), tableName.getTableName(), secondPartitionName, actualStatistics -> { + metastoreClient.updatePartitionStatistics(new MetastoreContext("test_user"), tableName.getSchemaName(), tableName.getTableName(), secondPartitionName, actualStatistics -> { assertThat(actualStatistics).isEqualTo(expectedStatisticsPartition2.get()); return statisticsPartition2; }); - assertThat(metastoreClient.getPartitionStatistics(tableName.getSchemaName(), tableName.getTableName(), ImmutableSet.of(firstPartitionName, secondPartitionName))) + 
assertThat(metastoreClient.getPartitionStatistics(new MetastoreContext("test_user"), tableName.getSchemaName(), tableName.getTableName(), ImmutableSet.of(firstPartitionName, secondPartitionName))) .isEqualTo(ImmutableMap.of(firstPartitionName, statisticsPartition1, secondPartitionName, statisticsPartition2)); expectedStatisticsPartition1.set(statisticsPartition1); expectedStatisticsPartition2.set(statisticsPartition2); } - assertThat(metastoreClient.getPartitionStatistics(tableName.getSchemaName(), tableName.getTableName(), ImmutableSet.of(firstPartitionName, secondPartitionName))) + assertThat(metastoreClient.getPartitionStatistics(new MetastoreContext("test_user"), tableName.getSchemaName(), tableName.getTableName(), ImmutableSet.of(firstPartitionName, secondPartitionName))) .isEqualTo(ImmutableMap.of(firstPartitionName, expectedStatisticsPartition1.get(), secondPartitionName, expectedStatisticsPartition2.get())); - metastoreClient.updatePartitionStatistics(tableName.getSchemaName(), tableName.getTableName(), firstPartitionName, currentStatistics -> { + metastoreClient.updatePartitionStatistics(new MetastoreContext("test_user"), tableName.getSchemaName(), tableName.getTableName(), firstPartitionName, currentStatistics -> { assertThat(currentStatistics).isEqualTo(expectedStatisticsPartition1.get()); return initialStatistics; }); - metastoreClient.updatePartitionStatistics(tableName.getSchemaName(), tableName.getTableName(), secondPartitionName, currentStatistics -> { + metastoreClient.updatePartitionStatistics(new MetastoreContext("test_user"), tableName.getSchemaName(), tableName.getTableName(), secondPartitionName, currentStatistics -> { assertThat(currentStatistics).isEqualTo(expectedStatisticsPartition2.get()); return initialStatistics; }); - assertThat(metastoreClient.getPartitionStatistics(tableName.getSchemaName(), tableName.getTableName(), ImmutableSet.of(firstPartitionName, secondPartitionName))) + assertThat(metastoreClient.getPartitionStatistics(new 
MetastoreContext("test_user"), tableName.getSchemaName(), tableName.getTableName(), ImmutableSet.of(firstPartitionName, secondPartitionName))) .isEqualTo(ImmutableMap.of(firstPartitionName, initialStatistics, secondPartitionName, initialStatistics)); } @@ -3655,7 +3656,7 @@ protected void testStorePartitionWithStatistics( doCreateEmptyTable(tableName, ORC, columns); ExtendedHiveMetastore metastoreClient = getMetastoreClient(); - Table table = metastoreClient.getTable(tableName.getSchemaName(), tableName.getTableName()) + Table table = metastoreClient.getTable(new MetastoreContext("test_user"), tableName.getSchemaName(), tableName.getTableName()) .orElseThrow(() -> new TableNotFoundException(tableName)); List partitionValues = ImmutableList.of("2016-01-01"); @@ -3664,11 +3665,11 @@ protected void testStorePartitionWithStatistics( Partition partition = createDummyPartition(table, partitionName); // create partition with stats for all columns - metastoreClient.addPartitions(tableName.getSchemaName(), tableName.getTableName(), ImmutableList.of(new PartitionWithStatistics(partition, partitionName, statsForAllColumns1))); + metastoreClient.addPartitions(new MetastoreContext("test_user"), tableName.getSchemaName(), tableName.getTableName(), ImmutableList.of(new PartitionWithStatistics(partition, partitionName, statsForAllColumns1))); assertEquals( - metastoreClient.getPartition(tableName.getSchemaName(), tableName.getTableName(), partitionValues).get().getStorage().getStorageFormat(), + metastoreClient.getPartition(new MetastoreContext("test_user"), tableName.getSchemaName(), tableName.getTableName(), partitionValues).get().getStorage().getStorageFormat(), fromHiveStorageFormat(ORC)); - assertThat(metastoreClient.getPartitionStatistics(tableName.getSchemaName(), tableName.getTableName(), ImmutableSet.of(partitionName))) + assertThat(metastoreClient.getPartitionStatistics(new MetastoreContext("test_user"), tableName.getSchemaName(), tableName.getTableName(), 
ImmutableSet.of(partitionName))) .isEqualTo(ImmutableMap.of(partitionName, statsForAllColumns1)); sleep(delayBetweenAlters.toMillis()); @@ -3679,11 +3680,11 @@ protected void testStorePartitionWithStatistics( .setStorageFormat(fromHiveStorageFormat(DWRF)) .setLocation(partitionTargetPath(tableName, partitionName))) .build(); - metastoreClient.alterPartition(tableName.getSchemaName(), tableName.getTableName(), new PartitionWithStatistics(modifiedPartition, partitionName, statsForAllColumns2)); + metastoreClient.alterPartition(new MetastoreContext("test_user"), tableName.getSchemaName(), tableName.getTableName(), new PartitionWithStatistics(modifiedPartition, partitionName, statsForAllColumns2)); assertEquals( - metastoreClient.getPartition(tableName.getSchemaName(), tableName.getTableName(), partitionValues).get().getStorage().getStorageFormat(), + metastoreClient.getPartition(new MetastoreContext("test_user"), tableName.getSchemaName(), tableName.getTableName(), partitionValues).get().getStorage().getStorageFormat(), fromHiveStorageFormat(DWRF)); - assertThat(metastoreClient.getPartitionStatistics(tableName.getSchemaName(), tableName.getTableName(), ImmutableSet.of(partitionName))) + assertThat(metastoreClient.getPartitionStatistics(new MetastoreContext("test_user"), tableName.getSchemaName(), tableName.getTableName(), ImmutableSet.of(partitionName))) .isEqualTo(ImmutableMap.of(partitionName, statsForAllColumns2)); sleep(delayBetweenAlters.toMillis()); @@ -3694,8 +3695,8 @@ protected void testStorePartitionWithStatistics( .setStorageFormat(fromHiveStorageFormat(TEXTFILE)) .setLocation(partitionTargetPath(tableName, partitionName))) .build(); - metastoreClient.alterPartition(tableName.getSchemaName(), tableName.getTableName(), new PartitionWithStatistics(modifiedPartition, partitionName, statsForSubsetOfColumns)); - assertThat(metastoreClient.getPartitionStatistics(tableName.getSchemaName(), tableName.getTableName(), ImmutableSet.of(partitionName))) + 
metastoreClient.alterPartition(new MetastoreContext("test_user"), tableName.getSchemaName(), tableName.getTableName(), new PartitionWithStatistics(modifiedPartition, partitionName, statsForSubsetOfColumns)); + assertThat(metastoreClient.getPartitionStatistics(new MetastoreContext("test_user"), tableName.getSchemaName(), tableName.getTableName(), ImmutableSet.of(partitionName))) .isEqualTo(ImmutableMap.of(partitionName, statsForSubsetOfColumns)); sleep(delayBetweenAlters.toMillis()); @@ -3706,8 +3707,8 @@ protected void testStorePartitionWithStatistics( .setStorageFormat(fromHiveStorageFormat(TEXTFILE)) .setLocation(partitionTargetPath(tableName, partitionName))) .build(); - metastoreClient.alterPartition(tableName.getSchemaName(), tableName.getTableName(), new PartitionWithStatistics(modifiedPartition, partitionName, emptyStatistics)); - assertThat(metastoreClient.getPartitionStatistics(tableName.getSchemaName(), tableName.getTableName(), ImmutableSet.of(partitionName))) + metastoreClient.alterPartition(new MetastoreContext("test_user"), tableName.getSchemaName(), tableName.getTableName(), new PartitionWithStatistics(modifiedPartition, partitionName, emptyStatistics)); + assertThat(metastoreClient.getPartitionStatistics(new MetastoreContext("test_user"), tableName.getSchemaName(), tableName.getTableName(), ImmutableSet.of(partitionName))) .isEqualTo(ImmutableMap.of(partitionName, emptyStatistics)); } finally { @@ -3743,7 +3744,7 @@ protected String partitionTargetPath(SchemaTableName schemaTableName, String par ConnectorSession session = newSession(); SemiTransactionalHiveMetastore metastore = transaction.getMetastore(); LocationService locationService = getLocationService(); - Table table = metastore.getTable(schemaTableName.getSchemaName(), schemaTableName.getTableName()).get(); + Table table = metastore.getTable(new MetastoreContext(session.getIdentity()), schemaTableName.getSchemaName(), schemaTableName.getTableName()).get(); LocationHandle handle = 
locationService.forExistingTable(metastore, session, table, false); return locationService.getPartitionWriteInfo(handle, Optional.empty(), partitionName).getTargetPath().toString(); } @@ -3757,8 +3758,8 @@ public void testAddColumn() try { doCreateEmptyTable(tableName, ORC, CREATE_TABLE_COLUMNS); ExtendedHiveMetastore metastoreClient = getMetastoreClient(); - metastoreClient.addColumn(tableName.getSchemaName(), tableName.getTableName(), "new_col", HIVE_LONG, null); - Optional
table = metastoreClient.getTable(tableName.getSchemaName(), tableName.getTableName()); + metastoreClient.addColumn(new MetastoreContext("test_user"), tableName.getSchemaName(), tableName.getTableName(), "new_col", HIVE_LONG, null); + Optional
table = metastoreClient.getTable(new MetastoreContext("test_user"), tableName.getSchemaName(), tableName.getTableName()); assertTrue(table.isPresent()); List columns = table.get().getDataColumns(); assertEquals(columns.get(columns.size() - 1).getName(), "new_col"); @@ -3776,8 +3777,8 @@ public void testDropColumn() try { doCreateEmptyTable(tableName, ORC, CREATE_TABLE_COLUMNS); ExtendedHiveMetastore metastoreClient = getMetastoreClient(); - metastoreClient.dropColumn(tableName.getSchemaName(), tableName.getTableName(), CREATE_TABLE_COLUMNS.get(0).getName()); - Optional
table = metastoreClient.getTable(tableName.getSchemaName(), tableName.getTableName()); + metastoreClient.dropColumn(new MetastoreContext("test_user"), tableName.getSchemaName(), tableName.getTableName(), CREATE_TABLE_COLUMNS.get(0).getName()); + Optional
table = metastoreClient.getTable(new MetastoreContext("test_user"), tableName.getSchemaName(), tableName.getTableName()); assertTrue(table.isPresent()); List columns = table.get().getDataColumns(); assertEquals(columns.get(0).getName(), CREATE_TABLE_COLUMNS.get(1).getName()); @@ -3807,8 +3808,8 @@ protected void testPartitionStatisticsSampling(List columns, Par try { createDummyPartitionedTable(tableName, columns); ExtendedHiveMetastore metastoreClient = getMetastoreClient(); - metastoreClient.updatePartitionStatistics(tableName.getSchemaName(), tableName.getTableName(), "ds=2016-01-01", actualStatistics -> statistics); - metastoreClient.updatePartitionStatistics(tableName.getSchemaName(), tableName.getTableName(), "ds=2016-01-02", actualStatistics -> statistics); + metastoreClient.updatePartitionStatistics(new MetastoreContext("test_user"), tableName.getSchemaName(), tableName.getTableName(), "ds=2016-01-01", actualStatistics -> statistics); + metastoreClient.updatePartitionStatistics(new MetastoreContext("test_user"), tableName.getSchemaName(), tableName.getTableName(), "ds=2016-01-02", actualStatistics -> statistics); try (Transaction transaction = newTransaction()) { ConnectorSession session = newSession(); @@ -3956,12 +3957,12 @@ protected void doCreateTable(SchemaTableName tableName, HiveStorageFormat storag assertEqualsIgnoreOrder(result.getMaterializedRows(), CREATE_TABLE_DATA.getMaterializedRows()); // verify the node version and query ID in table - Table table = getMetastoreClient().getTable(tableName.getSchemaName(), tableName.getTableName()).get(); + Table table = getMetastoreClient().getTable(new MetastoreContext("test_user"), tableName.getSchemaName(), tableName.getTableName()).get(); assertEquals(table.getParameters().get(PRESTO_VERSION_NAME), TEST_SERVER_VERSION); assertEquals(table.getParameters().get(PRESTO_QUERY_ID_NAME), queryId); // verify basic statistics - HiveBasicStatistics statistics = getBasicStatisticsForTable(transaction, tableName); + 
HiveBasicStatistics statistics = getBasicStatisticsForTable(session.getIdentity(), transaction, tableName); assertEquals(statistics.getRowCount().getAsLong(), CREATE_TABLE_DATA.getRowCount()); assertEquals(statistics.getFileCount().getAsLong(), 1L); assertGreaterThan(statistics.getInMemoryDataSizeInBytes().getAsLong(), 0L); @@ -4009,7 +4010,7 @@ protected void doCreateEmptyTable(SchemaTableName tableName, HiveStorageFormat s assertEquals(filterNonHiddenColumnMetadata(tableMetadata.getColumns()), expectedColumns); // verify table format - Table table = transaction.getMetastore().getTable(tableName.getSchemaName(), tableName.getTableName()).get(); + Table table = transaction.getMetastore().getTable(new MetastoreContext(session.getIdentity()), tableName.getSchemaName(), tableName.getTableName()).get(); assertEquals(table.getStorage().getStorageFormat().getInputFormat(), storageFormat.getInputFormat()); // verify the node version and query ID @@ -4023,14 +4024,14 @@ protected void doCreateEmptyTable(SchemaTableName tableName, HiveStorageFormat s // verify basic statistics if (partitionedBy.isEmpty()) { - assertEmptyTableStatistics(tableName, transaction); + assertEmptyTableStatistics(session.getIdentity(), tableName, transaction); } } } - protected void assertEmptyTableStatistics(SchemaTableName tableName, Transaction transaction) + protected void assertEmptyTableStatistics(ConnectorIdentity identity, SchemaTableName tableName, Transaction transaction) { - HiveBasicStatistics statistics = getBasicStatisticsForTable(transaction, tableName); + HiveBasicStatistics statistics = getBasicStatisticsForTable(identity, transaction, tableName); assertEquals(statistics.getRowCount().getAsLong(), 0L); assertEquals(statistics.getFileCount().getAsLong(), 0L); assertEquals(statistics.getInMemoryDataSizeInBytes().getAsLong(), 0L); @@ -4064,7 +4065,7 @@ private void doInsert(HiveStorageFormat storageFormat, SchemaTableName tableName assertEqualsIgnoreOrder(result.getMaterializedRows(), 
resultBuilder.build().getMaterializedRows()); // statistics - HiveBasicStatistics tableStatistics = getBasicStatisticsForTable(transaction, tableName); + HiveBasicStatistics tableStatistics = getBasicStatisticsForTable(session.getIdentity(), transaction, tableName); assertEquals(tableStatistics.getRowCount().getAsLong(), CREATE_TABLE_DATA.getRowCount() * (i + 1)); assertEquals(tableStatistics.getFileCount().getAsLong(), i + 1L); assertGreaterThan(tableStatistics.getInMemoryDataSizeInBytes().getAsLong(), 0L); @@ -4075,7 +4076,8 @@ private void doInsert(HiveStorageFormat storageFormat, SchemaTableName tableName // test rollback Set existingFiles; try (Transaction transaction = newTransaction()) { - existingFiles = listAllDataFiles(transaction, tableName.getSchemaName(), tableName.getTableName()); + ConnectorSession session = newSession(); + existingFiles = listAllDataFiles(session.getIdentity(), transaction, tableName.getSchemaName(), tableName.getTableName()); assertFalse(existingFiles.isEmpty()); } @@ -4099,12 +4101,12 @@ private void doInsert(HiveStorageFormat storageFormat, SchemaTableName tableName metadata.finishInsert(session, insertTableHandle, fragments, ImmutableList.of()); // statistics, visible from within transaction - HiveBasicStatistics tableStatistics = getBasicStatisticsForTable(transaction, tableName); + HiveBasicStatistics tableStatistics = getBasicStatisticsForTable(session.getIdentity(), transaction, tableName); assertEquals(tableStatistics.getRowCount().getAsLong(), CREATE_TABLE_DATA.getRowCount() * 5L); try (Transaction otherTransaction = newTransaction()) { // statistics, not visible from outside transaction - HiveBasicStatistics otherTableStatistics = getBasicStatisticsForTable(otherTransaction, tableName); + HiveBasicStatistics otherTableStatistics = getBasicStatisticsForTable(session.getIdentity(), otherTransaction, tableName); assertEquals(otherTableStatistics.getRowCount().getAsLong(), CREATE_TABLE_DATA.getRowCount() * 3L); } @@ -4145,12 
+4147,13 @@ private void doInsert(HiveStorageFormat storageFormat, SchemaTableName tableName assertEqualsIgnoreOrder(result.getMaterializedRows(), resultBuilder.build().getMaterializedRows()); // verify we did not modify the table directory - assertEquals(listAllDataFiles(transaction, tableName.getSchemaName(), tableName.getTableName()), existingFiles); + assertEquals(listAllDataFiles(session.getIdentity(), transaction, tableName.getSchemaName(), tableName.getTableName()), existingFiles); } // verify statistics unchanged try (Transaction transaction = newTransaction()) { - HiveBasicStatistics statistics = getBasicStatisticsForTable(transaction, tableName); + ConnectorSession session = newSession(); + HiveBasicStatistics statistics = getBasicStatisticsForTable(session.getIdentity(), transaction, tableName); assertEquals(statistics.getRowCount().getAsLong(), CREATE_TABLE_DATA.getRowCount() * 3L); assertEquals(statistics.getFileCount().getAsLong(), 3L); } @@ -4202,21 +4205,22 @@ protected Optional getTempFilePathRoot(ConnectorOutputTableHandle outputTa .getTempPath(); } - protected Set listAllDataFiles(Transaction transaction, String schemaName, String tableName) + protected Set listAllDataFiles(ConnectorIdentity identity, Transaction transaction, String schemaName, String tableName) throws IOException { HdfsContext context = new HdfsContext(newSession(), schemaName, tableName, "test_path", false); Set existingFiles = new HashSet<>(); - for (String location : listAllDataPaths(transaction.getMetastore(), schemaName, tableName)) { + for (String location : listAllDataPaths(identity, transaction.getMetastore(), schemaName, tableName)) { existingFiles.addAll(listAllDataFiles(context, new Path(location))); } return existingFiles; } - public static List listAllDataPaths(SemiTransactionalHiveMetastore metastore, String schemaName, String tableName) + public static List listAllDataPaths(ConnectorIdentity identity, SemiTransactionalHiveMetastore metastore, String schemaName, 
String tableName) { ImmutableList.Builder locations = ImmutableList.builder(); - Table table = metastore.getTable(schemaName, tableName).get(); + MetastoreContext metastoreContext = new MetastoreContext(identity); + Table table = metastore.getTable(metastoreContext, schemaName, tableName).get(); if (table.getStorage().getLocation() != null) { // For partitioned table, there should be nothing directly under this directory. // But including this location in the set makes the directory content assert more @@ -4224,9 +4228,9 @@ public static List listAllDataPaths(SemiTransactionalHiveMetastore metas locations.add(table.getStorage().getLocation()); } - Optional> partitionNames = metastore.getPartitionNames(schemaName, tableName); + Optional> partitionNames = metastore.getPartitionNames(metastoreContext, schemaName, tableName); if (partitionNames.isPresent()) { - metastore.getPartitionsByNames(schemaName, tableName, partitionNames.get()).values().stream() + metastore.getPartitionsByNames(metastoreContext, schemaName, tableName, partitionNames.get()).values().stream() .map(Optional::get) .map(partition -> partition.getStorage().getLocation()) .filter(location -> !location.startsWith(table.getStorage().getLocation())) @@ -4268,15 +4272,17 @@ private void doInsertIntoNewPartition(HiveStorageFormat storageFormat, SchemaTab Set existingFiles; try (Transaction transaction = newTransaction()) { + ConnectorSession session = newSession(); + MetastoreContext metastoreContext = new MetastoreContext(session.getIdentity()); // verify partitions were created - List partitionNames = transaction.getMetastore().getPartitionNames(tableName.getSchemaName(), tableName.getTableName()) + List partitionNames = transaction.getMetastore().getPartitionNames(metastoreContext, tableName.getSchemaName(), tableName.getTableName()) .orElseThrow(() -> new AssertionError("Table does not exist: " + tableName)); assertEqualsIgnoreOrder(partitionNames, 
CREATE_TABLE_PARTITIONED_DATA.getMaterializedRows().stream() .map(row -> "ds=" + row.getField(CREATE_TABLE_PARTITIONED_DATA.getTypes().size() - 1)) .collect(toList())); // verify the node versions in partitions - Map> partitions = getMetastoreClient().getPartitionsByNames(tableName.getSchemaName(), tableName.getTableName(), partitionNames); + Map> partitions = getMetastoreClient().getPartitionsByNames(metastoreContext, tableName.getSchemaName(), tableName.getTableName(), partitionNames); assertEquals(partitions.size(), partitionNames.size()); for (String partitionName : partitionNames) { Partition partition = partitions.get(partitionName).get(); @@ -4285,7 +4291,6 @@ private void doInsertIntoNewPartition(HiveStorageFormat storageFormat, SchemaTab } // load the new table - ConnectorSession session = newSession(); ConnectorMetadata metadata = transaction.getMetadata(); ConnectorTableHandle tableHandle = getTableHandle(metadata, tableName); List columnHandles = filterNonHiddenColumnHandles(metadata.getColumnHandles(session, tableHandle).values()); @@ -4295,12 +4300,12 @@ private void doInsertIntoNewPartition(HiveStorageFormat storageFormat, SchemaTab assertEqualsIgnoreOrder(result.getMaterializedRows(), CREATE_TABLE_PARTITIONED_DATA.getMaterializedRows()); // test rollback - existingFiles = listAllDataFiles(transaction, tableName.getSchemaName(), tableName.getTableName()); + existingFiles = listAllDataFiles(session.getIdentity(), transaction, tableName.getSchemaName(), tableName.getTableName()); assertFalse(existingFiles.isEmpty()); // test statistics for (String partitionName : partitionNames) { - HiveBasicStatistics partitionStatistics = getBasicStatisticsForPartition(transaction, tableName, partitionName); + HiveBasicStatistics partitionStatistics = getBasicStatisticsForPartition(session.getIdentity(), transaction, tableName, partitionName); assertEquals(partitionStatistics.getRowCount().getAsLong(), 1L); assertEquals(partitionStatistics.getFileCount().getAsLong(), 
1L); assertGreaterThan(partitionStatistics.getInMemoryDataSizeInBytes().getAsLong(), 0L); @@ -4354,7 +4359,7 @@ private void doInsertIntoNewPartition(HiveStorageFormat storageFormat, SchemaTab assertEqualsIgnoreOrder(result.getMaterializedRows(), CREATE_TABLE_PARTITIONED_DATA.getMaterializedRows()); // verify we did not modify the table directory - assertEquals(listAllDataFiles(transaction, tableName.getSchemaName(), tableName.getTableName()), existingFiles); + assertEquals(listAllDataFiles(session.getIdentity(), transaction, tableName.getSchemaName(), tableName.getTableName()), existingFiles); // verify temp directory is empty HdfsContext context = new HdfsContext(session, tableName.getSchemaName(), tableName.getTableName(), stagingPathRoot.toString(), false); @@ -4400,7 +4405,7 @@ private void doInsertIntoExistingPartition(HiveStorageFormat storageFormat, Sche ConnectorTableHandle tableHandle = getTableHandle(metadata, tableName); // verify partitions were created - List partitionNames = transaction.getMetastore().getPartitionNames(tableName.getSchemaName(), tableName.getTableName()) + List partitionNames = transaction.getMetastore().getPartitionNames(new MetastoreContext(session.getIdentity()), tableName.getSchemaName(), tableName.getTableName()) .orElseThrow(() -> new AssertionError("Table does not exist: " + tableName)); assertEqualsIgnoreOrder(partitionNames, CREATE_TABLE_PARTITIONED_DATA.getMaterializedRows().stream() .map(row -> "ds=" + row.getField(CREATE_TABLE_PARTITIONED_DATA.getTypes().size() - 1)) @@ -4416,7 +4421,7 @@ private void doInsertIntoExistingPartition(HiveStorageFormat storageFormat, Sche // test statistics for (String partitionName : partitionNames) { - HiveBasicStatistics statistics = getBasicStatisticsForPartition(transaction, tableName, partitionName); + HiveBasicStatistics statistics = getBasicStatisticsForPartition(session.getIdentity(), transaction, tableName, partitionName); assertEquals(statistics.getRowCount().getAsLong(), i + 1L); 
assertEquals(statistics.getFileCount().getAsLong(), i + 1L); assertGreaterThan(statistics.getInMemoryDataSizeInBytes().getAsLong(), 0L); @@ -4432,7 +4437,7 @@ private void doInsertIntoExistingPartition(HiveStorageFormat storageFormat, Sche ConnectorMetadata metadata = transaction.getMetadata(); ConnectorSession session = newSession(); - existingFiles = listAllDataFiles(transaction, tableName.getSchemaName(), tableName.getTableName()); + existingFiles = listAllDataFiles(session.getIdentity(), transaction, tableName.getSchemaName(), tableName.getTableName()); assertFalse(existingFiles.isEmpty()); ConnectorTableHandle tableHandle = getTableHandle(metadata, tableName); @@ -4464,10 +4469,10 @@ private void doInsertIntoExistingPartition(HiveStorageFormat storageFormat, Sche } // verify statistics are visible from within of the current transaction - List partitionNames = transaction.getMetastore().getPartitionNames(tableName.getSchemaName(), tableName.getTableName()) + List partitionNames = transaction.getMetastore().getPartitionNames(new MetastoreContext(session.getIdentity()), tableName.getSchemaName(), tableName.getTableName()) .orElseThrow(() -> new AssertionError("Table does not exist: " + tableName)); for (String partitionName : partitionNames) { - HiveBasicStatistics partitionStatistics = getBasicStatisticsForPartition(transaction, tableName, partitionName); + HiveBasicStatistics partitionStatistics = getBasicStatisticsForPartition(session.getIdentity(), transaction, tableName, partitionName); assertEquals(partitionStatistics.getRowCount().getAsLong(), 5L); } @@ -4486,17 +4491,17 @@ private void doInsertIntoExistingPartition(HiveStorageFormat storageFormat, Sche assertEqualsIgnoreOrder(result.getMaterializedRows(), resultBuilder.build().getMaterializedRows()); // verify we did not modify the table directory - assertEquals(listAllDataFiles(transaction, tableName.getSchemaName(), tableName.getTableName()), existingFiles); + 
assertEquals(listAllDataFiles(session.getIdentity(), transaction, tableName.getSchemaName(), tableName.getTableName()), existingFiles); // verify temp directory is empty HdfsContext context = new HdfsContext(session, tableName.getSchemaName(), tableName.getTableName(), stagingPathRoot.toString(), false); assertTrue(listAllDataFiles(context, stagingPathRoot).isEmpty()); // verify statistics have been rolled back - List partitionNames = transaction.getMetastore().getPartitionNames(tableName.getSchemaName(), tableName.getTableName()) + List partitionNames = transaction.getMetastore().getPartitionNames(new MetastoreContext(session.getIdentity()), tableName.getSchemaName(), tableName.getTableName()) .orElseThrow(() -> new AssertionError("Table does not exist: " + tableName)); for (String partitionName : partitionNames) { - HiveBasicStatistics partitionStatistics = getBasicStatisticsForPartition(transaction, tableName, partitionName); + HiveBasicStatistics partitionStatistics = getBasicStatisticsForPartition(session.getIdentity(), transaction, tableName, partitionName); assertEquals(partitionStatistics.getRowCount().getAsLong(), 3L); } } @@ -4521,13 +4526,13 @@ private void doInsertIntoExistingPartitionEmptyStatistics(HiveStorageFormat stor eraseStatistics(tableName); insertData(tableName, CREATE_TABLE_PARTITIONED_DATA); - + ConnectorSession session = newSession(); try (Transaction transaction = newTransaction()) { - List partitionNames = transaction.getMetastore().getPartitionNames(tableName.getSchemaName(), tableName.getTableName()) + List partitionNames = transaction.getMetastore().getPartitionNames(new MetastoreContext(session.getIdentity()), tableName.getSchemaName(), tableName.getTableName()) .orElseThrow(() -> new AssertionError("Table does not exist: " + tableName)); for (String partitionName : partitionNames) { - HiveBasicStatistics statistics = getBasicStatisticsForPartition(transaction, tableName, partitionName); + HiveBasicStatistics statistics = 
getBasicStatisticsForPartition(session.getIdentity(), transaction, tableName, partitionName); assertThat(statistics.getRowCount()).isNotPresent(); assertThat(statistics.getInMemoryDataSizeInBytes()).isNotPresent(); // fileCount and rawSize statistics are computed on the fly by the metastore, thus cannot be erased @@ -4535,19 +4540,19 @@ private void doInsertIntoExistingPartitionEmptyStatistics(HiveStorageFormat stor } } - private static HiveBasicStatistics getBasicStatisticsForTable(Transaction transaction, SchemaTableName table) + private static HiveBasicStatistics getBasicStatisticsForTable(ConnectorIdentity identity, Transaction transaction, SchemaTableName table) { return transaction .getMetastore() - .getTableStatistics(table.getSchemaName(), table.getTableName()) + .getTableStatistics(new MetastoreContext(identity), table.getSchemaName(), table.getTableName()) .getBasicStatistics(); } - private static HiveBasicStatistics getBasicStatisticsForPartition(Transaction transaction, SchemaTableName table, String partitionName) + private static HiveBasicStatistics getBasicStatisticsForPartition(ConnectorIdentity identity, Transaction transaction, SchemaTableName table, String partitionName) { return transaction .getMetastore() - .getPartitionStatistics(table.getSchemaName(), table.getTableName(), ImmutableSet.of(partitionName)) + .getPartitionStatistics(new MetastoreContext(identity), table.getSchemaName(), table.getTableName(), ImmutableSet.of(partitionName)) .get(partitionName) .getBasicStatistics(); } @@ -4555,17 +4560,17 @@ private static HiveBasicStatistics getBasicStatisticsForPartition(Transaction tr private void eraseStatistics(SchemaTableName schemaTableName) { ExtendedHiveMetastore metastoreClient = getMetastoreClient(); - metastoreClient.updateTableStatistics(schemaTableName.getSchemaName(), schemaTableName.getTableName(), statistics -> new PartitionStatistics(createEmptyStatistics(), ImmutableMap.of())); - Table table = 
metastoreClient.getTable(schemaTableName.getSchemaName(), schemaTableName.getTableName()) + metastoreClient.updateTableStatistics(new MetastoreContext("test_user"), schemaTableName.getSchemaName(), schemaTableName.getTableName(), statistics -> new PartitionStatistics(createEmptyStatistics(), ImmutableMap.of())); + Table table = metastoreClient.getTable(new MetastoreContext("test_user"), schemaTableName.getSchemaName(), schemaTableName.getTableName()) .orElseThrow(() -> new TableNotFoundException(schemaTableName)); List partitionColumns = table.getPartitionColumns().stream() .map(Column::getName) .collect(toImmutableList()); if (!table.getPartitionColumns().isEmpty()) { - List partitionNames = metastoreClient.getPartitionNames(schemaTableName.getSchemaName(), schemaTableName.getTableName()) + List partitionNames = metastoreClient.getPartitionNames(new MetastoreContext("test_user"), schemaTableName.getSchemaName(), schemaTableName.getTableName()) .orElse(ImmutableList.of()); List partitions = metastoreClient - .getPartitionsByNames(schemaTableName.getSchemaName(), schemaTableName.getTableName(), partitionNames) + .getPartitionsByNames(new MetastoreContext("test_user"), schemaTableName.getSchemaName(), schemaTableName.getTableName(), partitionNames) .entrySet() .stream() .map(Map.Entry::getValue) @@ -4574,6 +4579,7 @@ private void eraseStatistics(SchemaTableName schemaTableName) .collect(toImmutableList()); for (Partition partition : partitions) { metastoreClient.updatePartitionStatistics( + new MetastoreContext("test_user"), schemaTableName.getSchemaName(), schemaTableName.getTableName(), makePartName(partitionColumns, partition.getValues()), @@ -4642,14 +4648,14 @@ private void doTestMetadataDelete(HiveStorageFormat storageFormat, SchemaTableNa ConnectorMetadata metadata = transaction.getMetadata(); // verify partitions were created - List partitionNames = transaction.getMetastore().getPartitionNames(tableName.getSchemaName(), tableName.getTableName()) + List 
partitionNames = transaction.getMetastore().getPartitionNames(new MetastoreContext(session.getIdentity()), tableName.getSchemaName(), tableName.getTableName()) .orElseThrow(() -> new AssertionError("Table does not exist: " + tableName)); assertEqualsIgnoreOrder(partitionNames, CREATE_TABLE_PARTITIONED_DATA.getMaterializedRows().stream() .map(row -> "ds=" + row.getField(CREATE_TABLE_PARTITIONED_DATA.getTypes().size() - 1)) .collect(toList())); // verify table directory is not empty - Set filesAfterInsert = listAllDataFiles(transaction, tableName.getSchemaName(), tableName.getTableName()); + Set filesAfterInsert = listAllDataFiles(session.getIdentity(), transaction, tableName.getSchemaName(), tableName.getTableName()); assertFalse(filesAfterInsert.isEmpty()); // verify the data @@ -4723,7 +4729,7 @@ private void doTestMetadataDelete(HiveStorageFormat storageFormat, SchemaTableNa assertEqualsIgnoreOrder(actualAfterDelete2.getMaterializedRows(), ImmutableList.of()); // verify table directory is empty - Set filesAfterDelete = listAllDataFiles(transaction, tableName.getSchemaName(), tableName.getTableName()); + Set filesAfterDelete = listAllDataFiles(session.getIdentity(), transaction, tableName.getSchemaName(), tableName.getTableName()); assertTrue(filesAfterDelete.isEmpty()); } } @@ -5339,7 +5345,8 @@ protected Table createEmptyTable(SchemaTableName schemaTableName, HiveStorageFor assertEquals(targetDirectoryList, ImmutableList.of()); try (Transaction transaction = newTransaction()) { - return transaction.getMetastore().getTable(schemaTableName.getSchemaName(), schemaTableName.getTableName()).get(); + ConnectorSession session = newSession(); + return transaction.getMetastore().getTable(new MetastoreContext(session.getIdentity()), schemaTableName.getSchemaName(), schemaTableName.getTableName()).get(); } } @@ -5351,13 +5358,13 @@ private void alterBucketProperty(SchemaTableName schemaTableName, Optional table = transaction.getMetastore().getTable(schemaName, tableName); 
+ MetastoreContext metastoreContext = new MetastoreContext(session.getIdentity()); + Optional
table = transaction.getMetastore().getTable(metastoreContext, schemaName, tableName); Table.Builder tableBuilder = Table.builder(table.get()); tableBuilder.getStorageBuilder().setBucketProperty(bucketProperty); PrincipalPrivileges principalPrivileges = testingPrincipalPrivilege(tableOwner, session.getUser()); // hack: replaceView can be used as replaceTable despite its name - transaction.getMetastore().replaceView(schemaName, tableName, tableBuilder.build(), principalPrivileges); + transaction.getMetastore().replaceView(metastoreContext, schemaName, tableName, tableBuilder.build(), principalPrivileges); transaction.commit(); } @@ -5571,9 +5578,10 @@ private void doTestTransactionDeleteInsert( } try (Transaction transaction = newTransaction()) { + ConnectorSession session = newSession(); // verify partitions List partitionNames = transaction.getMetastore() - .getPartitionNames(tableName.getSchemaName(), tableName.getTableName()) + .getPartitionNames(new MetastoreContext(session.getIdentity()), tableName.getSchemaName(), tableName.getTableName()) .orElseThrow(() -> new AssertionError("Table does not exist: " + tableName)); assertEqualsIgnoreOrder( partitionNames, @@ -5583,7 +5591,6 @@ private void doTestTransactionDeleteInsert( .collect(toList())); // load the new table - ConnectorSession session = newSession(); ConnectorMetadata metadata = transaction.getMetadata(); ConnectorTableHandle tableHandle = getTableHandle(metadata, tableName); List columnHandles = filterNonHiddenColumnHandles(metadata.getColumnHandles(session, tableHandle).values()); @@ -5686,11 +5693,12 @@ public void triggerConflict(ConnectorSession session, SchemaTableName tableName, // This method bypasses transaction interface because this method is inherently hacky and doesn't work well with the transaction abstraction. // Additionally, this method is not part of a test. Its purpose is to set up an environment for another test. 
ExtendedHiveMetastore metastoreClient = getMetastoreClient(); - Optional partition = metastoreClient.getPartition(tableName.getSchemaName(), tableName.getTableName(), copyPartitionFrom); + Optional partition = metastoreClient.getPartition(new MetastoreContext("test_user"), tableName.getSchemaName(), tableName.getTableName(), copyPartitionFrom); conflictPartition = Partition.builder(partition.get()) .setValues(toPartitionValues(partitionNameToConflict)) .build(); metastoreClient.addPartitions( + new MetastoreContext("test_user"), tableName.getSchemaName(), tableName.getTableName(), ImmutableList.of(new PartitionWithStatistics(conflictPartition, partitionNameToConflict, PartitionStatistics.empty()))); @@ -5702,11 +5710,11 @@ public void verifyAndCleanup(SchemaTableName tableName) // This method bypasses transaction interface because this method is inherently hacky and doesn't work well with the transaction abstraction. // Additionally, this method is not part of a test. Its purpose is to set up an environment for another test. ExtendedHiveMetastore metastoreClient = getMetastoreClient(); - Optional actualPartition = metastoreClient.getPartition(tableName.getSchemaName(), tableName.getTableName(), toPartitionValues(partitionNameToConflict)); + Optional actualPartition = metastoreClient.getPartition(new MetastoreContext("test_user"), tableName.getSchemaName(), tableName.getTableName(), toPartitionValues(partitionNameToConflict)); // Make sure the partition inserted to trigger conflict was not overwritten // Checking storage location is sufficient because implement never uses .../pk1=a/pk2=a2 as the directory for partition [b, b2]. 
assertEquals(actualPartition.get().getStorage().getLocation(), conflictPartition.getStorage().getLocation()); - metastoreClient.dropPartition(tableName.getSchemaName(), tableName.getTableName(), conflictPartition.getValues(), false); + metastoreClient.dropPartition(new MetastoreContext("test_user"), tableName.getSchemaName(), tableName.getTableName(), conflictPartition.getValues(), false); } } @@ -5721,7 +5729,7 @@ public void triggerConflict(ConnectorSession session, SchemaTableName tableName, // This method bypasses transaction interface because this method is inherently hacky and doesn't work well with the transaction abstraction. // Additionally, this method is not part of a test. Its purpose is to set up an environment for another test. ExtendedHiveMetastore metastoreClient = getMetastoreClient(); - metastoreClient.dropPartition(tableName.getSchemaName(), tableName.getTableName(), partitionValueToConflict, false); + metastoreClient.dropPartition(new MetastoreContext("test_user"), tableName.getSchemaName(), tableName.getTableName(), partitionValueToConflict, false); } @Override diff --git a/presto-hive/src/test/java/com/facebook/presto/hive/AbstractTestHiveClientLocal.java b/presto-hive/src/test/java/com/facebook/presto/hive/AbstractTestHiveClientLocal.java index f02f6a9e93d9b..650366a971463 100644 --- a/presto-hive/src/test/java/com/facebook/presto/hive/AbstractTestHiveClientLocal.java +++ b/presto-hive/src/test/java/com/facebook/presto/hive/AbstractTestHiveClientLocal.java @@ -16,6 +16,7 @@ import com.facebook.presto.cache.CacheConfig; import com.facebook.presto.hive.metastore.Database; import com.facebook.presto.hive.metastore.ExtendedHiveMetastore; +import com.facebook.presto.hive.metastore.MetastoreContext; import com.facebook.presto.spi.ConnectorTableHandle; import com.facebook.presto.spi.SchemaTableName; import com.facebook.presto.spi.connector.ConnectorMetadata; @@ -59,7 +60,7 @@ public void initialize() ExtendedHiveMetastore metastore = 
createMetastore(tempDir); - metastore.createDatabase(Database.builder() + metastore.createDatabase(new MetastoreContext("test_user"), Database.builder() .setDatabaseName(testDbName) .setOwnerName("public") .setOwnerType(PrincipalType.ROLE) @@ -77,7 +78,7 @@ public void cleanup() throws IOException { try { - getMetastoreClient().dropDatabase(testDbName); + getMetastoreClient().dropDatabase(new MetastoreContext("test_user"), testDbName); } finally { deleteRecursively(tempDir.toPath(), ALLOW_INSECURE); diff --git a/presto-hive/src/test/java/com/facebook/presto/hive/AbstractTestHiveFileSystem.java b/presto-hive/src/test/java/com/facebook/presto/hive/AbstractTestHiveFileSystem.java index c6609b31eec65..a5c9cd23b710c 100644 --- a/presto-hive/src/test/java/com/facebook/presto/hive/AbstractTestHiveFileSystem.java +++ b/presto-hive/src/test/java/com/facebook/presto/hive/AbstractTestHiveFileSystem.java @@ -26,6 +26,7 @@ import com.facebook.presto.hive.metastore.Database; import com.facebook.presto.hive.metastore.ExtendedHiveMetastore; import com.facebook.presto.hive.metastore.HivePartitionMutator; +import com.facebook.presto.hive.metastore.MetastoreContext; import com.facebook.presto.hive.metastore.PrincipalPrivileges; import com.facebook.presto.hive.metastore.Table; import com.facebook.presto.hive.metastore.thrift.BridgingHiveMetastore; @@ -182,7 +183,7 @@ protected void setup(String host, int port, String databaseName, BiFunction columns = ImmutableList.builder() @@ -425,6 +426,7 @@ private void createTable(SchemaTableName tableName, HiveStorageFormat storageFor // We work around that by using a dummy location when creating the // table and update it here to the correct location. 
metastoreClient.updateTableLocation( + metastoreContext, database, tableName.getTableName(), locationService.getTableWriteInfo(((HiveOutputTableHandle) outputHandle).getLocationHandle()).getTargetPath().toString()); @@ -499,41 +501,41 @@ public TestingHiveMetastore(ExtendedHiveMetastore delegate, ExecutorService exec } @Override - public Optional getDatabase(String databaseName) + public Optional getDatabase(MetastoreContext metastoreContext, String databaseName) { - return super.getDatabase(databaseName) + return super.getDatabase(metastoreContext, databaseName) .map(database -> Database.builder(database) .setLocation(Optional.of(basePath.toString())) .build()); } @Override - public void createTable(Table table, PrincipalPrivileges privileges) + public void createTable(MetastoreContext metastoreContext, Table table, PrincipalPrivileges privileges) { // hack to work around the metastore not being configured for S3 or other FS Table.Builder tableBuilder = Table.builder(table); tableBuilder.getStorageBuilder().setLocation("/"); - super.createTable(tableBuilder.build(), privileges); + super.createTable(metastoreContext, tableBuilder.build(), privileges); } @Override - public void dropTable(String databaseName, String tableName, boolean deleteData) + public void dropTable(MetastoreContext metastoreContext, String databaseName, String tableName, boolean deleteData) { try { - Optional
table = getTable(databaseName, tableName); + Optional
table = getTable(metastoreContext, databaseName, tableName); if (!table.isPresent()) { throw new TableNotFoundException(new SchemaTableName(databaseName, tableName)); } // hack to work around the metastore not being configured for S3 or other FS - List locations = listAllDataPaths(databaseName, tableName); + List locations = listAllDataPaths(metastoreContext, databaseName, tableName); Table.Builder tableBuilder = Table.builder(table.get()); tableBuilder.getStorageBuilder().setLocation("/"); // drop table - replaceTable(databaseName, tableName, tableBuilder.build(), new PrincipalPrivileges(ImmutableMultimap.of(), ImmutableMultimap.of())); - delegate.dropTable(databaseName, tableName, false); + replaceTable(metastoreContext, databaseName, tableName, tableBuilder.build(), new PrincipalPrivileges(ImmutableMultimap.of(), ImmutableMultimap.of())); + delegate.dropTable(metastoreContext, databaseName, tableName, false); // drop data if (deleteData) { @@ -551,9 +553,9 @@ public void dropTable(String databaseName, String tableName, boolean deleteData) } } - public void updateTableLocation(String databaseName, String tableName, String location) + public void updateTableLocation(MetastoreContext metastoreContext, String databaseName, String tableName, String location) { - Optional
table = getTable(databaseName, tableName); + Optional
table = getTable(metastoreContext, databaseName, tableName); if (!table.isPresent()) { throw new TableNotFoundException(new SchemaTableName(databaseName, tableName)); } @@ -562,13 +564,13 @@ public void updateTableLocation(String databaseName, String tableName, String lo tableBuilder.getStorageBuilder().setLocation(location); // NOTE: this clears the permissions - replaceTable(databaseName, tableName, tableBuilder.build(), new PrincipalPrivileges(ImmutableMultimap.of(), ImmutableMultimap.of())); + replaceTable(metastoreContext, databaseName, tableName, tableBuilder.build(), new PrincipalPrivileges(ImmutableMultimap.of(), ImmutableMultimap.of())); } - private List listAllDataPaths(String schemaName, String tableName) + private List listAllDataPaths(MetastoreContext metastoreContext, String schemaName, String tableName) { ImmutableList.Builder locations = ImmutableList.builder(); - Table table = getTable(schemaName, tableName).get(); + Table table = getTable(metastoreContext, schemaName, tableName).get(); if (table.getStorage().getLocation() != null) { // For partitioned table, there should be nothing directly under this directory. 
// But including this location in the set makes the directory content assert more @@ -576,9 +578,9 @@ private List listAllDataPaths(String schemaName, String tableName) locations.add(table.getStorage().getLocation()); } - Optional> partitionNames = getPartitionNames(schemaName, tableName); + Optional> partitionNames = getPartitionNames(metastoreContext, schemaName, tableName); if (partitionNames.isPresent()) { - getPartitionsByNames(schemaName, tableName, partitionNames.get()).values().stream() + getPartitionsByNames(metastoreContext, schemaName, tableName, partitionNames.get()).values().stream() .map(Optional::get) .map(partition -> partition.getStorage().getLocation()) .filter(location -> !location.startsWith(table.getStorage().getLocation())) diff --git a/presto-hive/src/test/java/com/facebook/presto/hive/HiveBenchmarkQueryRunner.java b/presto-hive/src/test/java/com/facebook/presto/hive/HiveBenchmarkQueryRunner.java index dbe6669a224bd..0a0f2e605850d 100644 --- a/presto-hive/src/test/java/com/facebook/presto/hive/HiveBenchmarkQueryRunner.java +++ b/presto-hive/src/test/java/com/facebook/presto/hive/HiveBenchmarkQueryRunner.java @@ -17,6 +17,7 @@ import com.facebook.presto.benchmark.BenchmarkSuite; import com.facebook.presto.hive.metastore.Database; import com.facebook.presto.hive.metastore.ExtendedHiveMetastore; +import com.facebook.presto.hive.metastore.MetastoreContext; import com.facebook.presto.spi.security.PrincipalType; import com.facebook.presto.testing.LocalQueryRunner; import com.facebook.presto.tpch.TpchConnectorFactory; @@ -68,7 +69,7 @@ public static LocalQueryRunner createLocalQueryRunner(File tempDir) // add hive File hiveDir = new File(tempDir, "hive_data"); ExtendedHiveMetastore metastore = createTestingFileHiveMetastore(hiveDir); - metastore.createDatabase(Database.builder() + metastore.createDatabase(new MetastoreContext("test_user"), Database.builder() .setDatabaseName("tpch") .setOwnerName("public") .setOwnerType(PrincipalType.ROLE) diff 
--git a/presto-hive/src/test/java/com/facebook/presto/hive/HiveQueryRunner.java b/presto-hive/src/test/java/com/facebook/presto/hive/HiveQueryRunner.java index 5a10a70d6b472..b8b38128f737d 100644 --- a/presto-hive/src/test/java/com/facebook/presto/hive/HiveQueryRunner.java +++ b/presto-hive/src/test/java/com/facebook/presto/hive/HiveQueryRunner.java @@ -21,6 +21,7 @@ import com.facebook.presto.hive.TestHiveEventListenerPlugin.TestingHiveEventListenerPlugin; import com.facebook.presto.hive.authentication.NoHdfsAuthentication; import com.facebook.presto.hive.metastore.Database; +import com.facebook.presto.hive.metastore.MetastoreContext; import com.facebook.presto.hive.metastore.file.FileHiveMetastore; import com.facebook.presto.spi.security.Identity; import com.facebook.presto.spi.security.PrincipalType; @@ -183,18 +184,18 @@ public static DistributedQueryRunner createQueryRunner( queryRunner.createCatalog(HIVE_CATALOG, HIVE_CATALOG, hiveProperties); queryRunner.createCatalog(HIVE_BUCKETED_CATALOG, HIVE_CATALOG, hiveBucketedProperties); - if (!metastore.getDatabase(TPCH_SCHEMA).isPresent()) { - metastore.createDatabase(createDatabaseMetastoreObject(TPCH_SCHEMA)); + if (!metastore.getDatabase(new MetastoreContext("test_user"), TPCH_SCHEMA).isPresent()) { + metastore.createDatabase(new MetastoreContext("test_user"), createDatabaseMetastoreObject(TPCH_SCHEMA)); copyTpchTables(queryRunner, "tpch", TINY_SCHEMA_NAME, createSession(Optional.empty()), tables); } - if (!metastore.getDatabase(TPCH_BUCKETED_SCHEMA).isPresent()) { - metastore.createDatabase(createDatabaseMetastoreObject(TPCH_BUCKETED_SCHEMA)); + if (!metastore.getDatabase(new MetastoreContext("test_user"), TPCH_BUCKETED_SCHEMA).isPresent()) { + metastore.createDatabase(new MetastoreContext("test_user"), createDatabaseMetastoreObject(TPCH_BUCKETED_SCHEMA)); copyTpchTablesBucketed(queryRunner, "tpch", TINY_SCHEMA_NAME, createBucketedSession(Optional.empty()), tables); } - if 
(!metastore.getDatabase(TEMPORARY_TABLE_SCHEMA).isPresent()) { - metastore.createDatabase(createDatabaseMetastoreObject(TEMPORARY_TABLE_SCHEMA)); + if (!metastore.getDatabase(new MetastoreContext("test_user"), TEMPORARY_TABLE_SCHEMA).isPresent()) { + metastore.createDatabase(new MetastoreContext("test_user"), createDatabaseMetastoreObject(TEMPORARY_TABLE_SCHEMA)); } return queryRunner; diff --git a/presto-hive/src/test/java/com/facebook/presto/hive/TestHiveClientFileMetastore.java b/presto-hive/src/test/java/com/facebook/presto/hive/TestHiveClientFileMetastore.java index 88f5ac652f89e..7f339c1ccb335 100644 --- a/presto-hive/src/test/java/com/facebook/presto/hive/TestHiveClientFileMetastore.java +++ b/presto-hive/src/test/java/com/facebook/presto/hive/TestHiveClientFileMetastore.java @@ -18,6 +18,7 @@ import com.facebook.presto.execution.warnings.WarningHandlingLevel; import com.facebook.presto.hive.authentication.NoHdfsAuthentication; import com.facebook.presto.hive.metastore.ExtendedHiveMetastore; +import com.facebook.presto.hive.metastore.MetastoreContext; import com.facebook.presto.hive.metastore.Partition; import com.facebook.presto.hive.metastore.PartitionStatistics; import com.facebook.presto.hive.metastore.PartitionWithStatistics; @@ -168,10 +169,11 @@ public void testPartitionNotReadable() private void createDummyPartitionedTable(SchemaTableName tableName, List columns, Map dynamicPartitionParameters) throws Exception { + MetastoreContext metastoreContext = new MetastoreContext("test_user"); doCreateEmptyTable(tableName, ORC, columns); ExtendedHiveMetastore metastoreClient = getMetastoreClient(); - Table table = metastoreClient.getTable(tableName.getSchemaName(), tableName.getTableName()) + Table table = metastoreClient.getTable(metastoreContext, tableName.getSchemaName(), tableName.getTableName()) .orElseThrow(() -> new TableNotFoundException(tableName)); List firstPartitionValues = ImmutableList.of("2020-01-01"); @@ -186,16 +188,16 @@ private void 
createDummyPartitionedTable(SchemaTableName tableName, List new PartitionWithStatistics(createDummyPartition(table, partitionName), partitionName, PartitionStatistics.empty())) .collect(toImmutableList()); - metastoreClient.addPartitions(tableName.getSchemaName(), tableName.getTableName(), partitions); - metastoreClient.updatePartitionStatistics(tableName.getSchemaName(), tableName.getTableName(), firstPartitionName, currentStatistics -> EMPTY_TABLE_STATISTICS); - metastoreClient.updatePartitionStatistics(tableName.getSchemaName(), tableName.getTableName(), secondPartitionName, currentStatistics -> EMPTY_TABLE_STATISTICS); + metastoreClient.addPartitions(metastoreContext, tableName.getSchemaName(), tableName.getTableName(), partitions); + metastoreClient.updatePartitionStatistics(metastoreContext, tableName.getSchemaName(), tableName.getTableName(), firstPartitionName, currentStatistics -> EMPTY_TABLE_STATISTICS); + metastoreClient.updatePartitionStatistics(metastoreContext, tableName.getSchemaName(), tableName.getTableName(), secondPartitionName, currentStatistics -> EMPTY_TABLE_STATISTICS); List partitionsNotReadable = ImmutableList.of(thirdPartitionName) .stream() .map(partitionName -> new PartitionWithStatistics(createDummyPartition(table, partitionName, dynamicPartitionParameters), partitionName, PartitionStatistics.empty())) .collect(toImmutableList()); - metastoreClient.addPartitions(tableName.getSchemaName(), tableName.getTableName(), partitionsNotReadable); - metastoreClient.updatePartitionStatistics(tableName.getSchemaName(), tableName.getTableName(), thirdPartitionName, currentStatistics -> EMPTY_TABLE_STATISTICS); + metastoreClient.addPartitions(metastoreContext, tableName.getSchemaName(), tableName.getTableName(), partitionsNotReadable); + metastoreClient.updatePartitionStatistics(metastoreContext, tableName.getSchemaName(), tableName.getTableName(), thirdPartitionName, currentStatistics -> EMPTY_TABLE_STATISTICS); } private Partition 
createDummyPartition(Table table, String partitionName, Map dynamicPartitionParameters) diff --git a/presto-hive/src/test/java/com/facebook/presto/hive/TestHiveMetadataFileFormatEncryptionSettings.java b/presto-hive/src/test/java/com/facebook/presto/hive/TestHiveMetadataFileFormatEncryptionSettings.java index cbaa8ec66423a..1ac18111ee5be 100644 --- a/presto-hive/src/test/java/com/facebook/presto/hive/TestHiveMetadataFileFormatEncryptionSettings.java +++ b/presto-hive/src/test/java/com/facebook/presto/hive/TestHiveMetadataFileFormatEncryptionSettings.java @@ -19,6 +19,7 @@ import com.facebook.presto.hive.metastore.Database; import com.facebook.presto.hive.metastore.ExtendedHiveMetastore; import com.facebook.presto.hive.metastore.HivePartitionMutator; +import com.facebook.presto.hive.metastore.MetastoreContext; import com.facebook.presto.hive.metastore.Partition; import com.facebook.presto.hive.metastore.thrift.BridgingHiveMetastore; import com.facebook.presto.hive.metastore.thrift.InMemoryHiveMetastore; @@ -118,6 +119,7 @@ public void setup() true, HIVE_CLIENT_CONFIG.getMaxPartitionBatchSize(), HIVE_CLIENT_CONFIG.getMaxPartitionsPerScan(), + false, FUNCTION_AND_TYPE_MANAGER, new HiveLocationService(HDFS_ENVIRONMENT), FUNCTION_RESOLUTION, @@ -135,7 +137,7 @@ public void setup() new HivePartitionStats(), new HiveFileRenamer()); - metastore.createDatabase(Database.builder() + metastore.createDatabase(new MetastoreContext("test_user"), Database.builder() .setDatabaseName(TEST_DB_NAME) .setOwnerName("public") .setOwnerType(PrincipalType.ROLE) @@ -287,7 +289,7 @@ public void testCreateTableAsSelectWithColumnKeyReference() metadata.commit(); - Map> partitions = metastore.getPartitionsByNames(TEST_DB_NAME, tableName, ImmutableList.of("ds=2020-06-26", "ds=2020-06-27")); + Map> partitions = metastore.getPartitionsByNames(new MetastoreContext("test_user"), TEST_DB_NAME, tableName, ImmutableList.of("ds=2020-06-26", "ds=2020-06-27")); 
assertEquals(partitions.get("ds=2020-06-26").get().getParameters().get(TEST_EXTRA_METADATA), "test_algo"); assertEquals(partitions.get("ds=2020-06-27").get().getParameters().get(TEST_EXTRA_METADATA), "test_algo"); // Checking NEW_PARTITION_USER_SUPPLIED_PARAMETER @@ -457,7 +459,7 @@ ENCRYPT_COLUMNS, fromTableProperty(ImmutableList.of("key1:t_struct.str")), ImmutableList.of()); createHiveMetadata.commit(); - Map> partitions = metastore.getPartitionsByNames(TEST_DB_NAME, tableName, ImmutableList.of("ds=2020-06-26", "ds=2020-06-27")); + Map> partitions = metastore.getPartitionsByNames(new MetastoreContext("test_user"), TEST_DB_NAME, tableName, ImmutableList.of("ds=2020-06-26", "ds=2020-06-27")); assertEquals(partitions.get("ds=2020-06-26").get().getStorage().getLocation(), "path1"); assertEquals(partitions.get("ds=2020-06-26").get().getParameters().get(TEST_EXTRA_METADATA), "test_algo"); assertEquals(partitions.get("ds=2020-06-27").get().getParameters().get(TEST_EXTRA_METADATA), "test_algo"); @@ -474,7 +476,7 @@ ENCRYPT_COLUMNS, fromTableProperty(ImmutableList.of("key1:t_struct.str")), ImmutableList.of()); overrideHiveMetadata.commit(); - partitions = metastore.getPartitionsByNames(TEST_DB_NAME, tableName, ImmutableList.of("ds=2020-06-26")); + partitions = metastore.getPartitionsByNames(new MetastoreContext("test_user"), TEST_DB_NAME, tableName, ImmutableList.of("ds=2020-06-26")); assertEquals(partitions.get("ds=2020-06-26").get().getStorage().getLocation(), "path3"); assertEquals(partitions.get("ds=2020-06-26").get().getParameters().get(TEST_EXTRA_METADATA), "test_algo"); } diff --git a/presto-hive/src/test/java/com/facebook/presto/hive/TestHivePageSink.java b/presto-hive/src/test/java/com/facebook/presto/hive/TestHivePageSink.java index 53555a917ec84..05ecc25ef5cbd 100644 --- a/presto-hive/src/test/java/com/facebook/presto/hive/TestHivePageSink.java +++ b/presto-hive/src/test/java/com/facebook/presto/hive/TestHivePageSink.java @@ -23,6 +23,7 @@ import 
com.facebook.presto.hive.metastore.Column; import com.facebook.presto.hive.metastore.ExtendedHiveMetastore; import com.facebook.presto.hive.metastore.HivePageSinkMetadata; +import com.facebook.presto.hive.metastore.MetastoreContext; import com.facebook.presto.hive.metastore.Storage; import com.facebook.presto.hive.metastore.StorageFormat; import com.facebook.presto.metadata.MetadataManager; @@ -289,7 +290,7 @@ private static ConnectorPageSink createPageSink(HiveTransactionHandle transactio TABLE_NAME, getColumnHandles(), "test", - new HivePageSinkMetadata(new SchemaTableName(SCHEMA_NAME, TABLE_NAME), metastore.getTable(SCHEMA_NAME, TABLE_NAME), ImmutableMap.of()), + new HivePageSinkMetadata(new SchemaTableName(SCHEMA_NAME, TABLE_NAME), metastore.getTable(new MetastoreContext("test_user"), SCHEMA_NAME, TABLE_NAME), ImmutableMap.of()), locationHandle, config.getHiveStorageFormat(), config.getHiveStorageFormat(), diff --git a/presto-hive/src/test/java/com/facebook/presto/hive/TestHiveSplitManager.java b/presto-hive/src/test/java/com/facebook/presto/hive/TestHiveSplitManager.java index 157d67d65e3db..ed3edefa97d27 100644 --- a/presto-hive/src/test/java/com/facebook/presto/hive/TestHiveSplitManager.java +++ b/presto-hive/src/test/java/com/facebook/presto/hive/TestHiveSplitManager.java @@ -27,6 +27,7 @@ import com.facebook.presto.hive.datasink.OutputStreamDataSinkFactory; import com.facebook.presto.hive.filesystem.ExtendedFileSystem; import com.facebook.presto.hive.metastore.Column; +import com.facebook.presto.hive.metastore.MetastoreContext; import com.facebook.presto.hive.metastore.Partition; import com.facebook.presto.hive.metastore.PartitionStatistics; import com.facebook.presto.hive.metastore.PartitionWithStatistics; @@ -492,6 +493,7 @@ private void assertRedundantColumnDomains(Range predicateRange, PartitionStatist true, hiveClientConfig.getMaxPartitionBatchSize(), hiveClientConfig.getMaxPartitionsPerScan(), + false, FUNCTION_AND_TYPE_MANAGER, new 
HiveLocationService(hdfsEnvironment), FUNCTION_RESOLUTION, @@ -604,19 +606,19 @@ public TestingExtendedHiveMetastore(Table table, PartitionWithStatistics partiti } @Override - public Optional
getTable(String databaseName, String tableName) + public Optional
getTable(MetastoreContext metastoreContext, String databaseName, String tableName) { return Optional.of(table); } @Override - public Map> getPartitionsByNames(String databaseName, String tableName, List partitionNames) + public Map> getPartitionsByNames(MetastoreContext metastoreContext, String databaseName, String tableName, List partitionNames) { return ImmutableMap.of(partitionWithStatistics.getPartitionName(), Optional.of(partitionWithStatistics.getPartition())); } @Override - public Map getPartitionStatistics(String databaseName, String tableName, Set partitionNames) + public Map getPartitionStatistics(MetastoreContext metastoreContext, String databaseName, String tableName, Set partitionNames) { return ImmutableMap.of(partitionWithStatistics.getPartitionName(), partitionWithStatistics.getStatistics()); } diff --git a/presto-hive/src/test/java/com/facebook/presto/hive/TestingSemiTransactionalHiveMetastore.java b/presto-hive/src/test/java/com/facebook/presto/hive/TestingSemiTransactionalHiveMetastore.java index 0d15bb39a28ee..db8d8c661e011 100644 --- a/presto-hive/src/test/java/com/facebook/presto/hive/TestingSemiTransactionalHiveMetastore.java +++ b/presto-hive/src/test/java/com/facebook/presto/hive/TestingSemiTransactionalHiveMetastore.java @@ -24,6 +24,7 @@ import com.facebook.presto.hive.metastore.HivePartitionMutator; import com.facebook.presto.hive.metastore.HivePrivilegeInfo; import com.facebook.presto.hive.metastore.HiveTableName; +import com.facebook.presto.hive.metastore.MetastoreContext; import com.facebook.presto.hive.metastore.Partition; import com.facebook.presto.hive.metastore.PartitionStatistics; import com.facebook.presto.hive.metastore.PrincipalPrivileges; @@ -75,7 +76,7 @@ public static TestingSemiTransactionalHiveMetastore create() HdfsConfiguration hdfsConfiguration = new HiveHdfsConfiguration(new HdfsConfigurationInitializer(config, metastoreClientConfig), ImmutableSet.of()); HdfsEnvironment hdfsEnvironment = new 
HdfsEnvironment(hdfsConfiguration, metastoreClientConfig, new NoHdfsAuthentication()); HiveCluster hiveCluster = new TestingHiveCluster(metastoreClientConfig, HOST, PORT); - ExtendedHiveMetastore delegate = new BridgingHiveMetastore(new ThriftHiveMetastore(hiveCluster), new HivePartitionMutator()); + ExtendedHiveMetastore delegate = new BridgingHiveMetastore(new ThriftHiveMetastore(hiveCluster, metastoreClientConfig), new HivePartitionMutator()); ExecutorService executor = newCachedThreadPool(daemonThreadsNamed("hive-%s")); ListeningExecutorService renameExecutor = listeningDecorator(executor); @@ -90,85 +91,85 @@ public void addTable(String database, String tableName, Table table, List getAllDatabases() + public synchronized List getAllDatabases(MetastoreContext metastoreContext) { throw new UnsupportedOperationException("method not implemented"); } @Override - public synchronized Optional getDatabase(String databaseName) + public synchronized Optional getDatabase(MetastoreContext metastoreContext, String databaseName) { throw new UnsupportedOperationException("method not implemented"); } @Override - public synchronized Optional> getAllTables(String databaseName) + public synchronized Optional> getAllTables(MetastoreContext metastoreContext, String databaseName) { throw new UnsupportedOperationException("method not implemented"); } @Override - public synchronized Optional
getTable(String databaseName, String tableName) + public synchronized Optional
getTable(MetastoreContext metastoreContext, String databaseName, String tableName) { return Optional.ofNullable(tablesMap.get(new HiveTableName(databaseName, tableName))); } @Override - public synchronized Set getSupportedColumnStatistics(Type type) + public synchronized Set getSupportedColumnStatistics(MetastoreContext metastoreContext, Type type) { throw new UnsupportedOperationException("method not implemented"); } @Override - public synchronized PartitionStatistics getTableStatistics(String databaseName, String tableName) + public synchronized PartitionStatistics getTableStatistics(MetastoreContext metastoreContext, String databaseName, String tableName) { throw new UnsupportedOperationException("method not implemented"); } @Override - public synchronized Map getPartitionStatistics(String databaseName, String tableName, Set partitionNames) + public synchronized Map getPartitionStatistics(MetastoreContext metastoreContext, String databaseName, String tableName, Set partitionNames) { throw new UnsupportedOperationException("method not implemented"); } @Override - public synchronized HivePageSinkMetadata generatePageSinkMetadata(SchemaTableName schemaTableName) + public synchronized HivePageSinkMetadata generatePageSinkMetadata(MetastoreContext metastoreContext, SchemaTableName schemaTableName) { throw new UnsupportedOperationException("method not implemented"); } @Override - public synchronized Optional> getAllViews(String databaseName) + public synchronized Optional> getAllViews(MetastoreContext metastoreContext, String databaseName) { throw new UnsupportedOperationException("method not implemented"); } @Override - public synchronized void createDatabase(Database database) + public synchronized void createDatabase(MetastoreContext metastoreContext, Database database) { throw new UnsupportedOperationException("method not implemented"); } @Override - public synchronized void dropDatabase(String schemaName) + public synchronized void dropDatabase(MetastoreContext 
metastoreContext, String schemaName) { throw new UnsupportedOperationException("method not implemented"); } @Override - public synchronized void renameDatabase(String source, String target) + public synchronized void renameDatabase(MetastoreContext metastoreContext, String source, String target) { throw new UnsupportedOperationException("method not implemented"); } @Override - public synchronized void setTableStatistics(Table table, PartitionStatistics tableStatistics) + public synchronized void setTableStatistics(MetastoreContext metastoreContext, Table table, PartitionStatistics tableStatistics) { throw new UnsupportedOperationException("method not implemented"); } @Override - public synchronized void setPartitionStatistics(Table table, Map, PartitionStatistics> partitionStatisticsMap) + public synchronized void setPartitionStatistics(MetastoreContext metastoreContext, Table table, Map, PartitionStatistics> partitionStatisticsMap) { throw new UnsupportedOperationException("method not implemented"); } @@ -186,31 +187,31 @@ public synchronized void dropTable(HdfsContext context, String databaseName, Str } @Override - public synchronized void replaceView(String databaseName, String tableName, Table table, PrincipalPrivileges principalPrivileges) + public synchronized void replaceView(MetastoreContext metastoreContext, String databaseName, String tableName, Table table, PrincipalPrivileges principalPrivileges) { throw new UnsupportedOperationException("method not implemented"); } @Override - public synchronized void renameTable(String databaseName, String tableName, String newDatabaseName, String newTableName) + public synchronized void renameTable(MetastoreContext metastoreContext, String databaseName, String tableName, String newDatabaseName, String newTableName) { throw new UnsupportedOperationException("method not implemented"); } @Override - public synchronized void addColumn(String databaseName, String tableName, String columnName, HiveType columnType, String 
columnComment) + public synchronized void addColumn(MetastoreContext metastoreContext, String databaseName, String tableName, String columnName, HiveType columnType, String columnComment) { throw new UnsupportedOperationException("method not implemented"); } @Override - public synchronized void renameColumn(String databaseName, String tableName, String oldColumnName, String newColumnName) + public synchronized void renameColumn(MetastoreContext metastoreContext, String databaseName, String tableName, String oldColumnName, String newColumnName) { throw new UnsupportedOperationException("method not implemented"); } @Override - public synchronized void dropColumn(String databaseName, String tableName, String columnName) + public synchronized void dropColumn(MetastoreContext metastoreContext, String databaseName, String tableName, String columnName) { throw new UnsupportedOperationException("method not implemented"); } @@ -228,25 +229,25 @@ public synchronized void truncateUnpartitionedTable(ConnectorSession session, St } @Override - public synchronized Optional> getPartitionNames(String databaseName, String tableName) + public synchronized Optional> getPartitionNames(MetastoreContext metastoreContext, String databaseName, String tableName) { throw new UnsupportedOperationException("method not implemented"); } @Override - public synchronized Optional> getPartitionNamesByFilter(String databaseName, String tableName, Map effectivePredicate) + public synchronized Optional> getPartitionNamesByFilter(MetastoreContext metastoreContext, String databaseName, String tableName, Map effectivePredicate) { return Optional.ofNullable(partitionsMap.get(new HiveTableName(databaseName, tableName))); } @Override - public synchronized Optional getPartition(String databaseName, String tableName, List partitionValues) + public synchronized Optional getPartition(MetastoreContext metastoreContext, String databaseName, String tableName, List partitionValues) { throw new 
UnsupportedOperationException("method not implemented"); } @Override - public synchronized Map> getPartitionsByNames(String databaseName, String tableName, List partitionNames) + public synchronized Map> getPartitionsByNames(MetastoreContext metastoreContext, String databaseName, String tableName, List partitionNames) { throw new UnsupportedOperationException("method not implemented"); } @@ -270,61 +271,61 @@ public synchronized void finishInsertIntoExistingPartition(ConnectorSession sess } @Override - public synchronized void createRole(String role, String grantor) + public synchronized void createRole(MetastoreContext metastoreContext, String role, String grantor) { throw new UnsupportedOperationException("method not implemented"); } @Override - public synchronized void dropRole(String role) + public synchronized void dropRole(MetastoreContext metastoreContext, String role) { throw new UnsupportedOperationException("method not implemented"); } @Override - public synchronized Set listRoles() + public synchronized Set listRoles(MetastoreContext metastoreContext) { throw new UnsupportedOperationException("method not implemented"); } @Override - public synchronized void grantRoles(Set roles, Set grantees, boolean withAdminOption, PrestoPrincipal grantor) + public synchronized void grantRoles(MetastoreContext metastoreContext, Set roles, Set grantees, boolean withAdminOption, PrestoPrincipal grantor) { throw new UnsupportedOperationException("method not implemented"); } @Override - public synchronized void revokeRoles(Set roles, Set grantees, boolean adminOptionFor, PrestoPrincipal grantor) + public synchronized void revokeRoles(MetastoreContext metastoreContext, Set roles, Set grantees, boolean adminOptionFor, PrestoPrincipal grantor) { throw new UnsupportedOperationException("method not implemented"); } @Override - public synchronized Set listRoleGrants(PrestoPrincipal principal) + public synchronized Set listRoleGrants(MetastoreContext metastoreContext, 
PrestoPrincipal principal) { throw new UnsupportedOperationException("method not implemented"); } @Override - public synchronized Set listTablePrivileges(String databaseName, String tableName, PrestoPrincipal principal) + public synchronized Set listTablePrivileges(MetastoreContext metastoreContext, String databaseName, String tableName, PrestoPrincipal principal) { throw new UnsupportedOperationException("method not implemented"); } @Override - public synchronized void grantTablePrivileges(String databaseName, String tableName, PrestoPrincipal grantee, Set privileges) + public synchronized void grantTablePrivileges(MetastoreContext metastoreContext, String databaseName, String tableName, PrestoPrincipal grantee, Set privileges) { throw new UnsupportedOperationException("method not implemented"); } @Override - public synchronized void revokeTablePrivileges(String databaseName, String tableName, PrestoPrincipal grantee, Set privileges) + public synchronized void revokeTablePrivileges(MetastoreContext metastoreContext, String databaseName, String tableName, PrestoPrincipal grantee, Set privileges) { throw new UnsupportedOperationException("method not implemented"); } @Override - public synchronized void declareIntentionToWrite(HdfsContext context, LocationHandle.WriteMode writeMode, Path stagingPathRoot, Optional tempPathRoot, String filePrefix, SchemaTableName schemaTableName, boolean temporaryTable) + public synchronized void declareIntentionToWrite(HdfsContext context, MetastoreContext metastoreContext, LocationHandle.WriteMode writeMode, Path stagingPathRoot, Optional tempPathRoot, String filePrefix, SchemaTableName schemaTableName, boolean temporaryTable) { throw new UnsupportedOperationException("method not implemented"); } diff --git a/presto-hive/src/test/java/com/facebook/presto/hive/metastore/glue/TestHiveClientGlueMetastore.java b/presto-hive/src/test/java/com/facebook/presto/hive/metastore/glue/TestHiveClientGlueMetastore.java index 
4bf99a018f1a9..181dc2533ad2f 100644 --- a/presto-hive/src/test/java/com/facebook/presto/hive/metastore/glue/TestHiveClientGlueMetastore.java +++ b/presto-hive/src/test/java/com/facebook/presto/hive/metastore/glue/TestHiveClientGlueMetastore.java @@ -22,6 +22,7 @@ import com.facebook.presto.hive.MetastoreClientConfig; import com.facebook.presto.hive.authentication.NoHdfsAuthentication; import com.facebook.presto.hive.metastore.ExtendedHiveMetastore; +import com.facebook.presto.hive.metastore.MetastoreContext; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableSet; import org.testng.annotations.AfterClass; @@ -129,7 +130,7 @@ public void testGetPartitions() throws Exception { try { createDummyPartitionedTable(tablePartitionFormat, CREATE_TABLE_COLUMNS_PARTITIONED); - Optional> partitionNames = getMetastoreClient().getPartitionNames(tablePartitionFormat.getSchemaName(), tablePartitionFormat.getTableName()); + Optional> partitionNames = getMetastoreClient().getPartitionNames(new MetastoreContext("test_user"), tablePartitionFormat.getSchemaName(), tablePartitionFormat.getTableName()); assertTrue(partitionNames.isPresent()); assertEquals(partitionNames.get(), ImmutableList.of("ds=2016-01-01", "ds=2016-01-02")); } diff --git a/presto-hive/src/test/java/com/facebook/presto/hive/statistics/TestMetastoreHiveStatisticsProvider.java b/presto-hive/src/test/java/com/facebook/presto/hive/statistics/TestMetastoreHiveStatisticsProvider.java index 28e3ff10c8160..31e91e3f5b21b 100644 --- a/presto-hive/src/test/java/com/facebook/presto/hive/statistics/TestMetastoreHiveStatisticsProvider.java +++ b/presto-hive/src/test/java/com/facebook/presto/hive/statistics/TestMetastoreHiveStatisticsProvider.java @@ -632,7 +632,7 @@ public void testGetTableStatistics() .setBasicStatistics(new HiveBasicStatistics(OptionalLong.empty(), OptionalLong.of(1000), OptionalLong.of(5000), OptionalLong.empty())) .setColumnStatistics(ImmutableMap.of(COLUMN, 
createIntegerColumnStatistics(OptionalLong.of(-100), OptionalLong.of(100), OptionalLong.of(500), OptionalLong.of(300)))) .build(); - MetastoreHiveStatisticsProvider statisticsProvider = new MetastoreHiveStatisticsProvider((table, hivePartitions) -> ImmutableMap.of(partitionName, statistics)); + MetastoreHiveStatisticsProvider statisticsProvider = new MetastoreHiveStatisticsProvider((session, table, hivePartitions) -> ImmutableMap.of(partitionName, statistics)); TestingConnectorSession session = new TestingConnectorSession(new HiveSessionProperties(new HiveClientConfig(), new OrcFileWriterConfig(), new ParquetFileWriterConfig()).getSessionProperties()); HiveColumnHandle columnHandle = new HiveColumnHandle(COLUMN, HIVE_LONG, BIGINT.getTypeSignature(), 2, REGULAR, Optional.empty(), Optional.empty()); TableStatistics expected = TableStatistics.builder() @@ -683,7 +683,7 @@ public void testGetTableStatisticsUnpartitioned() .setBasicStatistics(new HiveBasicStatistics(OptionalLong.empty(), OptionalLong.of(1000), OptionalLong.of(5000), OptionalLong.empty())) .setColumnStatistics(ImmutableMap.of(COLUMN, createIntegerColumnStatistics(OptionalLong.of(-100), OptionalLong.of(100), OptionalLong.of(500), OptionalLong.of(300)))) .build(); - MetastoreHiveStatisticsProvider statisticsProvider = new MetastoreHiveStatisticsProvider((table, hivePartitions) -> ImmutableMap.of(UNPARTITIONED_ID, statistics)); + MetastoreHiveStatisticsProvider statisticsProvider = new MetastoreHiveStatisticsProvider((session, table, hivePartitions) -> ImmutableMap.of(UNPARTITIONED_ID, statistics)); TestingConnectorSession session = new TestingConnectorSession(new HiveSessionProperties(new HiveClientConfig(), new OrcFileWriterConfig(), new ParquetFileWriterConfig()).getSessionProperties()); HiveColumnHandle columnHandle = new HiveColumnHandle(COLUMN, HIVE_LONG, BIGINT.getTypeSignature(), 2, REGULAR, Optional.empty(), Optional.empty()); TableStatistics expected = TableStatistics.builder() @@ -711,7 +711,7 @@ 
public void testGetTableStatisticsUnpartitioned() public void testGetTableStatisticsEmpty() { String partitionName = "p1=string1/p2=1234"; - MetastoreHiveStatisticsProvider statisticsProvider = new MetastoreHiveStatisticsProvider((table, hivePartitions) -> ImmutableMap.of(partitionName, PartitionStatistics.empty())); + MetastoreHiveStatisticsProvider statisticsProvider = new MetastoreHiveStatisticsProvider((session, table, hivePartitions) -> ImmutableMap.of(partitionName, PartitionStatistics.empty())); TestingConnectorSession session = new TestingConnectorSession(new HiveSessionProperties(new HiveClientConfig(), new OrcFileWriterConfig(), new ParquetFileWriterConfig()).getSessionProperties()); assertEquals( statisticsProvider.getTableStatistics( @@ -726,7 +726,7 @@ public void testGetTableStatisticsEmpty() @Test public void testGetTableStatisticsSampling() { - MetastoreHiveStatisticsProvider statisticsProvider = new MetastoreHiveStatisticsProvider((table, hivePartitions) -> { + MetastoreHiveStatisticsProvider statisticsProvider = new MetastoreHiveStatisticsProvider((session, table, hivePartitions) -> { assertEquals(table, TABLE); assertEquals(hivePartitions.size(), 1); return ImmutableMap.of(); @@ -751,7 +751,7 @@ public void testGetTableStatisticsValidationFailure() .setBasicStatistics(new HiveBasicStatistics(-1, 0, 0, 0)) .build(); String partitionName = "p1=string1/p2=1234"; - MetastoreHiveStatisticsProvider statisticsProvider = new MetastoreHiveStatisticsProvider((table, hivePartitions) -> ImmutableMap.of(partitionName, corruptedStatistics)); + MetastoreHiveStatisticsProvider statisticsProvider = new MetastoreHiveStatisticsProvider((session, table, hivePartitions) -> ImmutableMap.of(partitionName, corruptedStatistics)); TestingConnectorSession session = new TestingConnectorSession(new HiveSessionProperties( new HiveClientConfig().setIgnoreCorruptedStatistics(false), new OrcFileWriterConfig(), diff --git 
a/presto-spark-base/src/test/java/com/facebook/presto/spark/PrestoSparkQueryRunner.java b/presto-spark-base/src/test/java/com/facebook/presto/spark/PrestoSparkQueryRunner.java index 979a804af9c50..db06d2402f07f 100644 --- a/presto-spark-base/src/test/java/com/facebook/presto/spark/PrestoSparkQueryRunner.java +++ b/presto-spark-base/src/test/java/com/facebook/presto/spark/PrestoSparkQueryRunner.java @@ -30,6 +30,7 @@ import com.facebook.presto.hive.authentication.NoHdfsAuthentication; import com.facebook.presto.hive.metastore.Database; import com.facebook.presto.hive.metastore.ExtendedHiveMetastore; +import com.facebook.presto.hive.metastore.MetastoreContext; import com.facebook.presto.hive.metastore.file.FileHiveMetastore; import com.facebook.presto.metadata.Catalog; import com.facebook.presto.metadata.CatalogManager; @@ -153,8 +154,8 @@ public static PrestoSparkQueryRunner createHivePrestoSparkQueryRunner(Iterable additio HdfsEnvironment hdfsEnvironment = new HdfsEnvironment(hdfsConfiguration, metastoreClientConfig, new NoHdfsAuthentication()); this.metastore = new FileHiveMetastore(hdfsEnvironment, baseDir.toURI().toString(), "test"); - metastore.createDatabase(createDatabaseMetastoreObject("hive_test")); + metastore.createDatabase(new MetastoreContext("test_user"), createDatabaseMetastoreObject("hive_test")); pluginManager.installPlugin(new HivePlugin("hive", Optional.of(metastore))); connectorManager.createConnection("hive", "hive", ImmutableMap.of()); diff --git a/presto-spark-base/src/test/java/com/facebook/presto/spark/TestPrestoSparkQueryRunner.java b/presto-spark-base/src/test/java/com/facebook/presto/spark/TestPrestoSparkQueryRunner.java index 651a2000a0834..4eaf72716cd63 100644 --- a/presto-spark-base/src/test/java/com/facebook/presto/spark/TestPrestoSparkQueryRunner.java +++ b/presto-spark-base/src/test/java/com/facebook/presto/spark/TestPrestoSparkQueryRunner.java @@ -14,6 +14,7 @@ package com.facebook.presto.spark; import com.facebook.presto.Session; 
+import com.facebook.presto.hive.metastore.MetastoreContext; import com.facebook.presto.testing.MaterializedResult; import com.facebook.presto.testing.QueryRunner; import com.facebook.presto.tests.AbstractTestQueryFramework; @@ -246,7 +247,7 @@ private void testBucketedTableWriteJoin(Session session, int firstInputBucketCou private void dropTable(String schema, String table) { - ((PrestoSparkQueryRunner) getQueryRunner()).getMetastore().dropTable(schema, table, true); + ((PrestoSparkQueryRunner) getQueryRunner()).getMetastore().dropTable(new MetastoreContext("test_user"), schema, table, true); } @Test