Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions presto-docs/src/main/sphinx/connector/hive-security.rst
Original file line number Diff line number Diff line change
Expand Up @@ -131,6 +131,7 @@ Property Name Description
to the Hive metastore service.

``hive.metastore.client.keytab`` Hive metastore client keytab location.
``hive.metastore-impersonation-enabled`` Enable metastore end-user impersonation.
================================================== ============================================================

``hive.metastore.authentication.type``
Expand Down
1 change: 1 addition & 0 deletions presto-docs/src/main/sphinx/connector/hive.rst
Original file line number Diff line number Diff line change
Expand Up @@ -184,6 +184,7 @@ Property Name Description

``hive.s3select-pushdown.max-connections`` Maximum number of simultaneously open connections to S3 for 500
S3SelectPushdown.
``hive.metastore.load-balancing-enabled`` Enable load balancing between multiple metastore instances.
================================================== ============================================================ ============

Metastore Configuration Properties
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -23,6 +23,7 @@
import com.facebook.presto.hive.MetastoreClientConfig;
import com.facebook.presto.hive.authentication.NoHdfsAuthentication;
import com.facebook.presto.hive.metastore.Database;
import com.facebook.presto.hive.metastore.MetastoreContext;
import com.facebook.presto.hive.metastore.file.FileHiveMetastore;
import com.facebook.presto.spi.security.PrincipalType;
import com.facebook.presto.tests.AbstractTestQueryFramework;
Expand Down Expand Up @@ -104,7 +105,7 @@ private static DistributedQueryRunner createQueryRunner()
HdfsEnvironment hdfsEnvironment = new HdfsEnvironment(hdfsConfiguration, metastoreClientConfig, new NoHdfsAuthentication());

FileHiveMetastore metastore = new FileHiveMetastore(hdfsEnvironment, baseDir.toURI().toString(), "test");
metastore.createDatabase(Database.builder()
metastore.createDatabase(new MetastoreContext("test_user"), Database.builder()
.setDatabaseName("default")
.setOwnerName("public")
.setOwnerType(PrincipalType.ROLE)
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,7 @@
package com.facebook.presto.hive;

import com.facebook.airlift.configuration.Config;
import com.facebook.airlift.configuration.ConfigDescription;
import com.facebook.presto.hive.metastore.CachingHiveMetastore.MetastoreCacheScope;
import com.google.common.net.HostAndPort;
import io.airlift.units.Duration;
Expand Down Expand Up @@ -44,6 +45,7 @@ public class MetastoreClientConfig
private Duration recordingDuration = new Duration(0, MINUTES);
private boolean partitionVersioningEnabled;
private MetastoreCacheScope metastoreCacheScope = MetastoreCacheScope.ALL;
private boolean metastoreImpersonationEnabled;

public HostAndPort getMetastoreSocksProxy()
{
Expand Down Expand Up @@ -222,4 +224,17 @@ public MetastoreClientConfig setMetastoreCacheScope(MetastoreCacheScope metastor
this.metastoreCacheScope = metastoreCacheScope;
return this;
}

/**
 * Returns whether end-user impersonation is enabled for Hive Metastore calls.
 */
public boolean isMetastoreImpersonationEnabled()
{
    return this.metastoreImpersonationEnabled;
}

/**
 * Enables or disables end-user impersonation for Hive Metastore calls.
 *
 * @return this config instance, for fluent chaining
 */
@Config("hive.metastore-impersonation-enabled")
@ConfigDescription("Should Presto user be impersonated when communicating with Hive Metastore")
public MetastoreClientConfig setMetastoreImpersonationEnabled(boolean enabled)
{
    this.metastoreImpersonationEnabled = enabled;
    return this;
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -15,7 +15,9 @@

import org.apache.thrift.transport.TTransport;

import java.util.Optional;

public interface HiveMetastoreAuthentication
{
    /**
     * Wraps the raw Thrift transport with whatever authentication layer the
     * implementation requires, or returns it unchanged when none is needed.
     *
     * @param rawTransport the unauthenticated Thrift transport
     * @param hiveMetastoreHost host name of the metastore being contacted
     * @param tokenString optional delegation token to authenticate with
     * @return a transport ready for metastore RPC
     */
    TTransport authenticate(TTransport rawTransport, String hiveMetastoreHost, Optional<String> tokenString);
}
Original file line number Diff line number Diff line change
Expand Up @@ -15,11 +15,13 @@

import org.apache.thrift.transport.TTransport;

import java.util.Optional;

public class NoHiveMetastoreAuthentication
implements HiveMetastoreAuthentication
{
@Override
public TTransport authenticate(TTransport rawTransport, String hiveMetastoreHost)
public TTransport authenticate(TTransport rawTransport, String hiveMetastoreHost, Optional<String> tokenString)
{
return rawTransport;
}
Expand Down

Large diffs are not rendered by default.

Original file line number Diff line number Diff line change
Expand Up @@ -28,88 +28,90 @@

/**
 * SPI for the Hive metastore operations used by the connector.
 * Every method takes a {@link MetastoreContext} carrying the end-user identity so
 * implementations can impersonate the caller when contacting the metastore.
 */
public interface ExtendedHiveMetastore
{
    // ---- Database operations ----

    Optional<Database> getDatabase(MetastoreContext metastoreContext, String databaseName);

    List<String> getAllDatabases(MetastoreContext metastoreContext);

    void createDatabase(MetastoreContext metastoreContext, Database database);

    void dropDatabase(MetastoreContext metastoreContext, String databaseName);

    void renameDatabase(MetastoreContext metastoreContext, String databaseName, String newDatabaseName);

    // ---- Table operations ----

    Optional<Table> getTable(MetastoreContext metastoreContext, String databaseName, String tableName);

    Optional<List<String>> getAllTables(MetastoreContext metastoreContext, String databaseName);

    Optional<List<String>> getAllViews(MetastoreContext metastoreContext, String databaseName);

    void createTable(MetastoreContext metastoreContext, Table table, PrincipalPrivileges principalPrivileges);

    void dropTable(MetastoreContext metastoreContext, String databaseName, String tableName, boolean deleteData);

    /**
     * This should only be used if the semantic here is drop and add. Trying to
     * alter one field of a table object previously acquired from getTable is
     * probably not what you want.
     */
    void replaceTable(MetastoreContext metastoreContext, String databaseName, String tableName, Table newTable, PrincipalPrivileges principalPrivileges);

    void renameTable(MetastoreContext metastoreContext, String databaseName, String tableName, String newDatabaseName, String newTableName);

    // ---- Column operations ----

    void addColumn(MetastoreContext metastoreContext, String databaseName, String tableName, String columnName, HiveType columnType, String columnComment);

    void renameColumn(MetastoreContext metastoreContext, String databaseName, String tableName, String oldColumnName, String newColumnName);

    void dropColumn(MetastoreContext metastoreContext, String databaseName, String tableName, String columnName);

    // ---- Statistics ----

    Set<ColumnStatisticType> getSupportedColumnStatistics(MetastoreContext metastoreContext, Type type);

    PartitionStatistics getTableStatistics(MetastoreContext metastoreContext, String databaseName, String tableName);

    Map<String, PartitionStatistics> getPartitionStatistics(MetastoreContext metastoreContext, String databaseName, String tableName, Set<String> partitionNames);

    void updateTableStatistics(MetastoreContext metastoreContext, String databaseName, String tableName, Function<PartitionStatistics, PartitionStatistics> update);

    void updatePartitionStatistics(MetastoreContext metastoreContext, String databaseName, String tableName, String partitionName, Function<PartitionStatistics, PartitionStatistics> update);

    // ---- Partition operations ----

    Optional<Partition> getPartition(MetastoreContext metastoreContext, String databaseName, String tableName, List<String> partitionValues);

    Optional<List<String>> getPartitionNames(MetastoreContext metastoreContext, String databaseName, String tableName);

    List<String> getPartitionNamesByFilter(
            MetastoreContext metastoreContext,
            String databaseName,
            String tableName,
            Map<Column, Domain> partitionPredicates);

    List<PartitionNameWithVersion> getPartitionNamesWithVersionByFilter(
            MetastoreContext metastoreContext,
            String databaseName,
            String tableName,
            Map<Column, Domain> partitionPredicates);

    Map<String, Optional<Partition>> getPartitionsByNames(MetastoreContext metastoreContext, String databaseName, String tableName, List<String> partitionNames);

    void addPartitions(MetastoreContext metastoreContext, String databaseName, String tableName, List<PartitionWithStatistics> partitions);

    void dropPartition(MetastoreContext metastoreContext, String databaseName, String tableName, List<String> parts, boolean deleteData);

    void alterPartition(MetastoreContext metastoreContext, String databaseName, String tableName, PartitionWithStatistics partition);

    // ---- Roles and privileges ----

    void createRole(MetastoreContext metastoreContext, String role, String grantor);

    void dropRole(MetastoreContext metastoreContext, String role);

    Set<String> listRoles(MetastoreContext metastoreContext);

    void grantRoles(MetastoreContext metastoreContext, Set<String> roles, Set<PrestoPrincipal> grantees, boolean withAdminOption, PrestoPrincipal grantor);

    void revokeRoles(MetastoreContext metastoreContext, Set<String> roles, Set<PrestoPrincipal> grantees, boolean adminOptionFor, PrestoPrincipal grantor);

    Set<RoleGrant> listRoleGrants(MetastoreContext metastoreContext, PrestoPrincipal principal);

    void grantTablePrivileges(MetastoreContext metastoreContext, String databaseName, String tableName, PrestoPrincipal grantee, Set<HivePrivilegeInfo> privileges);

    void revokeTablePrivileges(MetastoreContext metastoreContext, String databaseName, String tableName, PrestoPrincipal grantee, Set<HivePrivilegeInfo> privileges);

    Set<HivePrivilegeInfo> listTablePrivileges(MetastoreContext metastoreContext, String databaseName, String tableName, PrestoPrincipal principal);
}
Original file line number Diff line number Diff line change
Expand Up @@ -28,14 +28,16 @@ public class HivePageSinkMetadataProvider
private final SchemaTableName schemaTableName;
private final Optional<Table> table;
private final Map<List<String>, Optional<Partition>> modifiedPartitions;
private final MetastoreContext metastoreContext;

/**
 * Creates a metadata provider scoped to a single page sink.
 *
 * @param pageSinkMetadata table and modified-partition snapshot for the sink
 * @param delegate metastore used to look up partitions absent from the snapshot
 * @param metastoreContext identity context passed through on metastore calls
 */
public HivePageSinkMetadataProvider(HivePageSinkMetadata pageSinkMetadata, ExtendedHiveMetastore delegate, MetastoreContext metastoreContext)
{
    requireNonNull(pageSinkMetadata, "pageSinkMetadata is null");
    // Fail fast on a null metastore instead of deferring the NPE to first lookup.
    this.delegate = requireNonNull(delegate, "delegate is null");
    this.schemaTableName = pageSinkMetadata.getSchemaTableName();
    this.table = pageSinkMetadata.getTable();
    this.modifiedPartitions = pageSinkMetadata.getModifiedPartitions();
    this.metastoreContext = requireNonNull(metastoreContext, "metastoreContext is null");
}

public Optional<Table> getTable()
Expand All @@ -51,7 +53,7 @@ public Optional<Partition> getPartition(List<String> partitionValues)
}
Optional<Partition> modifiedPartition = modifiedPartitions.get(partitionValues);
if (modifiedPartition == null) {
return delegate.getPartition(schemaTableName.getSchemaName(), schemaTableName.getTableName(), partitionValues);
return delegate.getPartition(metastoreContext, schemaTableName.getSchemaName(), schemaTableName.getTableName(), partitionValues);
}
else {
return modifiedPartition;
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,70 @@
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.facebook.presto.hive.metastore;

import com.facebook.presto.spi.security.ConnectorIdentity;

import java.util.Objects;

import static com.google.common.base.MoreObjects.toStringHelper;
import static java.util.Objects.requireNonNull;

/**
 * Per-request context passed to metastore operations; carries the identity of
 * the end user on whose behalf the metastore is being accessed. Instances are
 * immutable and safe to use as cache keys.
 */
public class MetastoreContext
{
    private final String username;

    /**
     * Builds a context from a connector identity; both the identity and its
     * user name must be non-null.
     */
    public MetastoreContext(ConnectorIdentity identity)
    {
        requireNonNull(identity, "identity is null");
        this.username = requireNonNull(identity.getUser(), "identity.getUser() is null");
    }

    public MetastoreContext(String username)
    {
        this.username = requireNonNull(username, "username is null");
    }

    public String getUsername()
    {
        return username;
    }

    @Override
    public String toString()
    {
        return toStringHelper(this)
                .add("username", username)
                .toString();
    }

    @Override
    public boolean equals(Object obj)
    {
        if (obj == this) {
            return true;
        }
        // Exact-class comparison keeps equals symmetric if this class is ever subclassed.
        if (obj == null || obj.getClass() != getClass()) {
            return false;
        }
        MetastoreContext that = (MetastoreContext) obj;
        return Objects.equals(this.username, that.username);
    }

    @Override
    public int hashCode()
    {
        return Objects.hash(username);
    }
}
Original file line number Diff line number Diff line change
Expand Up @@ -411,9 +411,9 @@ public static void verifyOnline(SchemaTableName tableName, Optional<String> part
}
}

public static void verifyCanDropColumn(ExtendedHiveMetastore metastore, String databaseName, String tableName, String columnName)
public static void verifyCanDropColumn(ExtendedHiveMetastore metastore, MetastoreContext metastoreContext, String databaseName, String tableName, String columnName)
{
Table table = metastore.getTable(databaseName, tableName)
Table table = metastore.getTable(metastoreContext, databaseName, tableName)
.orElseThrow(() -> new TableNotFoundException(new SchemaTableName(databaseName, tableName)));

if (table.getPartitionColumns().stream().anyMatch(column -> column.getName().equals(columnName))) {
Expand Down
Loading