plugin/trino-hive/pom.xml (6 additions, 0 deletions)
@@ -371,6 +371,12 @@
             <scope>test</scope>
         </dependency>

+        <dependency>
+            <groupId>io.trino</groupId>
+            <artifactId>trino-tpcds</artifactId>
+            <scope>test</scope>
+        </dependency>
+
         <dependency>
             <groupId>io.trino</groupId>
             <artifactId>trino-tpch</artifactId>

@@ -26,6 +26,9 @@
 import io.trino.plugin.hive.metastore.MetastoreConfig;
 import io.trino.plugin.hive.metastore.file.FileHiveMetastore;
 import io.trino.plugin.hive.metastore.file.FileHiveMetastoreConfig;
+import io.trino.plugin.tpcds.TpcdsPlugin;
+import io.trino.plugin.tpch.ColumnNaming;
+import io.trino.plugin.tpch.DecimalTypeMapping;
 import io.trino.plugin.tpch.TpchPlugin;
 import io.trino.spi.security.Identity;
 import io.trino.spi.security.PrincipalType;
@@ -49,7 +52,10 @@
 import static io.airlift.log.Level.WARN;
 import static io.airlift.units.Duration.nanosSince;
 import static io.trino.plugin.hive.HiveTestUtils.HDFS_ENVIRONMENT;
+import static io.trino.plugin.hive.security.HiveSecurityModule.ALLOW_ALL;
 import static io.trino.plugin.hive.security.HiveSecurityModule.SQL_STANDARD;
+import static io.trino.plugin.tpch.ColumnNaming.SIMPLIFIED;
+import static io.trino.plugin.tpch.DecimalTypeMapping.DOUBLE;
 import static io.trino.plugin.tpch.TpchMetadata.TINY_SCHEMA_NAME;
 import static io.trino.spi.security.SelectedRole.Type.ROLE;
 import static io.trino.testing.QueryAssertions.copyTpchTables;
@@ -110,6 +116,10 @@ public static class Builder<SELF extends Builder<?>>
         };
         private Module module = EMPTY_MODULE;
         private Optional<CachingDirectoryLister> cachingDirectoryLister = Optional.empty();
+        private boolean tpcdsCatalogEnabled;
+        private String security = SQL_STANDARD;
+        private ColumnNaming tpchColumnNaming = SIMPLIFIED;
+        private DecimalTypeMapping tpchDecimalTypeMapping = DOUBLE;

         protected Builder()
         {
@@ -182,6 +192,30 @@ public SELF setCachingDirectoryLister(CachingDirectoryLister cachingDirectoryLister)
             return self();
         }

+        public SELF setTpcdsCatalogEnabled(boolean tpcdsCatalogEnabled)
+        {
+            this.tpcdsCatalogEnabled = tpcdsCatalogEnabled;
+            return self();
+        }
+
+        public SELF setSecurity(String security)
+        {
+            this.security = requireNonNull(security, "security is null");
+            return self();
+        }
+
+        public SELF setTpchColumnNaming(ColumnNaming tpchColumnNaming)
+        {
+            this.tpchColumnNaming = requireNonNull(tpchColumnNaming, "tpchColumnNaming is null");
+            return self();
+        }
+
+        public SELF setTpchDecimalTypeMapping(DecimalTypeMapping tpchDecimalTypeMapping)
+        {
+            this.tpchDecimalTypeMapping = requireNonNull(tpchDecimalTypeMapping, "tpchDecimalTypeMapping is null");
+            return self();
+        }
+
         @Override
         public DistributedQueryRunner build()
                 throws Exception
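A minimal sketch of how a test might exercise the new builder knobs (hypothetical caller code, not part of this diff; assumes the enclosing query runner class exposes the static builder() that main() below uses):

    DistributedQueryRunner queryRunner = builder()
            .setTpcdsCatalogEnabled(true)                          // also installs TpcdsPlugin, see build() below
            .setSecurity(ALLOW_ALL)                                // default stays SQL_STANDARD
            .setTpchColumnNaming(ColumnNaming.STANDARD)            // o_orderkey instead of orderkey
            .setTpchDecimalTypeMapping(DecimalTypeMapping.DECIMAL) // decimal columns instead of double
            .build();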
@@ -192,7 +226,16 @@ public DistributedQueryRunner build()

             try {
                 queryRunner.installPlugin(new TpchPlugin());
-                queryRunner.createCatalog("tpch", "tpch");
+                Map<String, String> tpchCatalogProperties = ImmutableMap.<String, String>builder()
+                        .put("tpch.column-naming", tpchColumnNaming.name())
+                        .put("tpch.double-type-mapping", tpchDecimalTypeMapping.name())
+                        .buildOrThrow();
+                queryRunner.createCatalog("tpch", "tpch", tpchCatalogProperties);
+
+                if (tpcdsCatalogEnabled) {
+                    queryRunner.installPlugin(new TpcdsPlugin());
+                    queryRunner.createCatalog("tpcds", "tpcds");
+                }

                 HiveMetastore metastore = this.metastore.apply(queryRunner);
                 queryRunner.installPlugin(new TestingHivePlugin(metastore, module, cachingDirectoryLister));
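With the catalog wiring above, the tpch catalog now honors the two connector properties, and a tpcds catalog is created on demand. Illustrative follow-up queries (a sketch; assumes the tiny schemas both connectors ship):

    queryRunner.execute("SELECT count(*) FROM tpch.tiny.orders");       // column names/types follow tpchColumnNaming and tpchDecimalTypeMapping
    queryRunner.execute("SELECT count(*) FROM tpcds.tiny.store_sales"); // available only when tpcdsCatalogEnabled is set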
@@ -209,7 +252,7 @@ public DistributedQueryRunner build()
hiveProperties.put("hive.parquet.time-zone", TIME_ZONE.getID());
}
hiveProperties.put("hive.max-partitions-per-scan", "1000");
hiveProperties.put("hive.security", SQL_STANDARD);
hiveProperties.put("hive.security", security);
hiveProperties.putAll(this.hiveProperties.buildOrThrow());

Map<String, String> hiveBucketedProperties = ImmutableMap.<String, String>builder()
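For context on the setSecurity() values used here: SQL_STANDARD enforces SQL-standard authorization (roles and GRANTs, hence the SET ROLE note in main() below), while ALLOW_ALL skips authorization checks. A sketch of the assumed mapping:

    // Assumed constant values (from HiveSecurityModule):
    // SQL_STANDARD = "sql-standard"  -> roles and privileges are enforced
    // ALLOW_ALL    = "allow-all"     -> no authorization checks; convenient for local runs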
@@ -243,7 +286,7 @@ private void populateData(DistributedQueryRunner queryRunner, HiveMetastore metastore)
             if (metastore.getDatabase(TPCH_BUCKETED_SCHEMA).isEmpty()) {
                 metastore.createDatabase(createDatabaseMetastoreObject(TPCH_BUCKETED_SCHEMA, initialSchemasLocationBase));
                 Session session = initialTablesSessionMutator.apply(createBucketedSession(Optional.empty()));
-                copyTpchTablesBucketed(queryRunner, "tpch", TINY_SCHEMA_NAME, session, initialTables);
+                copyTpchTablesBucketed(queryRunner, "tpch", TINY_SCHEMA_NAME, session, initialTables, tpchColumnNaming);
             }
         }
     }
@@ -297,49 +340,55 @@ private static void copyTpchTablesBucketed(
             String sourceCatalog,
             String sourceSchema,
             Session session,
-            Iterable<TpchTable<?>> tables)
+            Iterable<TpchTable<?>> tables,
+            ColumnNaming columnNaming)
     {
         log.info("Loading data from %s.%s...", sourceCatalog, sourceSchema);
         long startTime = System.nanoTime();
         for (TpchTable<?> table : tables) {
-            copyTableBucketed(queryRunner, new QualifiedObjectName(sourceCatalog, sourceSchema, table.getTableName().toLowerCase(ENGLISH)), session);
+            copyTableBucketed(queryRunner, new QualifiedObjectName(sourceCatalog, sourceSchema, table.getTableName().toLowerCase(ENGLISH)), table, session, columnNaming);
         }
         log.info("Loading from %s.%s complete in %s", sourceCatalog, sourceSchema, nanosSince(startTime).toString(SECONDS));
     }

-    private static void copyTableBucketed(QueryRunner queryRunner, QualifiedObjectName table, Session session)
+    private static void copyTableBucketed(QueryRunner queryRunner, QualifiedObjectName tableName, TpchTable<?> table, Session session, ColumnNaming columnNaming)
     {
         long start = System.nanoTime();
-        log.info("Running import for %s", table.getObjectName());
+        log.info("Running import for %s", tableName.getObjectName());
         @Language("SQL") String sql;
-        switch (table.getObjectName()) {
+        switch (tableName.getObjectName()) {
             case "part":
             case "partsupp":
             case "supplier":
             case "nation":
             case "region":
-                sql = format("CREATE TABLE %s AS SELECT * FROM %s", table.getObjectName(), table);
+                sql = format("CREATE TABLE %s AS SELECT * FROM %s", tableName.getObjectName(), tableName);
                 break;
             case "lineitem":
-                sql = format("CREATE TABLE %s WITH (bucketed_by=array['orderkey'], bucket_count=11) AS SELECT * FROM %s", table.getObjectName(), table);
+                sql = format(
+                        "CREATE TABLE %s WITH (bucketed_by=array['%s'], bucket_count=11) AS SELECT * FROM %s",
+                        tableName.getObjectName(),
+                        columnNaming.getName(table.getColumn("orderkey")),
+                        tableName);
                 break;
             case "customer":
-                sql = format("CREATE TABLE %s WITH (bucketed_by=array['custkey'], bucket_count=11) AS SELECT * FROM %s", table.getObjectName(), table);
-                break;
             case "orders":
-                sql = format("CREATE TABLE %s WITH (bucketed_by=array['custkey'], bucket_count=11) AS SELECT * FROM %s", table.getObjectName(), table);
+                sql = format(
+                        "CREATE TABLE %s WITH (bucketed_by=array['%s'], bucket_count=11) AS SELECT * FROM %s",
+                        tableName.getObjectName(),
+                        columnNaming.getName(table.getColumn("custkey")),
+                        tableName);
                 break;
             default:
                 throw new UnsupportedOperationException();
         }
         long rows = (Long) queryRunner.execute(session, sql).getMaterializedRows().get(0).getField(0);
-        log.info("Imported %s rows for %s in %s", rows, table.getObjectName(), nanosSince(start).convertToMostSuccinctTimeUnit());
+        log.info("Imported %s rows for %s in %s", rows, tableName.getObjectName(), nanosSince(start).convertToMostSuccinctTimeUnit());
     }

     public static void main(String[] args)
             throws Exception
     {
         // You need to add "--user admin" to your CLI and execute "SET ROLE admin IN hive" for queries to work
         Optional<Path> baseDataDir = Optional.empty();
         if (args.length > 0) {
             if (args.length != 1) {
@@ -358,6 +407,12 @@ public static void main(String[] args)
                 .setHiveProperties(ImmutableMap.of())
                 .setInitialTables(TpchTable.getTables())
                 .setBaseDataDir(baseDataDir)
+                .setTpcdsCatalogEnabled(true)
+                .setSecurity(ALLOW_ALL)
+                // Uncomment to enable standard column naming (column names prefixed with the first letter of the table name, e.g. o_orderkey instead of orderkey)
+                // and standard column types (decimal instead of double for some columns). This allows running unmodified TPC-H queries on the cluster.
+                // .setTpchColumnNaming(STANDARD)
+                // .setTpchDecimalTypeMapping(DECIMAL)
                 .build();
         Thread.sleep(10);
         log.info("======== SERVER STARTED ========");

@@ -52,6 +52,7 @@
 import io.trino.spi.statistics.Estimate;
 import io.trino.spi.statistics.TableStatistics;
 import io.trino.spi.statistics.TableStatisticsMetadata;
+import io.trino.spi.type.DecimalType;
 import io.trino.spi.type.Type;
 import io.trino.spi.type.VarcharType;
 import io.trino.tpch.Distributions;
@@ -368,7 +369,7 @@ private static double toDouble(Object value, Type columnType)
             if (columnType.equals(BIGINT) || columnType.equals(INTEGER) || columnType.equals(DATE)) {
                 return ((Number) value).longValue();
             }
-            if (columnType.equals(DOUBLE)) {
+            if (columnType.equals(DOUBLE) || columnType instanceof DecimalType) {
                 return ((Number) value).doubleValue();
             }
         }
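The widened branch matters once tpch.double-type-mapping=DECIMAL is in use: price columns then surface as DecimalType, and their statistics values arrive here as BigDecimal rather than Double. A minimal sketch (precision/scale assumed for illustration):

    DecimalType priceType = DecimalType.createDecimalType(12, 2);   // assumed TPC-H price type
    Number value = new java.math.BigDecimal("904.00");
    double stat = value.doubleValue();                              // both branches reduce to Number.doubleValue()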