Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -108,6 +108,7 @@
import static com.facebook.presto.SystemSessionProperties.isDistributedSortEnabled;
import static com.facebook.presto.SystemSessionProperties.isExactPartitioningPreferred;
import static com.facebook.presto.SystemSessionProperties.isForceSingleNodeOutput;
import static com.facebook.presto.SystemSessionProperties.isNativeExecutionEnabled;
import static com.facebook.presto.SystemSessionProperties.isPreferDistributedUnion;
import static com.facebook.presto.SystemSessionProperties.isPrestoSparkAssignBucketToPartitionForPartitionedTableWriteEnabled;
import static com.facebook.presto.SystemSessionProperties.isRedistributeWrites;
Expand All @@ -116,6 +117,7 @@
import static com.facebook.presto.SystemSessionProperties.preferStreamingOperators;
import static com.facebook.presto.expressions.LogicalRowExpressions.TRUE_CONSTANT;
import static com.facebook.presto.operator.aggregation.AggregationUtils.hasSingleNodeExecutionPreference;
import static com.facebook.presto.spi.ConnectorId.isInternalSystemConnector;
import static com.facebook.presto.spi.StandardErrorCode.NOT_SUPPORTED;
import static com.facebook.presto.spi.plan.LimitNode.Step.PARTIAL;
import static com.facebook.presto.sql.planner.FragmentTableScanCounter.getNumberOfTableScans;
Expand All @@ -129,6 +131,7 @@
import static com.facebook.presto.sql.planner.optimizations.ActualProperties.Global.partitionedOn;
import static com.facebook.presto.sql.planner.optimizations.ActualProperties.Global.singleStreamPartition;
import static com.facebook.presto.sql.planner.optimizations.LocalProperties.grouped;
import static com.facebook.presto.sql.planner.optimizations.PlanNodeSearcher.searchFrom;
import static com.facebook.presto.sql.planner.optimizations.SetOperationNodeUtils.fromListMultimap;
import static com.facebook.presto.sql.planner.plan.ExchangeNode.Scope.REMOTE_MATERIALIZED;
import static com.facebook.presto.sql.planner.plan.ExchangeNode.Scope.REMOTE_STREAMING;
Expand Down Expand Up @@ -684,10 +687,22 @@ private ConnectorNodePartitioningProvider getPartitioningProvider(PartitioningHa
private PlanWithProperties planTableScan(TableScanNode node, RowExpression predicate)
{
    // Fold the predicate into the scan wherever the connector can absorb it.
    PlanNode result = pushPredicateIntoTableScan(node, predicate, true, session, idAllocator, metadata);

    // Presto Java and Presto Native use different hash functions for partitioning.
    // When native execution is enabled and this subtree scans an internal system table
    // (produced on the Java coordinator), add a gathering exchange so the rows flow
    // through a native worker before any downstream repartitioning.
    boolean needsNativeBoundary = isNativeExecutionEnabled(session) && containsSystemTableScan(result);
    if (needsNativeBoundary) {
        result = gatheringExchange(idAllocator.getNextId(), REMOTE_STREAMING, result);
    }

    // TODO: Support selecting layout with best local property once connector can participate in query optimization.
    return new PlanWithProperties(result, derivePropertiesRecursively(result));
}

/**
 * Returns true when any node in the given subtree is a table scan over an
 * internal system connector table.
 */
private boolean containsSystemTableScan(PlanNode plan)
{
    return searchFrom(plan)
            .where(node -> {
                if (!(node instanceof TableScanNode)) {
                    return false;
                }
                TableScanNode scan = (TableScanNode) node;
                return isInternalSystemConnector(scan.getTable().getConnectorId());
            })
            .matches();
}

@Override
public PlanWithProperties visitValues(ValuesNode node, PreferredProperties preferredProperties)
{
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -23,6 +23,7 @@
import java.util.Map;
import java.util.UUID;

import static com.facebook.presto.SystemSessionProperties.JOIN_DISTRIBUTION_TYPE;
import static com.facebook.presto.common.type.BigintType.BIGINT;
import static com.facebook.presto.nativeworker.NativeQueryRunnerUtils.createBucketedCustomer;
import static com.facebook.presto.nativeworker.NativeQueryRunnerUtils.createBucketedLineitemAndOrders;
Expand All @@ -37,6 +38,13 @@
import static com.facebook.presto.nativeworker.NativeQueryRunnerUtils.createPrestoBenchTables;
import static com.facebook.presto.nativeworker.NativeQueryRunnerUtils.createRegion;
import static com.facebook.presto.nativeworker.NativeQueryRunnerUtils.createSupplier;
import static com.facebook.presto.sql.planner.assertions.PlanMatchPattern.anyTree;
import static com.facebook.presto.sql.planner.assertions.PlanMatchPattern.exchange;
import static com.facebook.presto.sql.planner.assertions.PlanMatchPattern.join;
import static com.facebook.presto.sql.planner.assertions.PlanMatchPattern.tableScan;
import static com.facebook.presto.sql.planner.plan.ExchangeNode.Scope.REMOTE_STREAMING;
import static com.facebook.presto.sql.planner.plan.ExchangeNode.Type.GATHER;
import static com.facebook.presto.sql.planner.plan.ExchangeNode.Type.REPARTITION;
import static java.lang.String.format;
import static org.testng.Assert.assertEquals;

Expand Down Expand Up @@ -1192,6 +1200,46 @@ public void testDereference()
assertQuery("SELECT r[2] FROM (VALUES (ROW (ROW (1, 'a', true)))) AS v(r)");
}

@Test
public void testSystemTables()
{
    // Joins a partitioned table with its hidden "$partitions" system table and checks that,
    // under a partitioned join, the planner places a gathering exchange beneath the
    // repartitioning exchange on the system-table side (so coordinator-produced rows are
    // re-hashed by a native worker), then verifies the query actually runs.
    String table = generateRandomTableName();
    String partitionsTable = format("%s$partitions", table);

    try {
        getQueryRunner().execute(format("CREATE TABLE %s " +
                "WITH (partitioned_by = ARRAY['regionkey']) " +
                "AS " +
                "SELECT nationkey, name, comment, regionkey FROM nation", table));

        String joinQuery = format("SELECT * " +
                "FROM " +
                " (SELECT DISTINCT regionkey FROM %s) t " +
                "INNER JOIN " +
                " (SELECT regionkey FROM \"%s\") p " +
                "ON t.regionkey = p.regionkey", table, partitionsTable);

        // Force a partitioned (hash) join so the system-table side must be repartitioned.
        Session partitionedJoinSession = Session.builder(getSession())
                .setSystemProperty(JOIN_DISTRIBUTION_TYPE, "PARTITIONED")
                .build();
        assertPlan(
                partitionedJoinSession,
                joinQuery,
                anyTree(
                        join(
                                anyTree(tableScan(table)),
                                anyTree(
                                        exchange(REMOTE_STREAMING, REPARTITION,
                                                exchange(REMOTE_STREAMING, GATHER,
                                                        anyTree(
                                                                tableScan(partitionsTable))))))));
        assertQuery(partitionedJoinSession, joinQuery);
    }
    finally {
        dropTableIfExists(table);
    }
}

private void assertQueryResultCount(String sql, int expectedResultCount)
{
assertEquals(getQueryRunner().execute(sql).getRowCount(), expectedResultCount);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -404,7 +404,6 @@ private Session buildSessionForTableWrite()
.setSystemProperty("task_writer_count", "1")
.setSystemProperty("task_partitioned_writer_count", "2")
.setCatalogSessionProperty("hive", "collect_column_statistics_on_write", "true")
.setCatalogSessionProperty("hive", "optimized_partition_update_serialization_enabled", "false")
.setCatalogSessionProperty("hive", "orc_compression_codec", "ZSTD")
.build();
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -54,6 +54,9 @@ public void testInformationSchemaTables() {}
@Ignore
public void testShowAndDescribe() {}

// NOTE(review): deliberately overridden with an empty body to disable the inherited
// testSystemTables test for this runner — presumably system-table queries are not
// supported in this configuration; confirm the reason and consider adding @Ignore
// plus a tracking issue, matching the neighboring disabled tests.
@Override
public void testSystemTables() {}

// @TODO Refer https://github.com/prestodb/presto/issues/20294
@Override
@Ignore
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -138,7 +138,6 @@ public void testAggregationCompanionFunction()
Session session = Session.builder(getSession())
.setSystemProperty("table_writer_merge_operator_enabled", "false")
.setCatalogSessionProperty("hive", "collect_column_statistics_on_write", "false")
.setCatalogSessionProperty("hive", "optimized_partition_update_serialization_enabled", "false")
.setCatalogSessionProperty("hive", "orc_compression_codec", "ZSTD")
.build();
try {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -224,7 +224,16 @@ public static PrestoSparkQueryRunner createSpilledHivePrestoSparkQueryRunner(Ite
public static PrestoSparkQueryRunner createHivePrestoSparkQueryRunner(Iterable<TpchTable<?>> tables, Map<String, String> additionalConfigProperties, Map<String, String> hiveProperties, Optional<Path> dataDirectory)
{
PrestoSparkQueryRunner queryRunner = new PrestoSparkQueryRunner(
"hive", additionalConfigProperties, hiveProperties, ImmutableMap.of(), dataDirectory, ImmutableList.of(new NativeExecutionModule()), DEFAULT_AVAILABLE_CPU_COUNT);
"hive",
additionalConfigProperties,
ImmutableMap.<String, String>builder()
.putAll(hiveProperties)
.put("hive.experimental-optimized-partition-update-serialization-enabled", "true")
.build(),
ImmutableMap.of(),
dataDirectory,
ImmutableList.of(new NativeExecutionModule()),
DEFAULT_AVAILABLE_CPU_COUNT);
ExtendedHiveMetastore metastore = queryRunner.getMetastore();
if (!metastore.getDatabase(METASTORE_CONTEXT, "tpch").isPresent()) {
metastore.createDatabase(METASTORE_CONTEXT, createDatabaseMetastoreObject("tpch"));
Expand Down Expand Up @@ -369,7 +378,6 @@ public PrestoSparkQueryRunner(
pluginManager.installPlugin(new HivePlugin("hive", Optional.of(metastore)));

Map<String, String> properties = ImmutableMap.<String, String>builder()
.put("hive.experimental-optimized-partition-update-serialization-enabled", "true")
.put("hive.allow-drop-table", "true")
.put("hive.allow-rename-table", "true")
.put("hive.allow-rename-column", "true")
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -460,7 +460,7 @@ private int getRandomCoordinatorIndex()
@Override
public int getNodeCount()
{
    // Node count covers both the Java server processes and the external (native) workers.
    int serverCount = servers.size();
    int externalWorkerCount = externalWorkers.size();
    return serverCount + externalWorkerCount;
}

@Override
Expand Down