SystemSessionProperties.java
@@ -300,6 +300,7 @@ public final class SystemSessionProperties
public static final String TRACK_PARTIAL_AGGREGATION_HISTORY = "track_partial_aggregation_history";
public static final String REMOVE_REDUNDANT_CAST_TO_VARCHAR_IN_JOIN = "remove_redundant_cast_to_varchar_in_join";
public static final String HANDLE_COMPLEX_EQUI_JOINS = "handle_complex_equi_joins";
public static final String SKIP_HASH_GENERATION_FOR_JOIN_WITH_TABLE_SCAN_INPUT = "skip_hash_generation_for_join_with_table_scan_input";

// TODO: Native execution related session properties that are temporarily put here. They will be relocated in the future.
public static final String NATIVE_SIMPLIFIED_EXPRESSION_EVALUATION_ENABLED = "native_simplified_expression_evaluation_enabled";
@@ -1805,6 +1806,11 @@ public SystemSessionProperties(
HANDLE_COMPLEX_EQUI_JOINS,
"Handle complex equi-join conditions to open up join space for join reordering",
featuresConfig.getHandleComplexEquiJoins(),
false),
booleanProperty(
SKIP_HASH_GENERATION_FOR_JOIN_WITH_TABLE_SCAN_INPUT,
"Skip hash generation for join, when input is table scan node",
featuresConfig.isSkipHashGenerationForJoinWithTableScanInput(),
false));
}

@@ -3014,4 +3020,9 @@ public static boolean shouldHandleComplexEquiJoins(Session session)
{
return session.getSystemProperty(HANDLE_COMPLEX_EQUI_JOINS, Boolean.class);
}

public static boolean skipHashGenerationForJoinWithTableScanInput(Session session)
{
return session.getSystemProperty(SKIP_HASH_GENERATION_FOR_JOIN_WITH_TABLE_SCAN_INPUT, Boolean.class);
}
}
FeaturesConfig.java
@@ -291,6 +291,7 @@ public class FeaturesConfig
private boolean trackPartialAggregationHistory = true;

private boolean removeRedundantCastToVarcharInJoin = true;
private boolean skipHashGenerationForJoinWithTableScanInput;

public enum PartitioningPrecisionStrategy
{
@@ -2901,4 +2902,17 @@ public FeaturesConfig setHandleComplexEquiJoins(boolean handleComplexEquiJoins)
this.handleComplexEquiJoins = handleComplexEquiJoins;
return this;
}

public boolean isSkipHashGenerationForJoinWithTableScanInput()
{
return skipHashGenerationForJoinWithTableScanInput;
}

@Config("optimizer.skip-hash-generation-for-join-with-table-scan-input")
@ConfigDescription("Skip hash generation for join, when input is table scan node")
public FeaturesConfig setSkipHashGenerationForJoinWithTableScanInput(boolean skipHashGenerationForJoinWithTableScanInput)
{
this.skipHashGenerationForJoinWithTableScanInput = skipHashGenerationForJoinWithTableScanInput;
return this;
}
}
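Cluster-wide, the new default can be changed through the config property registered above; a minimal sketch, assuming the standard Presto coordinator config.properties layout (the file location is an assumption, the property name is taken from this diff):

# etc/config.properties (assumed location): enable the optimization for all sessions
optimizer.skip-hash-generation-for-join-with-table-scan-input=true

The per-query equivalent is the skip_hash_generation_for_join_with_table_scan_input session property registered in SystemSessionProperties above.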
HashGenerationOptimizer.java
@@ -25,6 +25,7 @@
import com.facebook.presto.spi.plan.PlanNode;
import com.facebook.presto.spi.plan.PlanNodeIdAllocator;
import com.facebook.presto.spi.plan.ProjectNode;
import com.facebook.presto.spi.plan.TableScanNode;
import com.facebook.presto.spi.plan.UnionNode;
import com.facebook.presto.spi.relation.CallExpression;
import com.facebook.presto.spi.relation.RowExpression;
@@ -65,6 +66,7 @@
import java.util.Set;
import java.util.function.Function;

import static com.facebook.presto.SystemSessionProperties.skipHashGenerationForJoinWithTableScanInput;
import static com.facebook.presto.common.type.BigintType.BIGINT;
import static com.facebook.presto.spi.plan.ProjectNode.Locality.LOCAL;
import static com.facebook.presto.spi.plan.ProjectNode.Locality.REMOTE;
@@ -121,7 +123,7 @@ public PlanOptimizerResult optimize(PlanNode plan, Session session, TypeProvider
requireNonNull(variableAllocator, "variableAllocator is null");
requireNonNull(idAllocator, "idAllocator is null");
if (isEnabled(session)) {
- PlanWithProperties result = new Rewriter(idAllocator, variableAllocator, functionAndTypeManager).accept(plan, new HashComputationSet());
+ PlanWithProperties result = new Rewriter(idAllocator, variableAllocator, functionAndTypeManager, session).accept(plan, new HashComputationSet());
return PlanOptimizerResult.optimizerResult(result.getNode(), true);
}
return PlanOptimizerResult.optimizerResult(plan, false);
@@ -133,12 +135,14 @@ private static class Rewriter
private final PlanNodeIdAllocator idAllocator;
private final VariableAllocator variableAllocator;
private final FunctionAndTypeManager functionAndTypeManager;
private final Session session;

- private Rewriter(PlanNodeIdAllocator idAllocator, VariableAllocator variableAllocator, FunctionAndTypeManager functionAndTypeManager)
+ private Rewriter(PlanNodeIdAllocator idAllocator, VariableAllocator variableAllocator, FunctionAndTypeManager functionAndTypeManager, Session session)
{
this.idAllocator = requireNonNull(idAllocator, "idAllocator is null");
this.variableAllocator = requireNonNull(variableAllocator, "variableAllocator is null");
this.functionAndTypeManager = requireNonNull(functionAndTypeManager, "functionManager is null");
this.session = requireNonNull(session, "session is null");
}

@Override
@@ -315,6 +319,11 @@ public PlanWithProperties visitTopNRowNumber(TopNRowNumberNode node, HashComputa
child.getHashVariables());
}

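// Skip precomputing the join hash only when the input is a bare table scan whose
// join key hash covers a single BIGINT variable and no ancestor requested this
// hash; the review thread below explains why the check is limited to TableScanNode.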
private boolean skipHashComputeForJoinInput(PlanNode node, Optional<HashComputation> hashComputation, HashComputationSet parentPreference)
{
return node instanceof TableScanNode && hashComputation.isPresent() && hashComputation.get().isSingleBigIntVariable() && !parentPreference.getHashes().contains(hashComputation.get());
}

Review thread on skipHashComputeForJoinInput:

Contributor: I'm curious why we would ever add this hash computation when the parent does not require it. I.e., wouldn't this check simply always be
"return hashComputation.isPresent() && !parentPreference.getHashes().contains(hashComputation.get());"?

Contributor Author: This optimization is based on our observation that a TableScan below a join is significantly faster than a ScanProject (here the project is for hash generation) for a BIGINT join key. We do not observe the same for other cases.

Contributor: What about other operators, like filter/project on top of a table scan, or values?

Contributor Author: In the verifier suite I didn't observe the same performance improvement for those cases, hence I limit this to the specific case where I see the most significant performance improvement.

Contributor: This was similar to my question above. If you look at this comment in the code, it seems to suggest that aggregations in general perform better for BIGINTs without the generated hash. I suspect the same principle applies to joins, and I wonder if we are special-casing this too much by adding the TableScanNode check. Would it be possible to share the benchmarks you are seeing this behavior on?

Contributor Author:
> If you look at this comment in the code, it seems to suggest that aggregations in general perform better for BIGINTs without the generated hash.

I see; this is because we have a custom group-by hash, BigintGroupByHash, for group-by on a single BIGINT column, which does not utilize an existing pre-computed hash. I am not sure joins would show the same pattern, but we currently do not have a specialized BIGINT join hash implementation like group-by has, so what applies to group-by may not carry over to joins here.

> Would it be possible to share the benchmarks you are seeing this behavior on?

The benchmarks are based on production queries and cannot be shared. The queries that improve most are those that have a table scan as the input source of a join: the plan changes from join <- ScanProject to join <- TableScan, and the biggest savings come from replacing the ScanProject with a plain TableScan, especially when the input table is huge. That is why I specialize this optimization for that case.

Contributor: Fair enough. We can always relax this constraint in the future if needed.

@Override
public PlanWithProperties visitJoin(JoinNode node, HashComputationSet parentPreference)
{
@@ -333,13 +342,19 @@ public PlanWithProperties visitJoin(JoinNode node, HashComputationSet parentPref
// join does not pass through preferred hash variables since they take more memory and since
// the join node filters, may take more compute
Optional<HashComputation> leftHashComputation = computeHash(Lists.transform(clauses, JoinNode.EquiJoinClause::getLeft), functionAndTypeManager);
if (skipHashGenerationForJoinWithTableScanInput(session) && skipHashComputeForJoinInput(node.getLeft(), leftHashComputation, parentPreference)) {
leftHashComputation = Optional.empty();
}
PlanWithProperties left = planAndEnforce(node.getLeft(), new HashComputationSet(leftHashComputation), true, new HashComputationSet(leftHashComputation));
- VariableReferenceExpression leftHashVariable = left.getRequiredHashVariable(leftHashComputation.get());
+ Optional<VariableReferenceExpression> leftHashVariable = leftHashComputation.isPresent() ? Optional.of(left.getRequiredHashVariable(leftHashComputation.get())) : Optional.empty();

Optional<HashComputation> rightHashComputation = computeHash(Lists.transform(clauses, JoinNode.EquiJoinClause::getRight), functionAndTypeManager);
if (skipHashGenerationForJoinWithTableScanInput(session) && skipHashComputeForJoinInput(node.getRight(), rightHashComputation, parentPreference)) {
rightHashComputation = Optional.empty();
}
// drop undesired hash variables from build to save memory
PlanWithProperties right = planAndEnforce(node.getRight(), new HashComputationSet(rightHashComputation), true, new HashComputationSet(rightHashComputation));
- VariableReferenceExpression rightHashVariable = right.getRequiredHashVariable(rightHashComputation.get());
+ Optional<VariableReferenceExpression> rightHashVariable = rightHashComputation.isPresent() ? Optional.of(right.getRequiredHashVariable(rightHashComputation.get())) : Optional.empty();

// build map of all hash variables
// NOTE: Full outer join doesn't use hash variables
@@ -351,7 +366,7 @@ public PlanWithProperties visitJoin(JoinNode node, HashComputationSet parentPref
allHashVariables.putAll(right.getHashVariables());
}

- return buildJoinNodeWithPreferredHashes(node, left, right, allHashVariables, parentPreference, Optional.of(leftHashVariable), Optional.of(rightHashVariable));
+ return buildJoinNodeWithPreferredHashes(node, left, right, allHashVariables, parentPreference, leftHashVariable, rightHashVariable);
}

private PlanWithProperties buildJoinNodeWithPreferredHashes(
@@ -929,6 +944,11 @@ public boolean canComputeWith(Set<VariableReferenceExpression> availableFields)
return availableFields.containsAll(fields);
}

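// True when this hash computation covers exactly one BIGINT field, the only case
// in which skipHashComputeForJoinInput lets a join input drop the precomputed hash.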
public boolean isSingleBigIntVariable()
{
return fields.size() == 1 && Iterables.getOnlyElement(fields).getType().equals(BIGINT);
}

private RowExpression getHashExpression()
{
RowExpression hashExpression = constant(INITIAL_HASH_VALUE, BIGINT);
TestFeaturesConfig.java
@@ -253,7 +253,8 @@ public void testDefaults()
.setRewriteConstantArrayContainsToInEnabled(false)
.setUseHBOForScaledWriters(false)
.setRemoveRedundantCastToVarcharInJoin(true)
- .setHandleComplexEquiJoins(false));
+ .setHandleComplexEquiJoins(false)
+ .setSkipHashGenerationForJoinWithTableScanInput(false));
}

@Test
@@ -455,6 +456,7 @@ public void testExplicitPropertyMappings()
.put("optimizer.use-hbo-for-scaled-writers", "true")
.put("optimizer.remove-redundant-cast-to-varchar-in-join", "false")
.put("optimizer.handle-complex-equi-joins", "true")
.put("optimizer.skip-hash-generation-for-join-with-table-scan-input", "true")
.build();

FeaturesConfig expected = new FeaturesConfig()
@@ -653,7 +655,8 @@ public void testExplicitPropertyMappings()
.setRewriteConstantArrayContainsToInEnabled(true)
.setUseHBOForScaledWriters(true)
.setRemoveRedundantCastToVarcharInJoin(false)
- .setHandleComplexEquiJoins(true);
+ .setHandleComplexEquiJoins(true)
+ .setSkipHashGenerationForJoinWithTableScanInput(true);
assertFullMapping(properties, expected);
}

TestHashGenerationOptimizer.java (new file)
@@ -0,0 +1,76 @@
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.facebook.presto.sql.planner.optimizations;

import com.facebook.presto.Session;
import com.facebook.presto.sql.planner.assertions.BasePlanTest;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import org.testng.annotations.Test;

import static com.facebook.presto.SystemSessionProperties.JOIN_DISTRIBUTION_TYPE;
import static com.facebook.presto.SystemSessionProperties.JOIN_REORDERING_STRATEGY;
import static com.facebook.presto.SystemSessionProperties.SKIP_HASH_GENERATION_FOR_JOIN_WITH_TABLE_SCAN_INPUT;
import static com.facebook.presto.sql.analyzer.FeaturesConfig.JoinDistributionType.BROADCAST;
import static com.facebook.presto.sql.analyzer.FeaturesConfig.JoinReorderingStrategy.ELIMINATE_CROSS_JOINS;
import static com.facebook.presto.sql.planner.assertions.PlanMatchPattern.anyTree;
import static com.facebook.presto.sql.planner.assertions.PlanMatchPattern.equiJoinClause;
import static com.facebook.presto.sql.planner.assertions.PlanMatchPattern.exchange;
import static com.facebook.presto.sql.planner.assertions.PlanMatchPattern.join;
import static com.facebook.presto.sql.planner.assertions.PlanMatchPattern.project;
import static com.facebook.presto.sql.planner.assertions.PlanMatchPattern.tableScan;
import static com.facebook.presto.sql.planner.plan.ExchangeNode.Scope.REMOTE_STREAMING;
import static com.facebook.presto.sql.planner.plan.ExchangeNode.Type.REPLICATE;
import static com.facebook.presto.sql.planner.plan.JoinNode.Type.INNER;

public class TestHashGenerationOptimizer
extends BasePlanTest
{
@Test
public void testSkipHashGenerationForJoinWithTableScanInput()
{
Session enable = Session.builder(this.getQueryRunner().getDefaultSession())
.setSystemProperty(JOIN_REORDERING_STRATEGY, ELIMINATE_CROSS_JOINS.name())
.setSystemProperty(JOIN_DISTRIBUTION_TYPE, BROADCAST.name())
.setSystemProperty(SKIP_HASH_GENERATION_FOR_JOIN_WITH_TABLE_SCAN_INPUT, "true")
.build();
assertPlanWithSession("select * from lineitem l join orders o on l.partkey=o.custkey",
enable,
false,
anyTree(
join(INNER, ImmutableList.of(equiJoinClause("partkey", "custkey")),
tableScan("lineitem", ImmutableMap.of("partkey", "partkey")),
anyTree(
exchange(REMOTE_STREAMING, REPLICATE,
anyTree(
tableScan("orders", ImmutableMap.of("custkey", "custkey"))))))));

Session disable = Session.builder(this.getQueryRunner().getDefaultSession())
.setSystemProperty(JOIN_REORDERING_STRATEGY, ELIMINATE_CROSS_JOINS.name())
.setSystemProperty(JOIN_DISTRIBUTION_TYPE, BROADCAST.name())
.setSystemProperty(SKIP_HASH_GENERATION_FOR_JOIN_WITH_TABLE_SCAN_INPUT, "false")
.build();
assertPlanWithSession("select * from lineitem l join orders o on l.partkey=o.custkey",
disable,
false,
anyTree(
join(INNER, ImmutableList.of(equiJoinClause("partkey", "custkey")),
project(
tableScan("lineitem", ImmutableMap.of("partkey", "partkey"))),
anyTree(
exchange(REMOTE_STREAMING, REPLICATE,
anyTree(
tableScan("orders", ImmutableMap.of("custkey", "custkey"))))))));
}
}
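Outside the test harness, the plan change can be spot-checked with EXPLAIN; a sketch against the TPC-H tables the test above uses (assumes a catalog/schema where lineitem and orders resolve, e.g. tpch.tiny):

-- With the property enabled, the lineitem side of the join should plan as a bare
-- TableScan instead of a hash-generating Project over a TableScan.
SET SESSION skip_hash_generation_for_join_with_table_scan_input = true;
EXPLAIN SELECT * FROM lineitem l JOIN orders o ON l.partkey = o.custkey;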