From 5c5ef1632a70b8029b2145e7ae2874bbbddb7627 Mon Sep 17 00:00:00 2001
From: Raunaq Morarka
Date: Sat, 14 Jun 2025 17:48:41 +0530
Subject: [PATCH 1/6] Remove unused code for pre-computed hashes in aggregation nodes

This code became unused after removal of HashGenerationOptimizer
---
 .../io/trino/operator/BigintGroupByHash.java | 19 +-
 .../trino/operator/DistinctLimitOperator.java | 22 +-
 .../io/trino/operator/FlatGroupByHash.java | 14 +-
 .../main/java/io/trino/operator/FlatHash.java | 27 +-
 .../java/io/trino/operator/GroupByHash.java | 10 +-
 .../io/trino/operator/GroupByHashMode.java | 9 +-
 .../operator/GroupByHashPageIndexer.java | 2 +-
 .../operator/HashAggregationOperator.java | 43 +--
 .../io/trino/operator/MarkDistinctHash.java | 4 +-
 .../trino/operator/MarkDistinctOperator.java | 24 +-
 .../io/trino/operator/RowNumberOperator.java | 18 +-
 .../trino/operator/TopNRankingOperator.java | 21 +-
 .../DistinctAccumulatorFactory.java | 2 -
 .../DistinctWindowAccumulator.java | 1 -
 .../InMemoryHashAggregationBuilder.java | 28 +-
 .../MergingHashAggregationBuilder.java | 4 -
 .../SpillableHashAggregationBuilder.java | 5 -
 .../partial/SkipAggregationBuilder.java | 5 +-
 .../index/UnloadedIndexKeyRecordSet.java | 2 +-
 .../sql/planner/LocalExecutionPlanner.java | 20 +-
 .../io/trino/sql/planner/QueryPlanner.java | 3 +-
 .../rule/AddIntermediateAggregations.java | 1 -
 .../iterative/rule/DecorrelateUnnest.java | 4 +-
 .../iterative/rule/ImplementOffset.java | 1 -
 .../rule/MergeLimitWithDistinct.java | 3 +-
 ...ipleDistinctAggregationToMarkDistinct.java | 3 +-
 ...tipleDistinctAggregationsToSubqueries.java | 4 +-
 .../OptimizeMixedDistinctAggregations.java | 2 -
 .../rule/PreAggregateCaseAggregations.java | 1 -
 .../rule/PruneAggregationSourceColumns.java | 1 -
 .../rule/PruneDistinctLimitSourceColumns.java | 1 -
 .../rule/PruneMarkDistinctColumns.java | 3 +-
 .../iterative/rule/PruneRowNumberColumns.java | 3 +-
 .../rule/PruneTopNRankingColumns.java | 3 +-
 .../rule/PushAggregationThroughOuterJoin.java | 4 -
 ...PushPartialAggregationThroughExchange.java | 4 +-
 .../PushPartialAggregationThroughJoin.java | 6 -
 ...hPredicateThroughProjectIntoRowNumber.java | 3 +-
 ...PushPredicateThroughProjectIntoWindow.java | 4 +-
 .../rule/PushdownFilterIntoRowNumber.java | 3 +-
 .../rule/PushdownFilterIntoWindow.java | 3 +-
 .../rule/PushdownLimitIntoRowNumber.java | 3 +-
 .../rule/PushdownLimitIntoWindow.java | 3 +-
 .../rule/RemoveEmptyGlobalAggregation.java | 2 -
 .../rule/RemoveRedundantDistinctLimit.java | 3 -
 .../rule/ReplaceWindowWithRowNumber.java | 1 -
 ...dDistinctAggregationWithoutProjection.java | 2 -
 ...latedGroupedAggregationWithProjection.java | 1 -
 ...edGroupedAggregationWithoutProjection.java | 1 -
 .../TransformCorrelatedScalarSubquery.java | 5 +-
 .../planner/optimizations/AddExchanges.java | 13 +-
 .../optimizations/AddLocalExchanges.java | 3 +-
 .../planner/optimizations/LimitPushDown.java | 3 +-
 .../optimizations/PlanNodeDecorrelator.java | 9 +-
 .../planner/optimizations/SymbolMapper.java | 10 +-
 .../UnaliasSymbolReferences.java | 4 +-
 .../optimizations/WindowFilterPushDown.java | 7 +-
 .../sql/planner/plan/AggregationNode.java | 24 +-
 .../sql/planner/plan/DistinctLimitNode.java | 20 +-
 .../sql/planner/plan/MarkDistinctNode.java | 14 +-
 .../trino/sql/planner/plan/RowNumberNode.java | 14 +-
 .../sql/planner/plan/TopNRankingNode.java | 15 +-
 .../sql/planner/planprinter/PlanPrinter.java | 11 +-
 .../trino/cost/TestTopNRankingStatsRule.java | 10 -
 .../trino/operator/BenchmarkGroupByHash.java | 33 +--
 .../BenchmarkGroupByHashOnSimulatedData.java | 2 +-
 ...kHashAndStreamingAggregationOperators.java | 4 +-
 .../operator/TestDistinctLimitOperator.java | 5 -
 .../io/trino/operator/TestGroupByHash.java | 77 ++---
 .../operator/TestGroupedTopNRankBuilder.java | 2 +-
 .../TestGroupedTopNRowNumberBuilder.java | 2 +-
 .../operator/TestHashAggregationOperator.java | 276 +++++++-----------
 .../operator/TestMarkDistinctOperator.java | 4 +-
 .../trino/operator/TestRowNumberOperator.java | 253 ++++++++--------
 .../operator/TestTopNRankingOperator.java | 5 -
 .../assertions/DistinctLimitMatcher.java | 10 +-
 .../assertions/MarkDistinctMatcher.java | 10 +-
 .../planner/assertions/PlanMatchPattern.java | 26 +-
 .../planner/assertions/RowNumberMatcher.java | 20 --
 .../assertions/TopNRankingMatcher.java | 23 +-
 ...tipleDistinctAggregationsToSubqueries.java | 21 --
 .../TestPruneAggregationSourceColumns.java | 7 +-
 .../TestPruneDistinctLimitSourceColumns.java | 15 +-
 .../rule/TestPruneMarkDistinctColumns.java | 11 +-
 .../rule/TestPruneOrderByInAggregation.java | 6 +-
 .../rule/TestPruneRowNumberColumns.java | 3 +-
 .../rule/TestPruneTopNRankingColumns.java | 9 +-
 .../rule/TestPushDownDereferencesRules.java | 1 -
 .../iterative/rule/test/PlanBuilder.java | 36 +--
 .../TestAnonymizeJsonRepresentation.java | 3 +-
 .../planprinter/TestJsonRepresentation.java | 3 +-
 91 files changed, 373 insertions(+), 1012 deletions(-)

diff --git a/core/trino-main/src/main/java/io/trino/operator/BigintGroupByHash.java b/core/trino-main/src/main/java/io/trino/operator/BigintGroupByHash.java
index df180db7684a..8a81bd23489e 100644
--- a/core/trino-main/src/main/java/io/trino/operator/BigintGroupByHash.java
+++ b/core/trino-main/src/main/java/io/trino/operator/BigintGroupByHash.java
@@ -21,7 +21,6 @@
 import io.trino.spi.block.BlockBuilder;
 import io.trino.spi.block.DictionaryBlock;
 import io.trino.spi.block.RunLengthEncodedBlock;
-import io.trino.spi.type.AbstractLongType;
 import io.trino.spi.type.BigintType;

 import java.util.Arrays;
@@ -33,7 +32,6 @@
 import static io.airlift.slice.SizeOf.sizeOf;
 import static io.trino.spi.StandardErrorCode.GENERIC_INSUFFICIENT_RESOURCES;
 import static io.trino.spi.type.BigintType.BIGINT;
-import static io.trino.type.TypeUtils.NULL_HASH_CODE;
 import static it.unimi.dsi.fastutil.HashCommon.arraySize;
 import static it.unimi.dsi.fastutil.HashCommon.murmurHash3;
 import static java.lang.Math.min;
@@ -48,8 +46,6 @@ public class BigintGroupByHash

     private static final float FILL_RATIO = 0.75f;

-    private final boolean outputRawHash;
-
     private int hashCapacity;
     private int maxFill;
     private int mask;
@@ -72,12 +68,10 @@ public class BigintGroupByHash
     private long preallocatedMemoryInBytes;
     private long currentPageSizeInBytes;

-    public BigintGroupByHash(boolean outputRawHash, int expectedSize, UpdateMemory updateMemory)
+    public BigintGroupByHash(int expectedSize, UpdateMemory updateMemory)
     {
         checkArgument(expectedSize > 0, "expectedSize must be greater than zero");

-        this.outputRawHash = outputRawHash;
-
         hashCapacity = arraySize(expectedSize, FILL_RATIO);
         maxFill = calculateMaxFill(hashCapacity);
@@ -95,7 +89,6 @@ public BigintGroupByHash(boolean outputRawHash, int expectedSize, UpdateMemory u
     private BigintGroupByHash(BigintGroupByHash other)
     {
-        outputRawHash = other.outputRawHash;
         hashCapacity = other.hashCapacity;
         maxFill = other.maxFill;
         mask = other.mask;
@@ -137,16 +130,6 @@ public void appendValuesTo(int groupId, PageBuilder pageBuilder)
         else {
             BIGINT.writeLong(blockBuilder, valuesByGroupId[groupId]);
         }
-
-        if
(outputRawHash) { - BlockBuilder hashBlockBuilder = pageBuilder.getBlockBuilder(1); - if (groupId == nullGroupId) { - BIGINT.writeLong(hashBlockBuilder, NULL_HASH_CODE); - } - else { - BIGINT.writeLong(hashBlockBuilder, AbstractLongType.hash(valuesByGroupId[groupId])); - } - } } @Override diff --git a/core/trino-main/src/main/java/io/trino/operator/DistinctLimitOperator.java b/core/trino-main/src/main/java/io/trino/operator/DistinctLimitOperator.java index 5a903c22d611..edfc31046d9b 100644 --- a/core/trino-main/src/main/java/io/trino/operator/DistinctLimitOperator.java +++ b/core/trino-main/src/main/java/io/trino/operator/DistinctLimitOperator.java @@ -22,7 +22,6 @@ import io.trino.sql.planner.plan.PlanNodeId; import java.util.List; -import java.util.Optional; import static com.google.common.base.Preconditions.checkArgument; import static com.google.common.base.Preconditions.checkState; @@ -45,7 +44,6 @@ public static class DistinctLimitOperatorFactory private final List distinctChannels; private final List sourceTypes; private final long limit; - private final Optional hashChannel; private boolean closed; private final FlatHashStrategyCompiler hashStrategyCompiler; @@ -55,7 +53,6 @@ public DistinctLimitOperatorFactory( List sourceTypes, List distinctChannels, long limit, - Optional hashChannel, FlatHashStrategyCompiler hashStrategyCompiler) { this.operatorId = operatorId; @@ -65,7 +62,6 @@ public DistinctLimitOperatorFactory( checkArgument(limit >= 0, "limit must be at least zero"); this.limit = limit; - this.hashChannel = requireNonNull(hashChannel, "hashChannel is null"); this.hashStrategyCompiler = requireNonNull(hashStrategyCompiler, "hashStrategyCompiler is null"); } @@ -77,7 +73,7 @@ public Operator createOperator(DriverContext driverContext) List distinctTypes = distinctChannels.stream() .map(sourceTypes::get) .collect(toImmutableList()); - return new DistinctLimitOperator(operatorContext, distinctChannels, distinctTypes, limit, hashChannel, hashStrategyCompiler); + return new DistinctLimitOperator(operatorContext, distinctChannels, distinctTypes, limit, hashStrategyCompiler); } @Override @@ -89,7 +85,7 @@ public void noMoreOperators() @Override public OperatorFactory duplicate() { - return new DistinctLimitOperatorFactory(operatorId, planNodeId, sourceTypes, distinctChannels, limit, hashChannel, hashStrategyCompiler); + return new DistinctLimitOperatorFactory(operatorId, planNodeId, sourceTypes, distinctChannels, limit, hashStrategyCompiler); } } @@ -114,29 +110,17 @@ public DistinctLimitOperator( List distinctChannels, List distinctTypes, long limit, - Optional hashChannel, FlatHashStrategyCompiler hashStrategyCompiler) { this.operatorContext = requireNonNull(operatorContext, "operatorContext is null"); this.localUserMemoryContext = operatorContext.localUserMemoryContext(); checkArgument(limit >= 0, "limit must be at least zero"); checkArgument(distinctTypes.size() == distinctChannels.size(), "distinctTypes and distinctChannels sizes don't match"); - - if (hashChannel.isPresent()) { - this.inputChannels = new int[distinctChannels.size() + 1]; - for (int i = 0; i < distinctChannels.size(); i++) { - this.inputChannels[i] = distinctChannels.get(i); - } - this.inputChannels[distinctChannels.size()] = hashChannel.get(); - } - else { - this.inputChannels = Ints.toArray(distinctChannels); - } + this.inputChannels = Ints.toArray(distinctChannels); this.groupByHash = createGroupByHash( operatorContext.getSession(), distinctTypes, - hashChannel.isPresent(), false, toIntExact(min(limit, 
10_000)), hashStrategyCompiler, diff --git a/core/trino-main/src/main/java/io/trino/operator/FlatGroupByHash.java b/core/trino-main/src/main/java/io/trino/operator/FlatGroupByHash.java index 1a06db18e111..746499d2c053 100644 --- a/core/trino-main/src/main/java/io/trino/operator/FlatGroupByHash.java +++ b/core/trino-main/src/main/java/io/trino/operator/FlatGroupByHash.java @@ -76,7 +76,7 @@ public FlatGroupByHash( checkArgument(expectedSize > 0, "expectedSize must be greater than zero"); - int totalChannels = hashTypes.size() + (hashMode.isHashPrecomputed() ? 1 : 0); + int totalChannels = hashTypes.size(); this.currentBlocks = new Block[totalChannels]; this.currentBlockBuilders = new BlockBuilder[totalChannels]; @@ -230,17 +230,7 @@ private void updateDictionaryLookBack(Block dictionary) private boolean canProcessDictionary(Block[] blocks) { - if (!processDictionary || !(blocks[0] instanceof DictionaryBlock inputDictionary)) { - return false; - } - - if (!hashMode.isHashPrecomputed()) { - return true; - } - - // dictionarySourceIds of data block and hash block must match - return blocks[1] instanceof DictionaryBlock hashDictionary && - hashDictionary.getDictionarySourceId().equals(inputDictionary.getDictionarySourceId()); + return processDictionary && blocks[0] instanceof DictionaryBlock; } private boolean canProcessLowCardinalityDictionary(Block[] blocks) diff --git a/core/trino-main/src/main/java/io/trino/operator/FlatHash.java b/core/trino-main/src/main/java/io/trino/operator/FlatHash.java index 048f94f96e7a..c18414ee04fa 100644 --- a/core/trino-main/src/main/java/io/trino/operator/FlatHash.java +++ b/core/trino-main/src/main/java/io/trino/operator/FlatHash.java @@ -27,7 +27,6 @@ import static io.airlift.slice.SizeOf.sizeOf; import static io.trino.operator.AppendOnlyVariableWidthData.getChunkOffset; import static io.trino.spi.StandardErrorCode.GENERIC_INSUFFICIENT_RESOURCES; -import static io.trino.spi.type.BigintType.BIGINT; import static java.lang.Math.addExact; import static java.lang.Math.max; import static java.lang.Math.multiplyExact; @@ -63,7 +62,6 @@ private static int calculateMaxFill(int capacity) private final AppendOnlyVariableWidthData variableWidthData; private final UpdateMemory checkMemoryReservation; - private final boolean hasPrecomputedHash; private final boolean cacheHashValue; private final int fixedRecordSize; private final int variableWidthOffset; @@ -87,7 +85,6 @@ public FlatHash(FlatHashStrategy flatHashStrategy, GroupByHashMode hashMode, int boolean hasVariableData = flatHashStrategy.isAnyVariableWidth(); this.variableWidthData = hasVariableData ? new AppendOnlyVariableWidthData() : null; requireNonNull(hashMode, "hashMode is null"); - this.hasPrecomputedHash = hashMode.isHashPrecomputed(); this.cacheHashValue = hashMode.isHashCached(); // the record is laid out as follows: @@ -114,7 +111,6 @@ public FlatHash(FlatHash other) this.flatHashStrategy = other.flatHashStrategy; this.checkMemoryReservation = other.checkMemoryReservation; this.variableWidthData = other.variableWidthData == null ? 
null : new AppendOnlyVariableWidthData(other.variableWidthData); - this.hasPrecomputedHash = other.hasPrecomputedHash; this.cacheHashValue = other.cacheHashValue; this.fixedRecordSize = other.fixedRecordSize; this.variableWidthOffset = other.variableWidthOffset; @@ -198,35 +194,16 @@ public void appendTo(int groupId, BlockBuilder[] blockBuilders) variableWidthChunk, variableChunkOffset, blockBuilders); - - if (hasPrecomputedHash) { - BIGINT.writeLong(blockBuilders[blockBuilders.length - 1], (long) LONG_HANDLE.get(fixedSizeRecords, recordOffset)); - } } public void computeHashes(Block[] blocks, long[] hashes, int offset, int length) { - if (hasPrecomputedHash) { - Block hashBlock = blocks[blocks.length - 1]; - for (int i = 0; i < length; i++) { - hashes[i] = BIGINT.getLong(hashBlock, offset + i); - } - } - else { - flatHashStrategy.hashBlocksBatched(blocks, hashes, offset, length); - } + flatHashStrategy.hashBlocksBatched(blocks, hashes, offset, length); } public int putIfAbsent(Block[] blocks, int position) { - long hash; - if (hasPrecomputedHash) { - hash = BIGINT.getLong(blocks[blocks.length - 1], position); - } - else { - hash = flatHashStrategy.hash(blocks, position); - } - + long hash = flatHashStrategy.hash(blocks, position); return putIfAbsent(blocks, position, hash); } diff --git a/core/trino-main/src/main/java/io/trino/operator/GroupByHash.java b/core/trino-main/src/main/java/io/trino/operator/GroupByHash.java index 5f5de5a5ebed..7807c66a4065 100644 --- a/core/trino-main/src/main/java/io/trino/operator/GroupByHash.java +++ b/core/trino-main/src/main/java/io/trino/operator/GroupByHash.java @@ -34,7 +34,6 @@ public interface GroupByHash static GroupByHash createGroupByHash( Session session, List types, - boolean hasPrecomputedHash, boolean spillable, int expectedSize, FlatHashStrategyCompiler hashStrategyCompiler, @@ -43,18 +42,15 @@ static GroupByHash createGroupByHash( boolean dictionaryAggregationEnabled = isDictionaryAggregationEnabled(session); return createGroupByHash( types, - selectGroupByHashMode(hasPrecomputedHash, spillable, types), + selectGroupByHashMode(spillable, types), expectedSize, dictionaryAggregationEnabled, hashStrategyCompiler, updateMemory); } - static GroupByHashMode selectGroupByHashMode(boolean hasPrecomputedHash, boolean spillable, List types) + static GroupByHashMode selectGroupByHashMode(boolean spillable, List types) { - if (hasPrecomputedHash) { - return GroupByHashMode.PRECOMPUTED; - } // Spillable aggregations should always cache hash values since spilling requires sorting by the hash value if (spillable) { return GroupByHashMode.CACHED; @@ -92,7 +88,7 @@ static GroupByHash createGroupByHash( UpdateMemory updateMemory) { if (types.size() == 1 && types.get(0).equals(BIGINT)) { - return new BigintGroupByHash(hashMode.isHashPrecomputed(), expectedSize, updateMemory); + return new BigintGroupByHash(expectedSize, updateMemory); } return new FlatGroupByHash( types, diff --git a/core/trino-main/src/main/java/io/trino/operator/GroupByHashMode.java b/core/trino-main/src/main/java/io/trino/operator/GroupByHashMode.java index af9ee24c983f..e456c1274134 100644 --- a/core/trino-main/src/main/java/io/trino/operator/GroupByHashMode.java +++ b/core/trino-main/src/main/java/io/trino/operator/GroupByHashMode.java @@ -14,8 +14,6 @@ package io.trino.operator; public enum GroupByHashMode { - // Hash values are pre-computed as input, and emitted as output - PRECOMPUTED, // Hash values are computed by the FlatGroupByHash instance and stored along with the entry. 
This consumes more // memory, but makes re-hashing cheaper by avoiding the need to re-compute each hash code and also makes the // valueIdentical check cheaper by avoiding a deep equality check when hashes don't match @@ -24,15 +22,10 @@ public enum GroupByHashMode { // table which saves memory, but can be more expensive during rehash. ON_DEMAND; - public boolean isHashPrecomputed() - { - return this == PRECOMPUTED; - } - public boolean isHashCached() { return switch (this) { - case PRECOMPUTED, CACHED -> true; + case CACHED -> true; case ON_DEMAND -> false; }; } diff --git a/core/trino-main/src/main/java/io/trino/operator/GroupByHashPageIndexer.java b/core/trino-main/src/main/java/io/trino/operator/GroupByHashPageIndexer.java index 507c37c63973..2657e372fbf7 100644 --- a/core/trino-main/src/main/java/io/trino/operator/GroupByHashPageIndexer.java +++ b/core/trino-main/src/main/java/io/trino/operator/GroupByHashPageIndexer.java @@ -31,7 +31,7 @@ public class GroupByHashPageIndexer public GroupByHashPageIndexer(List hashTypes, FlatHashStrategyCompiler hashStrategyCompiler) { - this(GroupByHash.createGroupByHash(hashTypes, selectGroupByHashMode(false, false, hashTypes), 20, false, hashStrategyCompiler, NOOP)); + this(GroupByHash.createGroupByHash(hashTypes, selectGroupByHashMode(false, hashTypes), 20, false, hashStrategyCompiler, NOOP)); } public GroupByHashPageIndexer(GroupByHash hash) diff --git a/core/trino-main/src/main/java/io/trino/operator/HashAggregationOperator.java b/core/trino-main/src/main/java/io/trino/operator/HashAggregationOperator.java index f5918084eddc..e86c4f71c026 100644 --- a/core/trino-main/src/main/java/io/trino/operator/HashAggregationOperator.java +++ b/core/trino-main/src/main/java/io/trino/operator/HashAggregationOperator.java @@ -24,10 +24,8 @@ import io.trino.operator.aggregation.builder.SpillableHashAggregationBuilder; import io.trino.operator.aggregation.partial.PartialAggregationController; import io.trino.operator.aggregation.partial.SkipAggregationBuilder; -import io.trino.operator.scalar.CombineHashFunction; import io.trino.spi.Page; import io.trino.spi.PageBuilder; -import io.trino.spi.type.BigintType; import io.trino.spi.type.Type; import io.trino.spiller.SpillerFactory; import io.trino.sql.planner.plan.AggregationNode.Step; @@ -40,10 +38,8 @@ import static com.google.common.base.Preconditions.checkArgument; import static com.google.common.base.Preconditions.checkState; import static io.airlift.units.DataSize.Unit.MEGABYTE; -import static io.trino.operator.HashGenerator.INITIAL_HASH_VALUE; import static io.trino.operator.aggregation.builder.InMemoryHashAggregationBuilder.toTypes; import static io.trino.spi.type.BigintType.BIGINT; -import static io.trino.type.TypeUtils.NULL_HASH_CODE; import static java.util.Objects.requireNonNull; public class HashAggregationOperator @@ -62,7 +58,6 @@ public static class HashAggregationOperatorFactory private final Step step; private final boolean produceDefaultOutput; private final List aggregatorFactories; - private final Optional hashChannel; private final Optional groupIdChannel; private final int expectedGroups; @@ -85,7 +80,6 @@ public HashAggregationOperatorFactory( List globalAggregationGroupIds, Step step, List aggregatorFactories, - Optional hashChannel, Optional groupIdChannel, int expectedGroups, Optional maxPartialMemory, @@ -100,7 +94,6 @@ public HashAggregationOperatorFactory( step, false, aggregatorFactories, - hashChannel, groupIdChannel, expectedGroups, maxPartialMemory, @@ -123,7 +116,6 @@ public 
HashAggregationOperatorFactory( Step step, boolean produceDefaultOutput, List aggregatorFactories, - Optional hashChannel, Optional groupIdChannel, int expectedGroups, Optional maxPartialMemory, @@ -141,7 +133,6 @@ public HashAggregationOperatorFactory( step, produceDefaultOutput, aggregatorFactories, - hashChannel, groupIdChannel, expectedGroups, maxPartialMemory, @@ -163,7 +154,6 @@ public HashAggregationOperatorFactory( Step step, boolean produceDefaultOutput, List aggregatorFactories, - Optional hashChannel, Optional groupIdChannel, int expectedGroups, Optional maxPartialMemory, @@ -176,7 +166,6 @@ public HashAggregationOperatorFactory( { this.operatorId = operatorId; this.planNodeId = requireNonNull(planNodeId, "planNodeId is null"); - this.hashChannel = requireNonNull(hashChannel, "hashChannel is null"); this.groupIdChannel = requireNonNull(groupIdChannel, "groupIdChannel is null"); this.groupByTypes = ImmutableList.copyOf(groupByTypes); this.groupByChannels = ImmutableList.copyOf(groupByChannels); @@ -208,7 +197,6 @@ public Operator createOperator(DriverContext driverContext) step, produceDefaultOutput, aggregatorFactories, - hashChannel, groupIdChannel, expectedGroups, maxPartialMemory, @@ -238,7 +226,6 @@ public OperatorFactory duplicate() step, produceDefaultOutput, aggregatorFactories, - hashChannel, groupIdChannel, expectedGroups, maxPartialMemory, @@ -259,7 +246,6 @@ public OperatorFactory duplicate() private final Step step; private final boolean produceDefaultOutput; private final List aggregatorFactories; - private final Optional hashChannel; private final Optional groupIdChannel; private final int expectedGroups; private final Optional maxPartialMemory; @@ -293,7 +279,6 @@ private HashAggregationOperator( Step step, boolean produceDefaultOutput, List aggregatorFactories, - Optional hashChannel, Optional groupIdChannel, int expectedGroups, Optional maxPartialMemory, @@ -315,13 +300,12 @@ private HashAggregationOperator( this.groupByChannels = ImmutableList.copyOf(groupByChannels); this.globalAggregationGroupIds = ImmutableList.copyOf(globalAggregationGroupIds); this.aggregatorFactories = ImmutableList.copyOf(aggregatorFactories); - this.hashChannel = requireNonNull(hashChannel, "hashChannel is null"); this.groupIdChannel = requireNonNull(groupIdChannel, "groupIdChannel is null"); this.step = step; this.produceDefaultOutput = produceDefaultOutput; this.expectedGroups = expectedGroups; this.maxPartialMemory = requireNonNull(maxPartialMemory, "maxPartialMemory is null"); - this.types = toTypes(groupByTypes, aggregatorFactories, hashChannel); + this.types = toTypes(groupByTypes, aggregatorFactories); this.spillEnabled = spillEnabled; this.memoryLimitForMerge = requireNonNull(memoryLimitForMerge, "memoryLimitForMerge is null"); this.memoryLimitForMergeWithMemory = requireNonNull(memoryLimitForMergeWithMemory, "memoryLimitForMergeWithMemory is null"); @@ -385,7 +369,7 @@ public void addInput(Page page) .map(PartialAggregationController::isPartialAggregationDisabled) .orElse(false); if (step.isOutputPartial() && partialAggregationDisabled) { - aggregationBuilder = new SkipAggregationBuilder(groupByChannels, hashChannel, aggregatorFactories, memoryContext, aggregationMetrics); + aggregationBuilder = new SkipAggregationBuilder(groupByChannels, aggregatorFactories, memoryContext, aggregationMetrics); } else if (step.isOutputPartial() || !spillEnabled || !isSpillable()) { // TODO: We ignore spillEnabled here if any aggregate has ORDER BY clause or DISTINCT because they are not yet 
implemented for spilling. @@ -395,7 +379,6 @@ else if (step.isOutputPartial() || !spillEnabled || !isSpillable()) { expectedGroups, groupByTypes, groupByChannels, - hashChannel, false, // spillable operatorContext, maxPartialMemory, @@ -417,7 +400,6 @@ else if (step.isOutputPartial() || !spillEnabled || !isSpillable()) { expectedGroups, groupByTypes, groupByChannels, - hashChannel, operatorContext, memoryLimitForMerge, memoryLimitForMergeWithMemory, @@ -574,12 +556,6 @@ private Page getGlobalAggregationOutput() channel++; } - if (hashChannel.isPresent()) { - long hashValue = calculateDefaultOutputHash(groupByTypes, groupIdChannel.orElseThrow(), groupId); - BIGINT.writeLong(output.getBlockBuilder(channel), hashValue); - channel++; - } - for (AggregatorFactory aggregatorFactory : aggregatorFactories) { aggregatorFactory.createAggregator(aggregationMetrics).evaluate(output.getBlockBuilder(channel)); channel++; @@ -591,19 +567,4 @@ private Page getGlobalAggregationOutput() } return output.build(); } - - private static long calculateDefaultOutputHash(List groupByChannels, int groupIdChannel, int groupId) - { - // Default output has NULLs on all columns except of groupIdChannel - long result = INITIAL_HASH_VALUE; - for (int channel = 0; channel < groupByChannels.size(); channel++) { - if (channel != groupIdChannel) { - result = CombineHashFunction.getHash(result, NULL_HASH_CODE); - } - else { - result = CombineHashFunction.getHash(result, BigintType.hash(groupId)); - } - } - return result; - } } diff --git a/core/trino-main/src/main/java/io/trino/operator/MarkDistinctHash.java b/core/trino-main/src/main/java/io/trino/operator/MarkDistinctHash.java index d9844cc82fef..2ea8a11cddc2 100644 --- a/core/trino-main/src/main/java/io/trino/operator/MarkDistinctHash.java +++ b/core/trino-main/src/main/java/io/trino/operator/MarkDistinctHash.java @@ -31,9 +31,9 @@ public class MarkDistinctHash private final GroupByHash groupByHash; private long nextDistinctId; - public MarkDistinctHash(Session session, List types, boolean hasPrecomputedHash, FlatHashStrategyCompiler hashStrategyCompiler, UpdateMemory updateMemory) + public MarkDistinctHash(Session session, List types, FlatHashStrategyCompiler hashStrategyCompiler, UpdateMemory updateMemory) { - this.groupByHash = createGroupByHash(session, types, hasPrecomputedHash, false, 10_000, hashStrategyCompiler, updateMemory); + this.groupByHash = createGroupByHash(session, types, false, 10_000, hashStrategyCompiler, updateMemory); } private MarkDistinctHash(MarkDistinctHash other) diff --git a/core/trino-main/src/main/java/io/trino/operator/MarkDistinctOperator.java b/core/trino-main/src/main/java/io/trino/operator/MarkDistinctOperator.java index a2b809871b5e..010a7f50dd87 100644 --- a/core/trino-main/src/main/java/io/trino/operator/MarkDistinctOperator.java +++ b/core/trino-main/src/main/java/io/trino/operator/MarkDistinctOperator.java @@ -24,7 +24,6 @@ import java.util.Collection; import java.util.List; -import java.util.Optional; import static com.google.common.base.Preconditions.checkArgument; import static com.google.common.base.Preconditions.checkState; @@ -39,7 +38,6 @@ public static class MarkDistinctOperatorFactory { private final int operatorId; private final PlanNodeId planNodeId; - private final Optional hashChannel; private final List markDistinctChannels; private final List types; private final FlatHashStrategyCompiler hashStrategyCompiler; @@ -50,14 +48,12 @@ public MarkDistinctOperatorFactory( PlanNodeId planNodeId, List sourceTypes, Collection 
markDistinctChannels, - Optional hashChannel, FlatHashStrategyCompiler hashStrategyCompiler) { this.operatorId = operatorId; this.planNodeId = requireNonNull(planNodeId, "planNodeId is null"); this.markDistinctChannels = ImmutableList.copyOf(requireNonNull(markDistinctChannels, "markDistinctChannels is null")); checkArgument(!markDistinctChannels.isEmpty(), "markDistinctChannels is empty"); - this.hashChannel = requireNonNull(hashChannel, "hashChannel is null"); this.hashStrategyCompiler = requireNonNull(hashStrategyCompiler, "hashStrategyCompiler is null"); this.types = ImmutableList.builder() .addAll(sourceTypes) @@ -70,7 +66,7 @@ public Operator createOperator(DriverContext driverContext) { checkState(!closed, "Factory is already closed"); OperatorContext operatorContext = driverContext.addOperatorContext(operatorId, planNodeId, MarkDistinctOperator.class.getSimpleName()); - return new MarkDistinctOperator(operatorContext, types, markDistinctChannels, hashChannel, hashStrategyCompiler); + return new MarkDistinctOperator(operatorContext, types, markDistinctChannels, hashStrategyCompiler); } @Override @@ -82,7 +78,7 @@ public void noMoreOperators() @Override public OperatorFactory duplicate() { - return new MarkDistinctOperatorFactory(operatorId, planNodeId, types.subList(0, types.size() - 1), markDistinctChannels, hashChannel, hashStrategyCompiler); + return new MarkDistinctOperatorFactory(operatorId, planNodeId, types.subList(0, types.size() - 1), markDistinctChannels, hashStrategyCompiler); } } @@ -97,29 +93,19 @@ public OperatorFactory duplicate() // for yield when memory is not available private Work unfinishedWork; - public MarkDistinctOperator(OperatorContext operatorContext, List types, List markDistinctChannels, Optional hashChannel, FlatHashStrategyCompiler hashStrategyCompiler) + public MarkDistinctOperator(OperatorContext operatorContext, List types, List markDistinctChannels, FlatHashStrategyCompiler hashStrategyCompiler) { this.operatorContext = requireNonNull(operatorContext, "operatorContext is null"); - requireNonNull(hashChannel, "hashChannel is null"); requireNonNull(markDistinctChannels, "markDistinctChannels is null"); ImmutableList.Builder distinctTypes = ImmutableList.builder(); for (int channel : markDistinctChannels) { distinctTypes.add(types.get(channel)); } - if (hashChannel.isPresent()) { - this.markDistinctChannels = new int[markDistinctChannels.size() + 1]; - for (int i = 0; i < markDistinctChannels.size(); i++) { - this.markDistinctChannels[i] = markDistinctChannels.get(i); - } - this.markDistinctChannels[markDistinctChannels.size()] = hashChannel.get(); - } - else { - this.markDistinctChannels = Ints.toArray(markDistinctChannels); - } + this.markDistinctChannels = Ints.toArray(markDistinctChannels); - this.markDistinctHash = new MarkDistinctHash(operatorContext.getSession(), distinctTypes.build(), hashChannel.isPresent(), hashStrategyCompiler, this::updateMemoryReservation); + this.markDistinctHash = new MarkDistinctHash(operatorContext.getSession(), distinctTypes.build(), hashStrategyCompiler, this::updateMemoryReservation); this.localUserMemoryContext = operatorContext.localUserMemoryContext(); } diff --git a/core/trino-main/src/main/java/io/trino/operator/RowNumberOperator.java b/core/trino-main/src/main/java/io/trino/operator/RowNumberOperator.java index 48e8b7b485f1..09fbaeb1a03a 100644 --- a/core/trino-main/src/main/java/io/trino/operator/RowNumberOperator.java +++ b/core/trino-main/src/main/java/io/trino/operator/RowNumberOperator.java @@ -49,7 +49,6 
@@ public static class RowNumberOperatorFactory private final List outputChannels; private final List partitionChannels; private final List partitionTypes; - private final Optional hashChannel; private final int expectedPositions; private boolean closed; private final FlatHashStrategyCompiler hashStrategyCompiler; @@ -62,7 +61,6 @@ public RowNumberOperatorFactory( List partitionChannels, List partitionTypes, Optional maxRowsPerPartition, - Optional hashChannel, int expectedPositions, FlatHashStrategyCompiler hashStrategyCompiler) { @@ -74,7 +72,6 @@ public RowNumberOperatorFactory( this.partitionTypes = ImmutableList.copyOf(requireNonNull(partitionTypes, "partitionTypes is null")); this.maxRowsPerPartition = requireNonNull(maxRowsPerPartition, "maxRowsPerPartition is null"); - this.hashChannel = requireNonNull(hashChannel, "hashChannel is null"); checkArgument(expectedPositions > 0, "expectedPositions < 0"); this.expectedPositions = expectedPositions; this.hashStrategyCompiler = requireNonNull(hashStrategyCompiler, "hashStrategyCompiler is null"); @@ -93,7 +90,6 @@ public Operator createOperator(DriverContext driverContext) partitionChannels, partitionTypes, maxRowsPerPartition, - hashChannel, expectedPositions, hashStrategyCompiler); } @@ -115,7 +111,6 @@ public OperatorFactory duplicate() partitionChannels, partitionTypes, maxRowsPerPartition, - hashChannel, expectedPositions, hashStrategyCompiler); } @@ -149,7 +144,6 @@ public RowNumberOperator( List partitionChannels, List partitionTypes, Optional maxRowsPerPartition, - Optional hashChannel, int expectedPositions, FlatHashStrategyCompiler hashStrategyCompiler) { @@ -171,20 +165,10 @@ public RowNumberOperator( this.groupByHash = Optional.empty(); } else { - if (hashChannel.isPresent()) { - this.groupByChannels = new int[partitionChannels.size() + 1]; - for (int i = 0; i < partitionChannels.size(); i++) { - this.groupByChannels[i] = partitionChannels.get(i); - } - this.groupByChannels[partitionChannels.size()] = hashChannel.get(); - } - else { - this.groupByChannels = Ints.toArray(partitionChannels); - } + this.groupByChannels = Ints.toArray(partitionChannels); this.groupByHash = Optional.of(createGroupByHash( operatorContext.getSession(), partitionTypes, - hashChannel.isPresent(), false, expectedPositions, hashStrategyCompiler, diff --git a/core/trino-main/src/main/java/io/trino/operator/TopNRankingOperator.java b/core/trino-main/src/main/java/io/trino/operator/TopNRankingOperator.java index 4b2aae004fee..f47e03347044 100644 --- a/core/trino-main/src/main/java/io/trino/operator/TopNRankingOperator.java +++ b/core/trino-main/src/main/java/io/trino/operator/TopNRankingOperator.java @@ -55,7 +55,6 @@ public static class TopNRankingOperatorFactory private final List sortChannels; private final int maxRowCountPerPartition; private final boolean partial; - private final Optional hashChannel; private final int expectedPositions; private final boolean generateRanking; @@ -76,7 +75,6 @@ public TopNRankingOperatorFactory( List sortChannels, int maxRowCountPerPartition, boolean partial, - Optional hashChannel, int expectedPositions, Optional maxPartialMemory, FlatHashStrategyCompiler hashStrategyCompiler, @@ -91,7 +89,6 @@ public TopNRankingOperatorFactory( this.partitionChannels = ImmutableList.copyOf(requireNonNull(partitionChannels, "partitionChannels is null")); this.partitionTypes = ImmutableList.copyOf(requireNonNull(partitionTypes, "partitionTypes is null")); this.sortChannels = ImmutableList.copyOf(requireNonNull(sortChannels)); - 
this.hashChannel = requireNonNull(hashChannel, "hashChannel is null"); this.partial = partial; checkArgument(maxRowCountPerPartition > 0, "maxRowCountPerPartition must be > 0"); this.maxRowCountPerPartition = maxRowCountPerPartition; @@ -119,7 +116,6 @@ public Operator createOperator(DriverContext driverContext) sortChannels, maxRowCountPerPartition, generateRanking, - hashChannel, expectedPositions, maxPartialMemory, hashStrategyCompiler, @@ -147,7 +143,6 @@ public OperatorFactory duplicate() sortChannels, maxRowCountPerPartition, partial, - hashChannel, expectedPositions, maxPartialMemory, hashStrategyCompiler, @@ -179,7 +174,6 @@ public TopNRankingOperator( List sortChannels, int maxRankingPerPartition, boolean generateRanking, - Optional hashChannel, int expectedPositions, Optional maxPartialMemory, FlatHashStrategyCompiler hashStrategyCompiler, @@ -205,17 +199,7 @@ public TopNRankingOperator( checkArgument(maxPartialMemory.isEmpty() || !generateRanking, "no partial memory on final TopN"); this.maxFlushableBytes = maxPartialMemory.map(DataSize::toBytes).orElse(Long.MAX_VALUE); - int[] groupByChannels; - if (hashChannel.isPresent()) { - groupByChannels = new int[partitionChannels.size() + 1]; - for (int i = 0; i < partitionChannels.size(); i++) { - groupByChannels[i] = partitionChannels.get(i); - } - groupByChannels[partitionChannels.size()] = hashChannel.get(); - } - else { - groupByChannels = Ints.toArray(partitionChannels); - } + int[] groupByChannels = Ints.toArray(partitionChannels); this.groupedTopNBuilderSupplier = getGroupedTopNBuilderSupplier( rankingType, @@ -229,7 +213,6 @@ public TopNRankingOperator( getGroupByHashSupplier( expectedPositions, partitionTypes, - hashChannel.isPresent(), operatorContext.getSession(), hashStrategyCompiler, this::updateMemoryReservation)); @@ -238,7 +221,6 @@ public TopNRankingOperator( private static Supplier getGroupByHashSupplier( int expectedPositions, List partitionTypes, - boolean hasPrecomputedHash, Session session, FlatHashStrategyCompiler hashStrategyCompiler, UpdateMemory updateMemory) @@ -250,7 +232,6 @@ private static Supplier getGroupByHashSupplier( return () -> createGroupByHash( session, partitionTypes, - hasPrecomputedHash, false, expectedPositions, hashStrategyCompiler, diff --git a/core/trino-main/src/main/java/io/trino/operator/aggregation/DistinctAccumulatorFactory.java b/core/trino-main/src/main/java/io/trino/operator/aggregation/DistinctAccumulatorFactory.java index d6b92a587e79..789693cecfef 100644 --- a/core/trino-main/src/main/java/io/trino/operator/aggregation/DistinctAccumulatorFactory.java +++ b/core/trino-main/src/main/java/io/trino/operator/aggregation/DistinctAccumulatorFactory.java @@ -113,7 +113,6 @@ private DistinctAccumulator( this.hash = new MarkDistinctHash( session, inputTypes, - false, hashStrategyCompiler, UpdateMemory.NOOP); } @@ -190,7 +189,6 @@ private DistinctGroupedAccumulator( .add(INTEGER) // group id column .addAll(inputTypes) .build(), - false, hashStrategyCompiler, UpdateMemory.NOOP); } diff --git a/core/trino-main/src/main/java/io/trino/operator/aggregation/DistinctWindowAccumulator.java b/core/trino-main/src/main/java/io/trino/operator/aggregation/DistinctWindowAccumulator.java index 68e5c51813f2..7334e96f3bb2 100644 --- a/core/trino-main/src/main/java/io/trino/operator/aggregation/DistinctWindowAccumulator.java +++ b/core/trino-main/src/main/java/io/trino/operator/aggregation/DistinctWindowAccumulator.java @@ -62,7 +62,6 @@ public DistinctWindowAccumulator( this.hash = new MarkDistinctHash( 
session, argumentTypes, - false, hashStrategyCompiler, UpdateMemory.NOOP); diff --git a/core/trino-main/src/main/java/io/trino/operator/aggregation/builder/InMemoryHashAggregationBuilder.java b/core/trino-main/src/main/java/io/trino/operator/aggregation/builder/InMemoryHashAggregationBuilder.java index 2c9d20455933..b9b9b83303f6 100644 --- a/core/trino-main/src/main/java/io/trino/operator/aggregation/builder/InMemoryHashAggregationBuilder.java +++ b/core/trino-main/src/main/java/io/trino/operator/aggregation/builder/InMemoryHashAggregationBuilder.java @@ -69,7 +69,6 @@ public InMemoryHashAggregationBuilder( int expectedGroups, List groupByTypes, List groupByChannels, - Optional hashChannel, boolean spillable, OperatorContext operatorContext, Optional maxPartialMemory, @@ -82,7 +81,6 @@ public InMemoryHashAggregationBuilder( expectedGroups, groupByTypes, groupByChannels, - hashChannel, spillable, operatorContext, maxPartialMemory, @@ -98,7 +96,6 @@ public InMemoryHashAggregationBuilder( int expectedGroups, List groupByTypes, List groupByChannels, - Optional hashChannel, boolean spillable, OperatorContext operatorContext, Optional maxPartialMemory, @@ -107,26 +104,12 @@ public InMemoryHashAggregationBuilder( UpdateMemory updateMemory, AggregationMetrics aggregationMetrics) { - if (hashChannel.isPresent()) { - this.groupByOutputTypes = ImmutableList.builderWithExpectedSize(groupByTypes.size() + 1) - .addAll(groupByTypes) - .add(BIGINT) - .build(); - this.groupByChannels = new int[groupByChannels.size() + 1]; - for (int i = 0; i < groupByChannels.size(); i++) { - this.groupByChannels[i] = groupByChannels.get(i); - } - this.groupByChannels[groupByChannels.size()] = hashChannel.get(); - } - else { - this.groupByOutputTypes = ImmutableList.copyOf(groupByTypes); - this.groupByChannels = Ints.toArray(groupByChannels); - } + this.groupByOutputTypes = ImmutableList.copyOf(groupByTypes); + this.groupByChannels = Ints.toArray(groupByChannels); this.groupByHash = createGroupByHash( operatorContext.getSession(), groupByTypes, - hashChannel.isPresent(), spillable, expectedGroups, hashStrategyCompiler, @@ -348,13 +331,10 @@ public int nextInt() }; } - public static List toTypes(List groupByType, List factories, Optional hashChannel) + public static List toTypes(List groupByType, List factories) { - ImmutableList.Builder types = ImmutableList.builderWithExpectedSize(groupByType.size() + (hashChannel.isPresent() ? 
1 : 0) + factories.size()); + ImmutableList.Builder types = ImmutableList.builderWithExpectedSize(groupByType.size() + factories.size()); types.addAll(groupByType); - if (hashChannel.isPresent()) { - types.add(BIGINT); - } for (AggregatorFactory factory : factories) { types.add(factory.createAggregator(new AggregationMetrics()).getType()); } diff --git a/core/trino-main/src/main/java/io/trino/operator/aggregation/builder/MergingHashAggregationBuilder.java b/core/trino-main/src/main/java/io/trino/operator/aggregation/builder/MergingHashAggregationBuilder.java index 29da121104b5..d2814cd95a32 100644 --- a/core/trino-main/src/main/java/io/trino/operator/aggregation/builder/MergingHashAggregationBuilder.java +++ b/core/trino-main/src/main/java/io/trino/operator/aggregation/builder/MergingHashAggregationBuilder.java @@ -42,7 +42,6 @@ public class MergingHashAggregationBuilder private final AggregationNode.Step step; private final int expectedGroups; private final List groupByPartialChannels; - private final Optional hashChannel; private final OperatorContext operatorContext; private final WorkProcessor sortedPages; private InMemoryHashAggregationBuilder hashAggregationBuilder; @@ -58,7 +57,6 @@ public MergingHashAggregationBuilder( AggregationNode.Step step, int expectedGroups, List groupByTypes, - Optional hashChannel, OperatorContext operatorContext, WorkProcessor sortedPages, AggregatedMemoryContext aggregatedMemoryContext, @@ -76,7 +74,6 @@ public MergingHashAggregationBuilder( this.step = AggregationNode.Step.partialInput(step); this.expectedGroups = expectedGroups; this.groupByPartialChannels = groupByPartialChannels.build(); - this.hashChannel = hashChannel.isPresent() ? Optional.of(groupByTypes.size()) : hashChannel; this.operatorContext = operatorContext; this.sortedPages = sortedPages; this.groupByTypes = groupByTypes; @@ -150,7 +147,6 @@ private void rebuildHashAggregationBuilder() expectedGroups, groupByTypes, groupByPartialChannels, - hashChannel, false, // spillable operatorContext, Optional.of(DataSize.succinctBytes(0)), diff --git a/core/trino-main/src/main/java/io/trino/operator/aggregation/builder/SpillableHashAggregationBuilder.java b/core/trino-main/src/main/java/io/trino/operator/aggregation/builder/SpillableHashAggregationBuilder.java index 8d804fe76517..1ef820702889 100644 --- a/core/trino-main/src/main/java/io/trino/operator/aggregation/builder/SpillableHashAggregationBuilder.java +++ b/core/trino-main/src/main/java/io/trino/operator/aggregation/builder/SpillableHashAggregationBuilder.java @@ -62,7 +62,6 @@ public class SpillableHashAggregationBuilder private final int expectedGroups; private final List groupByTypes; private final List groupByChannels; - private final Optional hashChannel; private final OperatorContext operatorContext; private final LocalMemoryContext localUserMemoryContext; private final LocalMemoryContext localRevocableMemoryContext; @@ -87,7 +86,6 @@ public SpillableHashAggregationBuilder( int expectedGroups, List groupByTypes, List groupByChannels, - Optional hashChannel, OperatorContext operatorContext, DataSize memoryLimitForMerge, DataSize memoryLimitForMergeWithMemory, @@ -100,7 +98,6 @@ public SpillableHashAggregationBuilder( this.expectedGroups = expectedGroups; this.groupByTypes = groupByTypes; this.groupByChannels = groupByChannels; - this.hashChannel = hashChannel; this.operatorContext = operatorContext; this.localUserMemoryContext = operatorContext.localUserMemoryContext(); this.localRevocableMemoryContext = 
operatorContext.localRevocableMemoryContext(); @@ -324,7 +321,6 @@ private WorkProcessor mergeSortedPages(WorkProcessor sortedPages, lo step, expectedGroups, groupByTypes, - hashChannel, operatorContext, sortedPages, operatorContext.aggregateUserMemoryContext(), @@ -348,7 +344,6 @@ private void rebuildHashAggregationBuilder() expectedGroups, groupByTypes, groupByChannels, - hashChannel, true, // spillable operatorContext, Optional.of(DataSize.succinctBytes(0)), diff --git a/core/trino-main/src/main/java/io/trino/operator/aggregation/partial/SkipAggregationBuilder.java b/core/trino-main/src/main/java/io/trino/operator/aggregation/partial/SkipAggregationBuilder.java index a0e81251a320..152ea594dbdd 100644 --- a/core/trino-main/src/main/java/io/trino/operator/aggregation/partial/SkipAggregationBuilder.java +++ b/core/trino-main/src/main/java/io/trino/operator/aggregation/partial/SkipAggregationBuilder.java @@ -29,7 +29,6 @@ import jakarta.annotation.Nullable; import java.util.List; -import java.util.Optional; import static com.google.common.base.Preconditions.checkArgument; import static java.util.Objects.requireNonNull; @@ -51,18 +50,16 @@ public class SkipAggregationBuilder public SkipAggregationBuilder( List groupByChannels, - Optional inputHashChannel, List aggregatorFactories, LocalMemoryContext memoryContext, AggregationMetrics aggregationMetrics) { this.memoryContext = requireNonNull(memoryContext, "memoryContext is null"); this.aggregatorFactories = ImmutableList.copyOf(requireNonNull(aggregatorFactories, "aggregatorFactories is null")); - this.hashChannels = new int[groupByChannels.size() + (inputHashChannel.isPresent() ? 1 : 0)]; + this.hashChannels = new int[groupByChannels.size()]; for (int i = 0; i < groupByChannels.size(); i++) { hashChannels[i] = groupByChannels.get(i); } - inputHashChannel.ifPresent(channelIndex -> hashChannels[groupByChannels.size()] = channelIndex); this.aggregationMetrics = requireNonNull(aggregationMetrics, "aggregationMetrics is null"); } diff --git a/core/trino-main/src/main/java/io/trino/operator/index/UnloadedIndexKeyRecordSet.java b/core/trino-main/src/main/java/io/trino/operator/index/UnloadedIndexKeyRecordSet.java index b4058d91de2b..f72f51816a54 100644 --- a/core/trino-main/src/main/java/io/trino/operator/index/UnloadedIndexKeyRecordSet.java +++ b/core/trino-main/src/main/java/io/trino/operator/index/UnloadedIndexKeyRecordSet.java @@ -66,7 +66,7 @@ public UnloadedIndexKeyRecordSet( } ImmutableList.Builder builder = ImmutableList.builder(); - GroupByHash groupByHash = createGroupByHash(session, distinctChannelTypes, false, false, 10_000, hashStrategyCompiler, NOOP); + GroupByHash groupByHash = createGroupByHash(session, distinctChannelTypes, false, 10_000, hashStrategyCompiler, NOOP); for (UpdateRequest request : requests) { Page page = request.getPage(); diff --git a/core/trino-main/src/main/java/io/trino/sql/planner/LocalExecutionPlanner.java b/core/trino-main/src/main/java/io/trino/sql/planner/LocalExecutionPlanner.java index 33ea7030aa9d..2e9d1290f299 100644 --- a/core/trino-main/src/main/java/io/trino/sql/planner/LocalExecutionPlanner.java +++ b/core/trino-main/src/main/java/io/trino/sql/planner/LocalExecutionPlanner.java @@ -1040,7 +1040,6 @@ public PhysicalOperation visitRowNumber(RowNumberNode node, LocalExecutionPlanCo int channel = source.getTypes().size(); outputMappings.put(node.getRowNumberSymbol(), channel); - Optional hashChannel = node.getHashSymbol().map(channelGetter(source)); OperatorFactory operatorFactory = new 
RowNumberOperator.RowNumberOperatorFactory( context.getNextOperatorId(), node.getId(), @@ -1049,7 +1048,6 @@ public PhysicalOperation visitRowNumber(RowNumberNode node, LocalExecutionPlanCo partitionChannels, partitionTypes, node.getMaxRowCountPerPartition(), - hashChannel, 10_000, hashStrategyCompiler); return new PhysicalOperation(operatorFactory, outputMappings.buildOrThrow(), source); @@ -1080,7 +1078,6 @@ public PhysicalOperation visitTopNRanking(TopNRankingNode node, LocalExecutionPl outputChannels.add(i); } - Optional hashChannel = node.getHashSymbol().map(channelGetter(source)); boolean isPartial = node.isPartial(); Optional maxPartialTopNMemorySize = isPartial ? Optional.of(SystemSessionProperties.getMaxPartialTopNMemory(session)).filter( maxSize -> maxSize.compareTo(DataSize.ofBytes(0)) > 0) : Optional.empty(); @@ -1095,7 +1092,6 @@ public PhysicalOperation visitTopNRanking(TopNRankingNode node, LocalExecutionPl sortChannels, node.getMaxRankingPerPartition(), isPartial, - hashChannel, 1000, maxPartialTopNMemorySize, hashStrategyCompiler, @@ -1923,7 +1919,6 @@ public PhysicalOperation visitDistinctLimit(DistinctLimitNode node, LocalExecuti { PhysicalOperation source = node.getSource().accept(this, context); - Optional hashChannel = node.getHashSymbol().map(channelGetter(source)); List distinctChannels = getChannelsForSymbols(node.getDistinctSymbols(), source.getLayout()); OperatorFactory operatorFactory = new DistinctLimitOperatorFactory( @@ -1932,7 +1927,6 @@ public PhysicalOperation visitDistinctLimit(DistinctLimitNode node, LocalExecuti source.getTypes(), distinctChannels, node.getLimit(), - hashChannel, hashStrategyCompiler); return new PhysicalOperation(operatorFactory, makeLayout(node), source); } @@ -2008,8 +2002,7 @@ public PhysicalOperation visitMarkDistinct(MarkDistinctNode node, LocalExecution PhysicalOperation source = node.getSource().accept(this, context); List channels = getChannelsForSymbols(node.getDistinctSymbols(), source.getLayout()); - Optional hashChannel = node.getHashSymbol().map(channelGetter(source)); - MarkDistinctOperatorFactory operator = new MarkDistinctOperatorFactory(context.getNextOperatorId(), node.getId(), source.getTypes(), channels, hashChannel, hashStrategyCompiler); + MarkDistinctOperatorFactory operator = new MarkDistinctOperatorFactory(context.getNextOperatorId(), node.getId(), source.getTypes(), channels, hashStrategyCompiler); return new PhysicalOperation(operator, makeLayout(node), source); } @@ -3375,7 +3368,6 @@ public PhysicalOperation visitTableWriter(TableWriterNode node, LocalExecutionPl groupingSymbols, PARTIAL, Optional.empty(), - Optional.empty(), source, false, false, @@ -3452,7 +3444,6 @@ public PhysicalOperation visitTableFinish(TableFinishNode node, LocalExecutionPl groupingSymbols, FINAL, Optional.empty(), - Optional.empty(), source, false, false, @@ -4036,7 +4027,6 @@ private PhysicalOperation planGroupByAggregation( node.getGlobalGroupingSets(), node.getGroupingKeys(), node.getStep(), - node.getHashSymbol(), node.getGroupIdSymbol(), source, node.hasDefaultOutput(), @@ -4057,7 +4047,6 @@ private OperatorFactory createHashAggregationOperatorFactory( Set globalGroupingSets, List groupBySymbols, Step step, - Optional hashSymbol, Optional groupIdSymbol, PhysicalOperation source, boolean hasDefaultOutput, @@ -4091,11 +4080,6 @@ private OperatorFactory createHashAggregationOperatorFactory( channel++; } - // hashChannel follows the group by channels - if (hashSymbol.isPresent()) { - outputMappings.put(hashSymbol.get(), 
channel++); - } - // aggregations go in following channels for (Symbol symbol : aggregationOutputSymbols) { outputMappings.put(symbol, channel); @@ -4117,7 +4101,6 @@ private OperatorFactory createHashAggregationOperatorFactory( aggregatorFactories, joinCompiler); } - Optional hashChannel = hashSymbol.map(channelGetter(source)); return new HashAggregationOperatorFactory( context.getNextOperatorId(), planNodeId, @@ -4127,7 +4110,6 @@ private OperatorFactory createHashAggregationOperatorFactory( step, hasDefaultOutput, aggregatorFactories, - hashChannel, groupIdChannel, expectedGroups, maxPartialAggregationMemorySize, diff --git a/core/trino-main/src/main/java/io/trino/sql/planner/QueryPlanner.java b/core/trino-main/src/main/java/io/trino/sql/planner/QueryPlanner.java index 3289d6c195d3..2c9a7a763186 100644 --- a/core/trino-main/src/main/java/io/trino/sql/planner/QueryPlanner.java +++ b/core/trino-main/src/main/java/io/trino/sql/planner/QueryPlanner.java @@ -909,7 +909,7 @@ public MergeWriterNode plan(Merge merge) // Mark distinct combinations of the unique_id value and the case_number Symbol isDistinctSymbol = symbolAllocator.newSymbol("is_distinct", BOOLEAN); - MarkDistinctNode markDistinctNode = new MarkDistinctNode(idAllocator.getNextId(), project, isDistinctSymbol, ImmutableList.of(uniqueIdSymbol, caseNumberSymbol), Optional.empty()); + MarkDistinctNode markDistinctNode = new MarkDistinctNode(idAllocator.getNextId(), project, isDistinctSymbol, ImmutableList.of(uniqueIdSymbol, caseNumberSymbol)); // Raise an error if unique_id symbol is non-null and the unique_id/case_number combination was not distinct Expression filter = ifExpression( @@ -1311,7 +1311,6 @@ private PlanBuilder planAggregation(PlanBuilder subPlan, List> grou globalGroupingSets.build()), ImmutableList.of(), AggregationNode.Step.SINGLE, - Optional.empty(), groupIdSymbol); return new PlanBuilder( diff --git a/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/AddIntermediateAggregations.java b/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/AddIntermediateAggregations.java index 27ff7782ad2c..dc5985e58847 100644 --- a/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/AddIntermediateAggregations.java +++ b/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/AddIntermediateAggregations.java @@ -116,7 +116,6 @@ public Result apply(AggregationNode aggregation, Captures captures, Context cont aggregation.getGroupingSets(), aggregation.getPreGroupedSymbols(), AggregationNode.Step.INTERMEDIATE, - aggregation.getHashSymbol(), aggregation.getGroupIdSymbol()); source = ExchangeNode.gatheringExchange(idAllocator.getNextId(), ExchangeNode.Scope.LOCAL, source); } diff --git a/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/DecorrelateUnnest.java b/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/DecorrelateUnnest.java index 180a70b3375e..425eff468890 100644 --- a/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/DecorrelateUnnest.java +++ b/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/DecorrelateUnnest.java @@ -400,8 +400,7 @@ public RewriteResult visitEnforceSingleRow(EnforceSingleRowNode node, Void conte ImmutableList.of(uniqueSymbol), false, rowNumberSymbol, - Optional.of(2), - Optional.empty()); + Optional.of(2)); } Expression predicate = ifExpression( new Comparison( @@ -442,7 +441,6 @@ public RewriteResult visitLimit(LimitNode node, Void context) ImmutableList.of(uniqueSymbol), false, 
rowNumberSymbol, - Optional.empty(), Optional.empty()); } diff --git a/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/ImplementOffset.java b/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/ImplementOffset.java index ef6b4ca9bf23..a57592df861f 100644 --- a/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/ImplementOffset.java +++ b/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/ImplementOffset.java @@ -71,7 +71,6 @@ public Result apply(OffsetNode parent, Captures captures, Context context) ImmutableList.of(), true, rowNumberSymbol, - Optional.empty(), Optional.empty()); FilterNode filterNode = new FilterNode( diff --git a/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/MergeLimitWithDistinct.java b/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/MergeLimitWithDistinct.java index 5cce2a3b8696..880e1c8d2a14 100644 --- a/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/MergeLimitWithDistinct.java +++ b/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/MergeLimitWithDistinct.java @@ -54,7 +54,6 @@ public Result apply(LimitNode parent, Captures captures, Context context) child.getSource(), parent.getCount(), false, - child.getGroupingKeys(), - child.getHashSymbol())); + child.getGroupingKeys())); } } diff --git a/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/MultipleDistinctAggregationToMarkDistinct.java b/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/MultipleDistinctAggregationToMarkDistinct.java index bc41ef0a012b..a310cdf7ce5a 100644 --- a/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/MultipleDistinctAggregationToMarkDistinct.java +++ b/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/MultipleDistinctAggregationToMarkDistinct.java @@ -154,8 +154,7 @@ public Result apply(AggregationNode parent, Captures captures, Context context) context.getIdAllocator().getNextId(), subPlan, marker, - ImmutableList.copyOf(distinctSymbols.build()), - Optional.empty()); + ImmutableList.copyOf(distinctSymbols.build())); } // remove the distinct flag and set the distinct marker diff --git a/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/MultipleDistinctAggregationsToSubqueries.java b/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/MultipleDistinctAggregationsToSubqueries.java index ba0617b79d2e..8e1f268009a3 100644 --- a/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/MultipleDistinctAggregationsToSubqueries.java +++ b/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/MultipleDistinctAggregationsToSubqueries.java @@ -92,9 +92,7 @@ public static boolean isAggregationCandidateForSplittingToSubqueries(Aggregation return SingleDistinctAggregationToGroupBy.allDistinctAggregates(aggregationNode) && OptimizeMixedDistinctAggregations.hasMultipleDistincts(aggregationNode) && // if we have more than one grouping set, we can have duplicated grouping sets and handling this is complex - aggregationNode.getGroupingSetCount() == 1 && - // hash symbol is added late in the planning, and handling it here would increase complexity - aggregationNode.getHashSymbol().isEmpty(); + aggregationNode.getGroupingSetCount() == 1; } private final DistinctAggregationStrategyChooser distinctAggregationStrategyChooser; diff --git a/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/OptimizeMixedDistinctAggregations.java 
b/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/OptimizeMixedDistinctAggregations.java index f9f9fb427429..abfd1e0c3816 100644 --- a/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/OptimizeMixedDistinctAggregations.java +++ b/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/OptimizeMixedDistinctAggregations.java @@ -336,7 +336,6 @@ public Result apply(AggregationNode node, Captures captures, Context context) singleGroupingSet(ImmutableList.copyOf(innerAggregationGropingKeys)), ImmutableList.of(), SINGLE, - node.getHashSymbol(), Optional.empty()); // 3. Add a new project node with FILTER expressions @@ -352,7 +351,6 @@ public Result apply(AggregationNode node, Captures captures, Context context) node.getGroupingSets(), ImmutableList.of(), node.getStep(), - Optional.empty(), node.getGroupIdSymbol()); Map coalesceSymbols = coalesceSymbolsBuilder.buildOrThrow(); diff --git a/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/PreAggregateCaseAggregations.java b/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/PreAggregateCaseAggregations.java index bf299a40e82d..0de5602456ce 100644 --- a/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/PreAggregateCaseAggregations.java +++ b/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/PreAggregateCaseAggregations.java @@ -213,7 +213,6 @@ private AggregationNode createNewAggregation( aggregationNode.getGroupingSets(), aggregationNode.getPreGroupedSymbols(), aggregationNode.getStep(), - aggregationNode.getHashSymbol(), aggregationNode.getGroupIdSymbol()); } diff --git a/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/PruneAggregationSourceColumns.java b/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/PruneAggregationSourceColumns.java index 884d351cc9fd..fc7c2b65722c 100644 --- a/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/PruneAggregationSourceColumns.java +++ b/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/PruneAggregationSourceColumns.java @@ -43,7 +43,6 @@ public Result apply(AggregationNode aggregationNode, Captures captures, Context { Set requiredInputs = Streams.concat( aggregationNode.getGroupingKeys().stream(), - aggregationNode.getHashSymbol().stream(), aggregationNode.getAggregations().values().stream() .flatMap(aggregation -> SymbolsExtractor.extractUnique(aggregation).stream())) .collect(toImmutableSet()); diff --git a/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/PruneDistinctLimitSourceColumns.java b/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/PruneDistinctLimitSourceColumns.java index 6c91cda701e2..91a9244d9ba6 100644 --- a/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/PruneDistinctLimitSourceColumns.java +++ b/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/PruneDistinctLimitSourceColumns.java @@ -39,7 +39,6 @@ public Result apply(DistinctLimitNode distinctLimit, Captures captures, Context { ImmutableSet.Builder expectedInputs = ImmutableSet.builder(); expectedInputs.addAll(distinctLimit.getOutputSymbols()); - distinctLimit.getHashSymbol().ifPresent(expectedInputs::add); return restrictChildOutputs(context.getIdAllocator(), distinctLimit, expectedInputs.build()) .map(Result::ofPlanNode) diff --git a/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/PruneMarkDistinctColumns.java 
b/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/PruneMarkDistinctColumns.java index 0f344a5a285c..fbd6ff5e3dc6 100644 --- a/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/PruneMarkDistinctColumns.java +++ b/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/PruneMarkDistinctColumns.java @@ -43,8 +43,7 @@ protected Optional pushDownProjectOff(Context context, MarkDistinctNod Set requiredInputs = Streams.concat( referencedOutputs.stream() .filter(symbol -> !symbol.equals(markDistinctNode.getMarkerSymbol())), - markDistinctNode.getDistinctSymbols().stream(), - markDistinctNode.getHashSymbol().stream()) + markDistinctNode.getDistinctSymbols().stream()) .collect(toImmutableSet()); return restrictChildOutputs(context.getIdAllocator(), markDistinctNode, requiredInputs); diff --git a/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/PruneRowNumberColumns.java b/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/PruneRowNumberColumns.java index afd9db4fa976..e247d9dafe28 100644 --- a/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/PruneRowNumberColumns.java +++ b/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/PruneRowNumberColumns.java @@ -54,8 +54,7 @@ protected Optional pushDownProjectOff(Context context, RowNumberNode r Set requiredInputs = Streams.concat( referencedOutputs.stream() .filter(symbol -> !symbol.equals(rowNumberNode.getRowNumberSymbol())), - rowNumberNode.getPartitionBy().stream(), - rowNumberNode.getHashSymbol().stream()) + rowNumberNode.getPartitionBy().stream()) .collect(toImmutableSet()); return restrictChildOutputs(context.getIdAllocator(), rowNumberNode, requiredInputs); diff --git a/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/PruneTopNRankingColumns.java b/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/PruneTopNRankingColumns.java index 9f8895144b46..2dc7954a1f4b 100644 --- a/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/PruneTopNRankingColumns.java +++ b/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/PruneTopNRankingColumns.java @@ -40,8 +40,7 @@ protected Optional pushDownProjectOff(Context context, TopNRankingNode referencedOutputs.stream() .filter(symbol -> !symbol.equals(topNRankingNode.getRankingSymbol())), topNRankingNode.getPartitionBy().stream(), - topNRankingNode.getOrderingScheme().orderBy().stream(), - topNRankingNode.getHashSymbol().stream()) + topNRankingNode.getOrderingScheme().orderBy().stream()) .collect(toImmutableSet()); return restrictChildOutputs(context.getIdAllocator(), topNRankingNode, requiredInputs); diff --git a/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/PushAggregationThroughOuterJoin.java b/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/PushAggregationThroughOuterJoin.java index 2c282819c3e6..30932872c408 100644 --- a/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/PushAggregationThroughOuterJoin.java +++ b/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/PushAggregationThroughOuterJoin.java @@ -46,7 +46,6 @@ import java.util.Optional; import java.util.Set; -import static com.google.common.base.Preconditions.checkArgument; import static com.google.common.base.Preconditions.checkState; import static com.google.common.collect.ImmutableList.toImmutableList; import static io.trino.SystemSessionProperties.isPushAggregationThroughOuterJoin; @@ -121,9 +120,6 @@ public boolean 
isEnabled(Session session) @Override public Result apply(AggregationNode aggregation, Captures captures, Context context) { - // This rule doesn't deal with AggregationNode's hash symbol. Hash symbols are not yet present at this stage of optimization. - checkArgument(aggregation.getHashSymbol().isEmpty(), "unexpected hash symbol"); - JoinNode join = captures.get(JOIN); if (join.getFilter().isPresent() diff --git a/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/PushPartialAggregationThroughExchange.java b/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/PushPartialAggregationThroughExchange.java index 7df67f5bcfea..d46bfa1479a0 100644 --- a/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/PushPartialAggregationThroughExchange.java +++ b/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/PushPartialAggregationThroughExchange.java @@ -129,7 +129,7 @@ public Result apply(AggregationNode aggregationNode, Captures captures, Context } // currently, we only support plans that don't use pre-computed hash functions - if (aggregationNode.getHashSymbol().isPresent() || exchangeNode.getPartitioningScheme().getHashColumn().isPresent()) { + if (exchangeNode.getPartitioningScheme().getHashColumn().isPresent()) { return Result.empty(); } @@ -246,7 +246,6 @@ private PlanNode split(AggregationNode node, Context context) // through the exchange may or may not preserve these properties. Hence, it is safest to drop preGroupedSymbols here. ImmutableList.of(), PARTIAL, - node.getHashSymbol(), node.getGroupIdSymbol()); return new AggregationNode( @@ -258,7 +257,6 @@ private PlanNode split(AggregationNode node, Context context) // through the exchange may or may not preserve these properties. Hence, it is safest to drop preGroupedSymbols here. ImmutableList.of(), FINAL, - node.getHashSymbol(), node.getGroupIdSymbol()); } } diff --git a/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/PushPartialAggregationThroughJoin.java b/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/PushPartialAggregationThroughJoin.java index 9b9561c522ab..1c160f7ff7e5 100644 --- a/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/PushPartialAggregationThroughJoin.java +++ b/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/PushPartialAggregationThroughJoin.java @@ -68,10 +68,6 @@ private static boolean isSupportedAggregationNode(AggregationNode aggregationNod return false; } - if (aggregationNode.getHashSymbol().isPresent()) { - // TODO: add support for hash symbol in aggregation node - return false; - } return aggregationNode.getStep() == PARTIAL && aggregationNode.getGroupingSetCount() == 1; } @@ -355,8 +351,6 @@ private PlanNode toIntermediateAggregation(AggregationNode partialAggregation, P // through the join may or may not preserve these properties. Hence, it is safest to drop preGroupedSymbols here. 
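For orientation, a sketch of how a rule can now construct one half of a split aggregation; the argument order follows the AggregationNode constructor as it appears in this patch, while idAllocator, source, and intermediateAggregations are illustrative names:

    AggregationNode partial = new AggregationNode(
            idAllocator.getNextId(),
            source,
            intermediateAggregations,    // Map<Symbol, Aggregation>
            node.getGroupingSets(),
            ImmutableList.of(),          // preGroupedSymbols dropped, per the comment above
            PARTIAL,
            node.getGroupIdSymbol());    // the Optional<Symbol> hashSymbol slot no longer exists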
ImmutableList.of(), INTERMEDIATE, - // hash symbol is not supported by this rule - Optional.empty(), partialAggregation.getGroupIdSymbol()); } } diff --git a/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/PushPredicateThroughProjectIntoRowNumber.java b/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/PushPredicateThroughProjectIntoRowNumber.java index 76b11475c061..ac2a56acc5ea 100644 --- a/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/PushPredicateThroughProjectIntoRowNumber.java +++ b/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/PushPredicateThroughProjectIntoRowNumber.java @@ -126,8 +126,7 @@ public Result apply(FilterNode filter, Captures captures, Context context) rowNumber.getPartitionBy(), rowNumber.isOrderSensitive(), rowNumber.getRowNumberSymbol(), - Optional.of(upperBound.getAsInt()), - rowNumber.getHashSymbol()); + Optional.of(upperBound.getAsInt())); project = (ProjectNode) project.replaceChildren(ImmutableList.of(rowNumber)); updatedMaxRowCountPerPartition = true; } diff --git a/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/PushPredicateThroughProjectIntoWindow.java b/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/PushPredicateThroughProjectIntoWindow.java index d4070f9ddfd4..68af444a0823 100644 --- a/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/PushPredicateThroughProjectIntoWindow.java +++ b/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/PushPredicateThroughProjectIntoWindow.java @@ -34,7 +34,6 @@ import io.trino.sql.planner.plan.ValuesNode; import io.trino.sql.planner.plan.WindowNode; -import java.util.Optional; import java.util.OptionalInt; import static com.google.common.collect.Iterables.getOnlyElement; @@ -138,8 +137,7 @@ public Result apply(FilterNode filter, Captures captures, Context context) rankingType, rankingSymbol, upperBound.getAsInt(), - false, - Optional.empty()))); + false))); if (!allRankingValuesInDomain(tupleDomain, rankingSymbol, upperBound.getAsInt())) { return Result.ofPlanNode(filter.replaceChildren(ImmutableList.of(project))); } diff --git a/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/PushdownFilterIntoRowNumber.java b/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/PushdownFilterIntoRowNumber.java index dca2d24a2439..10cb4bbe9757 100644 --- a/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/PushdownFilterIntoRowNumber.java +++ b/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/PushdownFilterIntoRowNumber.java @@ -171,7 +171,6 @@ private static RowNumberNode mergeLimit(RowNumberNode node, int newRowCountPerPa node.getPartitionBy(), node.isOrderSensitive(), node.getRowNumberSymbol(), - Optional.of(newRowCountPerPartition), - node.getHashSymbol()); + Optional.of(newRowCountPerPartition)); } } diff --git a/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/PushdownFilterIntoWindow.java b/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/PushdownFilterIntoWindow.java index cece80c57aa2..c18ea8c5294e 100644 --- a/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/PushdownFilterIntoWindow.java +++ b/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/PushdownFilterIntoWindow.java @@ -111,8 +111,7 @@ public Result apply(FilterNode node, Captures captures, Context context) rankingType.get(), rankingSymbol, upperBound.getAsInt(), - false, - Optional.empty()); + false); if 
(!allRowNumberValuesInDomain(tupleDomain, rankingSymbol, upperBound.getAsInt())) { return Result.ofPlanNode(new FilterNode(node.getId(), newSource, node.getPredicate())); diff --git a/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/PushdownLimitIntoRowNumber.java b/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/PushdownLimitIntoRowNumber.java index 57b8cbb375bd..d3422aeae1e7 100644 --- a/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/PushdownLimitIntoRowNumber.java +++ b/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/PushdownLimitIntoRowNumber.java @@ -73,7 +73,6 @@ private static RowNumberNode mergeLimit(RowNumberNode node, LimitNode limitNode) node.getPartitionBy(), limitNode.requiresPreSortedInputs() || node.isOrderSensitive(), node.getRowNumberSymbol(), - Optional.of(newRowCountPerPartition), - node.getHashSymbol()); + Optional.of(newRowCountPerPartition)); } } diff --git a/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/PushdownLimitIntoWindow.java b/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/PushdownLimitIntoWindow.java index 52017af432b4..241c7fc73266 100644 --- a/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/PushdownLimitIntoWindow.java +++ b/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/PushdownLimitIntoWindow.java @@ -82,8 +82,7 @@ public Result apply(LimitNode node, Captures captures, Context context) rankingType.get(), getOnlyElement(source.getWindowFunctions().keySet()), limit, - false, - Optional.empty()); + false); if (rankingType.get() == ROW_NUMBER && source.getPartitionBy().isEmpty()) { return Result.ofPlanNode(topNRowNumberNode); } diff --git a/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/RemoveEmptyGlobalAggregation.java b/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/RemoveEmptyGlobalAggregation.java index 74f5edc8f925..a3724a47eaa7 100644 --- a/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/RemoveEmptyGlobalAggregation.java +++ b/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/RemoveEmptyGlobalAggregation.java @@ -43,8 +43,6 @@ public Pattern getPattern() @Override public Result apply(AggregationNode node, Captures captures, Context context) { - // There should be no hash symbol in a global aggregation - checkArgument(node.getHashSymbol().isEmpty(), "Unexpected hash symbol: %s", node.getHashSymbol()); // There should be no output symbols, since there is no information the aggregation could return checkArgument(node.getOutputSymbols().isEmpty(), "Unexpected output symbols: %s", node.getOutputSymbols()); diff --git a/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/RemoveRedundantDistinctLimit.java b/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/RemoveRedundantDistinctLimit.java index 5f10ea8fc45d..c45cbb333509 100644 --- a/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/RemoveRedundantDistinctLimit.java +++ b/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/RemoveRedundantDistinctLimit.java @@ -25,7 +25,6 @@ import java.util.Optional; -import static com.google.common.base.Preconditions.checkArgument; import static io.trino.sql.planner.optimizations.QueryCardinalityUtil.extractCardinality; import static io.trino.sql.planner.plan.AggregationNode.Step.SINGLE; import static io.trino.sql.planner.plan.AggregationNode.singleGroupingSet; @@ -51,7 +50,6 @@ public Pattern 
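A sketch of the simplified TopNRankingNode construction these pushdown rules now share; specification, rankingSymbol, and limit are illustrative names, and the argument order matches the constructor in this patch:

    TopNRankingNode topN = new TopNRankingNode(
            idAllocator.getNextId(),
            source,
            specification,               // partitioning and ordering
            RankingType.ROW_NUMBER,
            rankingSymbol,
            limit,                       // maxRankingPerPartition, must be > 0
            false);                      // partial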
getPattern() @Override public Result apply(DistinctLimitNode node, Captures captures, Context context) { - checkArgument(node.getHashSymbol().isEmpty(), "HashSymbol should be empty"); if (node.getLimit() == 0) { return Result.ofPlanNode(new ValuesNode(node.getId(), node.getOutputSymbols())); } @@ -67,7 +65,6 @@ public Result apply(DistinctLimitNode node, Captures captures, Context context) singleGroupingSet(node.getDistinctSymbols()), ImmutableList.of(), SINGLE, - node.getHashSymbol(), Optional.empty())); } return Result.empty(); diff --git a/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/ReplaceWindowWithRowNumber.java b/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/ReplaceWindowWithRowNumber.java index 2353e1d0a2db..548d22efa6bf 100644 --- a/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/ReplaceWindowWithRowNumber.java +++ b/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/ReplaceWindowWithRowNumber.java @@ -62,7 +62,6 @@ public Result apply(WindowNode node, Captures captures, Context context) node.getPartitionBy(), false, getOnlyElement(node.getWindowFunctions().keySet()), - Optional.empty(), Optional.empty())); } } diff --git a/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/TransformCorrelatedDistinctAggregationWithoutProjection.java b/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/TransformCorrelatedDistinctAggregationWithoutProjection.java index d3aa9161e934..b5df9fb8c336 100644 --- a/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/TransformCorrelatedDistinctAggregationWithoutProjection.java +++ b/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/TransformCorrelatedDistinctAggregationWithoutProjection.java @@ -138,8 +138,6 @@ public Result apply(CorrelatedJoinNode correlatedJoinNode, Captures captures, Co .build())) .setPreGroupedSymbols( ImmutableList.of()) - .setHashSymbol( - Optional.empty()) .setGroupIdSymbol(Optional.empty()) .build(); diff --git a/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/TransformCorrelatedGroupedAggregationWithProjection.java b/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/TransformCorrelatedGroupedAggregationWithProjection.java index d61515eddde1..98de6640018c 100644 --- a/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/TransformCorrelatedGroupedAggregationWithProjection.java +++ b/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/TransformCorrelatedGroupedAggregationWithProjection.java @@ -206,7 +206,6 @@ public Result apply(CorrelatedJoinNode correlatedJoinNode, Captures captures, Co .addAll(groupedAggregation.getGroupingKeys()) .build())) .setPreGroupedSymbols(ImmutableList.of()) - .setHashSymbol(Optional.empty()) .setGroupIdSymbol(Optional.empty()) .build(); diff --git a/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/TransformCorrelatedGroupedAggregationWithoutProjection.java b/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/TransformCorrelatedGroupedAggregationWithoutProjection.java index 21b92b355f84..f074fc029a04 100644 --- a/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/TransformCorrelatedGroupedAggregationWithoutProjection.java +++ b/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/TransformCorrelatedGroupedAggregationWithoutProjection.java @@ -199,7 +199,6 @@ public Result apply(CorrelatedJoinNode correlatedJoinNode, Captures captures, Co 
.addAll(groupedAggregation.getGroupingKeys()) .build())) .setPreGroupedSymbols(ImmutableList.of()) - .setHashSymbol(Optional.empty()) .setGroupIdSymbol(Optional.empty()) .build(); diff --git a/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/TransformCorrelatedScalarSubquery.java b/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/TransformCorrelatedScalarSubquery.java index b71047ba811f..bf83e3d8310e 100644 --- a/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/TransformCorrelatedScalarSubquery.java +++ b/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/TransformCorrelatedScalarSubquery.java @@ -33,8 +33,6 @@ import io.trino.sql.planner.plan.PlanNode; import io.trino.sql.planner.plan.ProjectNode; -import java.util.Optional; - import static com.google.common.base.Preconditions.checkArgument; import static io.trino.matching.Pattern.nonEmpty; import static io.trino.spi.StandardErrorCode.SUBQUERY_MULTIPLE_ROWS; @@ -153,8 +151,7 @@ public Result apply(CorrelatedJoinNode correlatedJoinNode, Captures captures, Co context.getIdAllocator().getNextId(), rewrittenCorrelatedJoinNode, isDistinct, - rewrittenCorrelatedJoinNode.getInput().getOutputSymbols(), - Optional.empty()); + rewrittenCorrelatedJoinNode.getInput().getOutputSymbols()); FilterNode filterNode = new FilterNode( context.getIdAllocator().getNextId(), diff --git a/core/trino-main/src/main/java/io/trino/sql/planner/optimizations/AddExchanges.java b/core/trino-main/src/main/java/io/trino/sql/planner/optimizations/AddExchanges.java index c49b59c25f4f..da8ff68bd480 100644 --- a/core/trino-main/src/main/java/io/trino/sql/planner/optimizations/AddExchanges.java +++ b/core/trino-main/src/main/java/io/trino/sql/planner/optimizations/AddExchanges.java @@ -264,7 +264,7 @@ else if (!isNodePartitionedOn(child.getProperties(), partitioningRequirement) || .flatMap(partitioningColumns -> useParentPreferredPartitioning(node, partitioningColumns)) .orElse(node.getGroupingKeys()); child = withDerivedProperties( - partitionedExchange(idAllocator.getNextId(), REMOTE, child.getNode(), partitioningKeys, node.getHashSymbol()), + partitionedExchange(idAllocator.getNextId(), REMOTE, child.getNode(), partitioningKeys, Optional.empty()), child.getProperties()); } return rebaseAndDeriveProperties(node, child); @@ -359,7 +359,7 @@ public PlanWithProperties visitMarkDistinct(MarkDistinctNode node, PreferredProp REMOTE, child.getNode(), node.getDistinctSymbols(), - node.getHashSymbol()), + Optional.empty()), child.getProperties()); } @@ -508,7 +508,7 @@ public PlanWithProperties visitRowNumber(RowNumberNode node, PreferredProperties REMOTE, child.getNode(), node.getPartitionBy(), - node.getHashSymbol()), + Optional.empty()), child.getProperties()); } @@ -531,7 +531,7 @@ public PlanWithProperties visitTopNRanking(TopNRankingNode node, PreferredProper preferredChildProperties = computePreference( partitionedWithLocal(ImmutableSet.copyOf(node.getPartitionBy()), grouped(node.getPartitionBy())), preferredProperties); - addExchange = partial -> partitionedExchange(idAllocator.getNextId(), REMOTE, partial, node.getPartitionBy(), node.getHashSymbol()); + addExchange = partial -> partitionedExchange(idAllocator.getNextId(), REMOTE, partial, node.getPartitionBy(), Optional.empty()); } PlanWithProperties child = planChild(node, preferredChildProperties); @@ -545,8 +545,7 @@ public PlanWithProperties visitTopNRanking(TopNRankingNode node, PreferredProper node.getRankingType(), node.getRankingSymbol(), 
node.getMaxRankingPerPartition(), - true, - node.getHashSymbol()), + true), child.getProperties()); child = withDerivedProperties(addExchange.apply(child.getNode()), child.getProperties()); @@ -655,7 +654,7 @@ public PlanWithProperties visitDistinctLimit(DistinctLimitNode node, PreferredPr gatheringExchange( idAllocator.getNextId(), REMOTE, - new DistinctLimitNode(idAllocator.getNextId(), child.getNode(), node.getLimit(), true, node.getDistinctSymbols(), node.getHashSymbol())), + new DistinctLimitNode(idAllocator.getNextId(), child.getNode(), node.getLimit(), true, node.getDistinctSymbols())), child.getProperties()); } diff --git a/core/trino-main/src/main/java/io/trino/sql/planner/optimizations/AddLocalExchanges.java b/core/trino-main/src/main/java/io/trino/sql/planner/optimizations/AddLocalExchanges.java index 3f5697118240..041055d9b1d7 100644 --- a/core/trino-main/src/main/java/io/trino/sql/planner/optimizations/AddLocalExchanges.java +++ b/core/trino-main/src/main/java/io/trino/sql/planner/optimizations/AddLocalExchanges.java @@ -569,8 +569,7 @@ public PlanWithProperties visitMarkDistinct(MarkDistinctNode node, StreamPreferr node.getId(), child.getNode(), node.getMarkerSymbol(), - pruneMarkDistinctSymbols(node, child.getProperties().getLocalProperties()), - node.getHashSymbol()); + pruneMarkDistinctSymbols(node, child.getProperties().getLocalProperties())); return deriveProperties(result, child.getProperties()); } diff --git a/core/trino-main/src/main/java/io/trino/sql/planner/optimizations/LimitPushDown.java b/core/trino-main/src/main/java/io/trino/sql/planner/optimizations/LimitPushDown.java index cee02a466ab2..45fd3fe09b31 100644 --- a/core/trino-main/src/main/java/io/trino/sql/planner/optimizations/LimitPushDown.java +++ b/core/trino-main/src/main/java/io/trino/sql/planner/optimizations/LimitPushDown.java @@ -29,7 +29,6 @@ import java.util.ArrayList; import java.util.List; -import java.util.Optional; import static com.google.common.base.MoreObjects.toStringHelper; import static java.util.Objects.requireNonNull; @@ -134,7 +133,7 @@ public PlanNode visitAggregation(AggregationNode node, RewriteContext rewriteLimitWithRowCountGreaterThanOne(Dec ImmutableList.copyOf(childDecorrelationResult.symbolsToPropagate), false, symbolAllocator.newSymbol("row_number", BIGINT), - Optional.of(toIntExact(node.getCount())), - Optional.empty()); + Optional.of(toIntExact(node.getCount()))); return Optional.of(new DecorrelationResult( rowNumberNode, @@ -337,8 +336,7 @@ public Optional visitTopN(TopNNode node, Void context) ROW_NUMBER, symbolAllocator.newSymbol("ranking", BIGINT), toIntExact(node.getCount()), - false, - Optional.empty()); + false); return Optional.of(new DecorrelationResult( topNRankingNode, @@ -356,8 +354,7 @@ public Optional visitTopN(TopNNode node, Void context) ImmutableList.copyOf(childDecorrelationResult.symbolsToPropagate), false, symbolAllocator.newSymbol("row_number", BIGINT), - Optional.of(toIntExact(node.getCount())), - Optional.empty()); + Optional.of(toIntExact(node.getCount()))); return Optional.of(new DecorrelationResult( rowNumberNode, diff --git a/core/trino-main/src/main/java/io/trino/sql/planner/optimizations/SymbolMapper.java b/core/trino-main/src/main/java/io/trino/sql/planner/optimizations/SymbolMapper.java index e3484a56fe66..a2af75202d71 100644 --- a/core/trino-main/src/main/java/io/trino/sql/planner/optimizations/SymbolMapper.java +++ b/core/trino-main/src/main/java/io/trino/sql/planner/optimizations/SymbolMapper.java @@ -198,7 +198,6 @@ public AggregationNode 
map(AggregationNode node, PlanNode source, PlanNodeId new node.getGlobalGroupingSets()), ImmutableList.of(), node.getStep(), - node.getHashSymbol().map(this::map), node.getGroupIdSymbol().map(this::map)); } @@ -501,8 +500,7 @@ public DistinctLimitNode map(DistinctLimitNode node, PlanNode source) source, node.getLimit(), node.isPartial(), - mapAndDistinct(node.getDistinctSymbols()), - node.getHashSymbol().map(this::map)); + mapAndDistinct(node.getDistinctSymbols())); } public StatisticsWriterNode map(StatisticsWriterNode node, PlanNode source) @@ -636,8 +634,7 @@ public RowNumberNode map(RowNumberNode node, PlanNode source) mapAndDistinct(node.getPartitionBy()), node.isOrderSensitive(), map(node.getRowNumberSymbol()), - node.getMaxRowCountPerPartition(), - node.getHashSymbol().map(this::map)); + node.getMaxRowCountPerPartition()); } public TopNRankingNode map(TopNRankingNode node, PlanNode source) @@ -649,8 +646,7 @@ public TopNRankingNode map(TopNRankingNode node, PlanNode source) node.getRankingType(), map(node.getRankingSymbol()), node.getMaxRankingPerPartition(), - node.isPartial(), - node.getHashSymbol().map(this::map)); + node.isPartial()); } public TopNNode map(TopNNode node, PlanNode source) diff --git a/core/trino-main/src/main/java/io/trino/sql/planner/optimizations/UnaliasSymbolReferences.java b/core/trino-main/src/main/java/io/trino/sql/planner/optimizations/UnaliasSymbolReferences.java index 746efb35d8fa..ecc71a7bbffa 100644 --- a/core/trino-main/src/main/java/io/trino/sql/planner/optimizations/UnaliasSymbolReferences.java +++ b/core/trino-main/src/main/java/io/trino/sql/planner/optimizations/UnaliasSymbolReferences.java @@ -237,15 +237,13 @@ public PlanAndMappings visitMarkDistinct(MarkDistinctNode node, UnaliasContext c Symbol newMarkerSymbol = mapper.map(node.getMarkerSymbol()); List newDistinctSymbols = mapper.mapAndDistinct(node.getDistinctSymbols()); - Optional newHashSymbol = node.getHashSymbol().map(mapper::map); return new PlanAndMappings( new MarkDistinctNode( node.getId(), rewrittenSource.getRoot(), newMarkerSymbol, - newDistinctSymbols, - newHashSymbol), + newDistinctSymbols), mapping); } diff --git a/core/trino-main/src/main/java/io/trino/sql/planner/optimizations/WindowFilterPushDown.java b/core/trino-main/src/main/java/io/trino/sql/planner/optimizations/WindowFilterPushDown.java index 111201b4e9be..bdb3d785aaa0 100644 --- a/core/trino-main/src/main/java/io/trino/sql/planner/optimizations/WindowFilterPushDown.java +++ b/core/trino-main/src/main/java/io/trino/sql/planner/optimizations/WindowFilterPushDown.java @@ -103,7 +103,6 @@ public PlanNode visitWindow(WindowNode node, RewriteContext context) node.getPartitionBy(), false, getOnlyElement(node.getWindowFunctions().keySet()), - Optional.empty(), Optional.empty()); } return replaceChildren(node, ImmutableList.of(rewrittenSource)); @@ -262,8 +261,7 @@ private static RowNumberNode mergeLimit(RowNumberNode node, int newRowCountPerPa node.getPartitionBy(), node.isOrderSensitive(), node.getRowNumberSymbol(), - Optional.of(newRowCountPerPartition), - node.getHashSymbol()); + Optional.of(newRowCountPerPartition)); } private TopNRankingNode convertToTopNRanking(WindowNode windowNode, RankingType rankingType, int limit) @@ -274,8 +272,7 @@ private TopNRankingNode convertToTopNRanking(WindowNode windowNode, RankingType rankingType, getOnlyElement(windowNode.getWindowFunctions().keySet()), limit, - false, - Optional.empty()); + false); } private boolean canReplaceWithRowNumber(WindowNode node) diff --git 
a/core/trino-main/src/main/java/io/trino/sql/planner/plan/AggregationNode.java b/core/trino-main/src/main/java/io/trino/sql/planner/plan/AggregationNode.java index 1c797eb0faec..e8c7c4af5083 100644 --- a/core/trino-main/src/main/java/io/trino/sql/planner/plan/AggregationNode.java +++ b/core/trino-main/src/main/java/io/trino/sql/planner/plan/AggregationNode.java @@ -50,7 +50,6 @@ public class AggregationNode private final GroupingSetDescriptor groupingSets; private final List preGroupedSymbols; private final Step step; - private final Optional hashSymbol; private final Optional groupIdSymbol; private final List outputs; /** @@ -65,7 +64,7 @@ public static AggregationNode singleAggregation( Map aggregations, GroupingSetDescriptor groupingSets) { - return new AggregationNode(id, source, aggregations, groupingSets, ImmutableList.of(), SINGLE, Optional.empty(), Optional.empty()); + return new AggregationNode(id, source, aggregations, groupingSets, ImmutableList.of(), SINGLE, Optional.empty()); } public AggregationNode( @@ -75,10 +74,9 @@ public AggregationNode( GroupingSetDescriptor groupingSets, List preGroupedSymbols, Step step, - Optional hashSymbol, Optional groupIdSymbol) { - this(id, source, aggregations, groupingSets, preGroupedSymbols, step, hashSymbol, groupIdSymbol, Optional.empty()); + this(id, source, aggregations, groupingSets, preGroupedSymbols, step, groupIdSymbol, Optional.empty()); } @JsonCreator @@ -89,7 +87,6 @@ public AggregationNode( @JsonProperty("groupingSets") GroupingSetDescriptor groupingSets, @JsonProperty("preGroupedSymbols") List preGroupedSymbols, @JsonProperty("step") Step step, - @JsonProperty("hashSymbol") Optional hashSymbol, @JsonProperty("groupIdSymbol") Optional groupIdSymbol, @JsonProperty("isInputReducingAggregation") Optional isInputReducingAggregation) { @@ -111,7 +108,6 @@ public AggregationNode( checkArgument(noOrderBy || step == SINGLE, "ORDER BY does not support distributed aggregation"); this.step = step; - this.hashSymbol = hashSymbol; requireNonNull(preGroupedSymbols, "preGroupedSymbols is null"); checkArgument(preGroupedSymbols.isEmpty() || groupingSets.getGroupingKeys().containsAll(preGroupedSymbols), "Pre-grouped symbols must be a subset of the grouping keys"); @@ -119,7 +115,6 @@ public AggregationNode( ImmutableList.Builder outputs = ImmutableList.builder(); outputs.addAll(groupingSets.getGroupingKeys()); - hashSymbol.ifPresent(outputs::add); outputs.addAll(aggregations.keySet()); this.outputs = outputs.build(); @@ -215,12 +210,6 @@ public Step getStep() return step; } - @JsonProperty("hashSymbol") - public Optional getHashSymbol() - { - return hashSymbol; - } - @JsonProperty("groupIdSymbol") public Optional getGroupIdSymbol() { @@ -537,7 +526,6 @@ public static class Builder private GroupingSetDescriptor groupingSets; private List preGroupedSymbols; private Step step; - private Optional hashSymbol; private Optional groupIdSymbol; private Optional isInputReducingAggregation; @@ -550,7 +538,6 @@ public Builder(AggregationNode node) this.groupingSets = node.getGroupingSets(); this.preGroupedSymbols = node.getPreGroupedSymbols(); this.step = node.getStep(); - this.hashSymbol = node.getHashSymbol(); this.groupIdSymbol = node.getGroupIdSymbol(); this.isInputReducingAggregation = node.isInputReducingAggregation; } @@ -591,12 +578,6 @@ public Builder setStep(Step step) return this; } - public Builder setHashSymbol(Optional hashSymbol) - { - this.hashSymbol = requireNonNull(hashSymbol, "hashSymbol is null"); - return this; - } - public Builder 
setGroupIdSymbol(Optional groupIdSymbol) { this.groupIdSymbol = requireNonNull(groupIdSymbol, "groupIdSymbol is null"); @@ -618,7 +599,6 @@ public AggregationNode build() groupingSets, preGroupedSymbols, step, - hashSymbol, groupIdSymbol, isInputReducingAggregation); } diff --git a/core/trino-main/src/main/java/io/trino/sql/planner/plan/DistinctLimitNode.java b/core/trino-main/src/main/java/io/trino/sql/planner/plan/DistinctLimitNode.java index 5b2e56dfe94b..b591715dd19e 100644 --- a/core/trino-main/src/main/java/io/trino/sql/planner/plan/DistinctLimitNode.java +++ b/core/trino-main/src/main/java/io/trino/sql/planner/plan/DistinctLimitNode.java @@ -21,7 +21,6 @@ import io.trino.sql.planner.Symbol; import java.util.List; -import java.util.Optional; import static com.google.common.base.Preconditions.checkArgument; import static java.util.Objects.requireNonNull; @@ -34,7 +33,6 @@ public class DistinctLimitNode private final long limit; private final boolean partial; private final List distinctSymbols; - private final Optional hashSymbol; @JsonCreator public DistinctLimitNode( @@ -42,8 +40,7 @@ public DistinctLimitNode( @JsonProperty("source") PlanNode source, @JsonProperty("limit") long limit, @JsonProperty("partial") boolean partial, - @JsonProperty("distinctSymbols") List distinctSymbols, - @JsonProperty("hashSymbol") Optional hashSymbol) + @JsonProperty("distinctSymbols") List distinctSymbols) { super(id); this.source = requireNonNull(source, "source is null"); @@ -51,8 +48,6 @@ public DistinctLimitNode( this.limit = limit; this.partial = partial; this.distinctSymbols = ImmutableList.copyOf(distinctSymbols); - this.hashSymbol = requireNonNull(hashSymbol, "hashSymbol is null"); - checkArgument(hashSymbol.isEmpty() || !distinctSymbols.contains(hashSymbol.get()), "distinctSymbols should not contain hash symbol"); } @Override @@ -79,12 +74,6 @@ public boolean isPartial() return partial; } - @JsonProperty - public Optional getHashSymbol() - { - return hashSymbol; - } - @JsonProperty public List getDistinctSymbols() { @@ -94,10 +83,7 @@ public List getDistinctSymbols() @Override public List getOutputSymbols() { - ImmutableList.Builder outputSymbols = ImmutableList.builder(); - outputSymbols.addAll(distinctSymbols); - hashSymbol.ifPresent(outputSymbols::add); - return outputSymbols.build(); + return distinctSymbols; } @Override @@ -109,6 +95,6 @@ public R accept(PlanVisitor visitor, C context) @Override public PlanNode replaceChildren(List newChildren) { - return new DistinctLimitNode(getId(), Iterables.getOnlyElement(newChildren), limit, partial, distinctSymbols, hashSymbol); + return new DistinctLimitNode(getId(), Iterables.getOnlyElement(newChildren), limit, partial, distinctSymbols); } } diff --git a/core/trino-main/src/main/java/io/trino/sql/planner/plan/MarkDistinctNode.java b/core/trino-main/src/main/java/io/trino/sql/planner/plan/MarkDistinctNode.java index 4ed269785508..939a8886af0b 100644 --- a/core/trino-main/src/main/java/io/trino/sql/planner/plan/MarkDistinctNode.java +++ b/core/trino-main/src/main/java/io/trino/sql/planner/plan/MarkDistinctNode.java @@ -21,7 +21,6 @@ import io.trino.sql.planner.Symbol; import java.util.List; -import java.util.Optional; import static com.google.common.base.Preconditions.checkArgument; import static java.util.Objects.requireNonNull; @@ -33,20 +32,17 @@ public class MarkDistinctNode private final PlanNode source; private final Symbol markerSymbol; - private final Optional hashSymbol; private final List distinctSymbols; @JsonCreator public 
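Callers that rebuild aggregations through the Builder lose one setter as well. A sketch using only setters that survive this patch, where node is some existing AggregationNode:

    AggregationNode rewritten = new AggregationNode.Builder(node)
            .setStep(AggregationNode.Step.INTERMEDIATE)
            .setGroupIdSymbol(Optional.empty())
            .build();                    // there is no setHashSymbol(...) step anymore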
MarkDistinctNode(@JsonProperty("id") PlanNodeId id, @JsonProperty("source") PlanNode source, @JsonProperty("markerSymbol") Symbol markerSymbol, - @JsonProperty("distinctSymbols") List distinctSymbols, - @JsonProperty("hashSymbol") Optional hashSymbol) + @JsonProperty("distinctSymbols") List distinctSymbols) { super(id); this.source = requireNonNull(source, "source is null"); this.markerSymbol = requireNonNull(markerSymbol, "markerSymbol is null"); - this.hashSymbol = requireNonNull(hashSymbol, "hashSymbol is null"); requireNonNull(distinctSymbols, "distinctSymbols is null"); checkArgument(!distinctSymbols.isEmpty(), "distinctSymbols cannot be empty"); this.distinctSymbols = ImmutableList.copyOf(distinctSymbols); @@ -85,12 +81,6 @@ public List getDistinctSymbols() return distinctSymbols; } - @JsonProperty - public Optional getHashSymbol() - { - return hashSymbol; - } - @Override public R accept(PlanVisitor visitor, C context) { @@ -100,6 +90,6 @@ public R accept(PlanVisitor visitor, C context) @Override public PlanNode replaceChildren(List newChildren) { - return new MarkDistinctNode(getId(), Iterables.getOnlyElement(newChildren), markerSymbol, distinctSymbols, hashSymbol); + return new MarkDistinctNode(getId(), Iterables.getOnlyElement(newChildren), markerSymbol, distinctSymbols); } } diff --git a/core/trino-main/src/main/java/io/trino/sql/planner/plan/RowNumberNode.java b/core/trino-main/src/main/java/io/trino/sql/planner/plan/RowNumberNode.java index 4d84f1254210..58482db98b4e 100644 --- a/core/trino-main/src/main/java/io/trino/sql/planner/plan/RowNumberNode.java +++ b/core/trino-main/src/main/java/io/trino/sql/planner/plan/RowNumberNode.java @@ -43,7 +43,6 @@ public final class RowNumberNode private final boolean orderSensitive; private final Optional maxRowCountPerPartition; private final Symbol rowNumberSymbol; - private final Optional hashSymbol; @JsonCreator public RowNumberNode( @@ -52,8 +51,7 @@ public RowNumberNode( @JsonProperty("partitionBy") List partitionBy, @JsonProperty("orderSensitive") boolean orderSensitive, @JsonProperty("rowNumberSymbol") Symbol rowNumberSymbol, - @JsonProperty("maxRowCountPerPartition") Optional maxRowCountPerPartition, - @JsonProperty("hashSymbol") Optional hashSymbol) + @JsonProperty("maxRowCountPerPartition") Optional maxRowCountPerPartition) { super(id); @@ -63,14 +61,12 @@ public RowNumberNode( requireNonNull(rowNumberSymbol, "rowNumberSymbol is null"); requireNonNull(maxRowCountPerPartition, "maxRowCountPerPartition is null"); checkArgument(maxRowCountPerPartition.isEmpty() || maxRowCountPerPartition.get() > 0, "maxRowCountPerPartition must be greater than zero"); - requireNonNull(hashSymbol, "hashSymbol is null"); this.source = source; this.partitionBy = ImmutableList.copyOf(partitionBy); this.orderSensitive = orderSensitive; this.rowNumberSymbol = rowNumberSymbol; this.maxRowCountPerPartition = maxRowCountPerPartition; - this.hashSymbol = hashSymbol; } @Override @@ -115,12 +111,6 @@ public Optional getMaxRowCountPerPartition() return maxRowCountPerPartition; } - @JsonProperty - public Optional getHashSymbol() - { - return hashSymbol; - } - @Override public R accept(PlanVisitor visitor, C context) { @@ -130,6 +120,6 @@ public R accept(PlanVisitor visitor, C context) @Override public PlanNode replaceChildren(List newChildren) { - return new RowNumberNode(getId(), Iterables.getOnlyElement(newChildren), partitionBy, orderSensitive, rowNumberSymbol, maxRowCountPerPartition, hashSymbol); + return new RowNumberNode(getId(), 
Iterables.getOnlyElement(newChildren), partitionBy, orderSensitive, rowNumberSymbol, maxRowCountPerPartition); } } diff --git a/core/trino-main/src/main/java/io/trino/sql/planner/plan/TopNRankingNode.java b/core/trino-main/src/main/java/io/trino/sql/planner/plan/TopNRankingNode.java index 03f709abb840..ee419e3566fd 100644 --- a/core/trino-main/src/main/java/io/trino/sql/planner/plan/TopNRankingNode.java +++ b/core/trino-main/src/main/java/io/trino/sql/planner/plan/TopNRankingNode.java @@ -22,7 +22,6 @@ import io.trino.sql.planner.Symbol; import java.util.List; -import java.util.Optional; import static com.google.common.base.Preconditions.checkArgument; import static com.google.common.collect.Iterables.concat; @@ -45,7 +44,6 @@ public enum RankingType private final Symbol rankingSymbol; private final int maxRankingPerPartition; private final boolean partial; - private final Optional hashSymbol; @JsonCreator public TopNRankingNode( @@ -55,8 +53,7 @@ public TopNRankingNode( @JsonProperty("rankingType") RankingType rankingType, @JsonProperty("rankingSymbol") Symbol rankingSymbol, @JsonProperty("maxRankingPerPartition") int maxRankingPerPartition, - @JsonProperty("partial") boolean partial, - @JsonProperty("hashSymbol") Optional hashSymbol) + @JsonProperty("partial") boolean partial) { super(id); @@ -66,7 +63,6 @@ public TopNRankingNode( requireNonNull(rankingType, "rankingType is null"); requireNonNull(rankingSymbol, "rankingSymbol is null"); checkArgument(maxRankingPerPartition > 0, "maxRankingPerPartition must be > 0"); - requireNonNull(hashSymbol, "hashSymbol is null"); this.source = source; this.specification = specification; @@ -74,7 +70,6 @@ public TopNRankingNode( this.rankingSymbol = rankingSymbol; this.maxRankingPerPartition = maxRankingPerPartition; this.partial = partial; - this.hashSymbol = hashSymbol; } @Override @@ -138,12 +133,6 @@ public boolean isPartial() return partial; } - @JsonProperty - public Optional getHashSymbol() - { - return hashSymbol; - } - @Override public R accept(PlanVisitor visitor, C context) { @@ -153,6 +142,6 @@ public R accept(PlanVisitor visitor, C context) @Override public PlanNode replaceChildren(List newChildren) { - return new TopNRankingNode(getId(), Iterables.getOnlyElement(newChildren), specification, rankingType, rankingSymbol, maxRankingPerPartition, partial, hashSymbol); + return new TopNRankingNode(getId(), Iterables.getOnlyElement(newChildren), specification, rankingType, rankingSymbol, maxRankingPerPartition, partial); } } diff --git a/core/trino-main/src/main/java/io/trino/sql/planner/planprinter/PlanPrinter.java b/core/trino-main/src/main/java/io/trino/sql/planner/planprinter/PlanPrinter.java index b916bf5d9fee..3190edd42ac8 100644 --- a/core/trino-main/src/main/java/io/trino/sql/planner/planprinter/PlanPrinter.java +++ b/core/trino-main/src/main/java/io/trino/sql/planner/planprinter/PlanPrinter.java @@ -827,8 +827,7 @@ public Void visitDistinctLimit(DistinctLimitNode node, Context context) addNode(node, format("DistinctLimit%s", node.isPartial() ? 
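RowNumberNode follows the same pattern as the other plan nodes. A sketch of the six-argument constructor that remains, with partitionBy and rowNumberSymbol as illustrative names:

    RowNumberNode rowNumber = new RowNumberNode(
            idAllocator.getNextId(),
            source,
            partitionBy,                 // List<Symbol>
            false,                       // orderSensitive
            rowNumberSymbol,
            Optional.of(10));            // maxRowCountPerPartition, positive when present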
"Partial" : ""), ImmutableMap.of( - "limit", String.valueOf(node.getLimit()), - "hash", formatHash(node.getHashSymbol())), + "limit", String.valueOf(node.getLimit())), context); return processChildren(node, new Context(context.isInitialPlan())); } @@ -851,7 +850,7 @@ public Void visitAggregation(AggregationNode node, Context context) NodeRepresentation nodeOutput = addNode( node, "Aggregate", - ImmutableMap.of("type", type, "keys", keys, "hash", formatHash(node.getHashSymbol())), + ImmutableMap.of("type", type, "keys", keys), context); node.getAggregations().forEach((symbol, aggregation) -> @@ -891,8 +890,7 @@ public Void visitMarkDistinct(MarkDistinctNode node, Context context) "MarkDistinct", ImmutableMap.of( "distinct", formatOutputs(node.getDistinctSymbols()), - "marker", anonymizer.anonymize(node.getMarkerSymbol()), - "hash", formatHash(node.getHashSymbol())), + "marker", anonymizer.anonymize(node.getMarkerSymbol())), context); return processChildren(node, new Context(context.isInitialPlan())); @@ -1122,7 +1120,6 @@ public Void visitTopNRanking(TopNRankingNode node, Context context) "TopNRanking", descriptor .put("limit", String.valueOf(node.getMaxRankingPerPartition())) - .put("hash", formatHash(node.getHashSymbol())) .buildOrThrow(), context); @@ -1146,7 +1143,7 @@ public Void visitRowNumber(RowNumberNode node, Context context) NodeRepresentation nodeOutput = addNode( node, "RowNumber", - descriptor.put("hash", formatHash(node.getHashSymbol())).buildOrThrow(), + descriptor.buildOrThrow(), context); nodeOutput.appendDetails("%s := %s", anonymizer.anonymize(node.getRowNumberSymbol()), "row_number()"); diff --git a/core/trino-main/src/test/java/io/trino/cost/TestTopNRankingStatsRule.java b/core/trino-main/src/test/java/io/trino/cost/TestTopNRankingStatsRule.java index 56e018f92482..aa582dc52b06 100644 --- a/core/trino-main/src/test/java/io/trino/cost/TestTopNRankingStatsRule.java +++ b/core/trino-main/src/test/java/io/trino/cost/TestTopNRankingStatsRule.java @@ -74,7 +74,6 @@ public void testRowNumber() ROW_NUMBER, 10, pb.symbol("z", DOUBLE), - Optional.empty(), pb.values(pb.symbol("x", DOUBLE), pb.symbol("y", DOUBLE)))) .withSourceStats(0, PlanNodeStatsEstimate.builder() .setOutputRowCount(10000) @@ -104,7 +103,6 @@ public void testRowNumber() ROW_NUMBER, 20, pb.symbol("z", DOUBLE), - Optional.empty(), pb.values( pb.symbol("x", DOUBLE), pb.symbol("y", DOUBLE), @@ -144,7 +142,6 @@ public void testRank() RANK, 10, pb.symbol("z", DOUBLE), - Optional.empty(), pb.values(pb.symbol("x", DOUBLE), pb.symbol("y", DOUBLE)))) .withSourceStats(0, PlanNodeStatsEstimate.builder() .setOutputRowCount(10000) @@ -174,7 +171,6 @@ public void testRank() RANK, 20, pb.symbol("z", DOUBLE), - Optional.empty(), pb.values( pb.symbol("x", DOUBLE), pb.symbol("y", DOUBLE), @@ -214,7 +210,6 @@ public void testDenseRank() DENSE_RANK, 3, pb.symbol("z", DOUBLE), - Optional.empty(), pb.values(pb.symbol("x", DOUBLE), pb.symbol("y", DOUBLE)))) .withSourceStats(0, PlanNodeStatsEstimate.builder() .setOutputRowCount(10000) @@ -244,7 +239,6 @@ public void testDenseRank() DENSE_RANK, 10, pb.symbol("z", DOUBLE), - Optional.empty(), pb.values( pb.symbol("x", DOUBLE), pb.symbol("y", DOUBLE), @@ -285,7 +279,6 @@ public void testRowNumberWhenOrderByDistinctCountIsNan(RankingType rankingType, rankingType, 10, pb.symbol("z", DOUBLE), - Optional.empty(), pb.values(pb.symbol("x", DOUBLE), pb.symbol("y", DOUBLE)))) .withSourceStats(0, PlanNodeStatsEstimate.builder() .setOutputRowCount(10000) @@ -326,7 +319,6 @@ public void 
testWhenInputRowCountIsNan(RankingType rankingType) rankingType, 10, pb.symbol("z", DOUBLE), - Optional.empty(), pb.values(pb.symbol("x", DOUBLE), pb.symbol("y", DOUBLE)))) .withSourceStats(0, PlanNodeStatsEstimate.builder() .setOutputRowCount(NaN) @@ -354,7 +346,6 @@ public void testWhenPartitionByDistinctCountIsNan(RankingType rankingType) rankingType, 10, pb.symbol("z", DOUBLE), - Optional.empty(), pb.values(pb.symbol("x", DOUBLE), pb.symbol("y", DOUBLE)))) .withSourceStats(0, PlanNodeStatsEstimate.builder() .setOutputRowCount(10000) @@ -382,7 +373,6 @@ public void testWhenSourceRowCountIsZero(RankingType rankingType) rankingType, 10, pb.symbol("z", DOUBLE), - Optional.empty(), pb.values(pb.symbol("x", DOUBLE), pb.symbol("y", DOUBLE)))) .withSourceStats(0, PlanNodeStatsEstimate.builder() .setOutputRowCount(0) diff --git a/core/trino-main/src/test/java/io/trino/operator/BenchmarkGroupByHash.java b/core/trino-main/src/test/java/io/trino/operator/BenchmarkGroupByHash.java index ef187206fbac..ddb898f62fb3 100644 --- a/core/trino-main/src/test/java/io/trino/operator/BenchmarkGroupByHash.java +++ b/core/trino-main/src/test/java/io/trino/operator/BenchmarkGroupByHash.java @@ -14,16 +14,13 @@ package io.trino.operator; import com.google.common.collect.ImmutableList; -import com.google.common.collect.Iterables; import io.airlift.slice.Slice; import io.airlift.slice.Slices; -import io.airlift.slice.XxHash64; import io.trino.spi.Page; import io.trino.spi.PageBuilder; import io.trino.spi.block.Block; import io.trino.spi.block.DictionaryBlock; import io.trino.spi.block.RunLengthEncodedBlock; -import io.trino.spi.type.AbstractLongType; import io.trino.spi.type.Type; import io.trino.spi.type.TypeOperators; import org.openjdk.jmh.annotations.Benchmark; @@ -107,13 +104,10 @@ private static void addInputPagesToHash(GroupByHash groupByHash, List page } } - private static List createBigintPages(int positionCount, int groupCount, int channelCount, boolean hashPrecomputed, boolean useMixedBlockTypes) + private static List createBigintPages(int positionCount, int groupCount, int channelCount, boolean useMixedBlockTypes) { List types = Collections.nCopies(channelCount, BIGINT); ImmutableList.Builder pages = ImmutableList.builder(); - if (hashPrecomputed) { - types = ImmutableList.copyOf(Iterables.concat(types, ImmutableList.of(BIGINT))); - } PageBuilder pageBuilder = new PageBuilder(types); int pageCount = 0; @@ -123,9 +117,6 @@ private static List createBigintPages(int positionCount, int groupCount, i for (int numChannel = 0; numChannel < channelCount; numChannel++) { BIGINT.writeLong(pageBuilder.getBlockBuilder(numChannel), rand); } - if (hashPrecomputed) { - BIGINT.writeLong(pageBuilder.getBlockBuilder(channelCount), AbstractLongType.hash(rand)); - } if (pageBuilder.isFull()) { Page page = pageBuilder.build(); pageBuilder.reset(); @@ -161,13 +152,10 @@ else if (pageCount % 3 == 1) { return pages.build(); } - private static List createVarcharPages(int positionCount, int groupCount, int channelCount, boolean hashPrecomputed) + private static List createVarcharPages(int positionCount, int groupCount, int channelCount) { List types = Collections.nCopies(channelCount, VARCHAR); ImmutableList.Builder pages = ImmutableList.builder(); - if (hashPrecomputed) { - types = ImmutableList.copyOf(Iterables.concat(types, ImmutableList.of(BIGINT))); - } PageBuilder pageBuilder = new PageBuilder(types); for (int position = 0; position < positionCount; position++) { @@ -177,9 +165,6 @@ private static List 
createVarcharPages(int positionCount, int groupCount, for (int channel = 0; channel < channelCount; channel++) { VARCHAR.writeSlice(pageBuilder.getBlockBuilder(channel), value); } - if (hashPrecomputed) { - BIGINT.writeLong(pageBuilder.getBlockBuilder(channelCount), XxHash64.hash(value)); - } if (pageBuilder.isFull()) { pages.add(pageBuilder.build()); pageBuilder.reset(); @@ -200,7 +185,7 @@ public static class MultiChannelBenchmarkData @Param(GROUP_COUNT_STRING) private int groupCount = GROUP_COUNT; - @Param({"PRECOMPUTED", "CACHED", "ON_DEMAND"}) + @Param({"CACHED", "ON_DEMAND"}) private GroupByHashMode hashMode = GroupByHashMode.ON_DEMAND; @Param({"VARCHAR", "BIGINT"}) @@ -215,11 +200,11 @@ public void setup() switch (dataType) { case "VARCHAR" -> { types = Collections.nCopies(channelCount, VARCHAR); - pages = createVarcharPages(POSITIONS, groupCount, channelCount, isHashPrecomputed()); + pages = createVarcharPages(POSITIONS, groupCount, channelCount); } case "BIGINT" -> { types = Collections.nCopies(channelCount, BIGINT); - pages = createBigintPages(POSITIONS, groupCount, channelCount, isHashPrecomputed(), false); + pages = createBigintPages(POSITIONS, groupCount, channelCount, false); } default -> throw new UnsupportedOperationException("Unsupported dataType"); } @@ -240,11 +225,6 @@ public List getTypes() return types; } - public boolean isHashPrecomputed() - { - return hashMode.isHashPrecomputed(); - } - public GroupByHashMode getFlatGroupByHashMode() { return hashMode; @@ -271,9 +251,6 @@ public void setup(MultiChannelBenchmarkData data) } outputTypes = new ArrayList<>(data.getTypes()); - if (data.isHashPrecomputed()) { - outputTypes.add(BIGINT); - } } public GroupByHash getPrefilledHash() diff --git a/core/trino-main/src/test/java/io/trino/operator/BenchmarkGroupByHashOnSimulatedData.java b/core/trino-main/src/test/java/io/trino/operator/BenchmarkGroupByHashOnSimulatedData.java index 319772cd756a..02e6a8fca299 100644 --- a/core/trino-main/src/test/java/io/trino/operator/BenchmarkGroupByHashOnSimulatedData.java +++ b/core/trino-main/src/test/java/io/trino/operator/BenchmarkGroupByHashOnSimulatedData.java @@ -258,7 +258,7 @@ public void setup() .map(channel -> channel.columnType.type) .collect(toImmutableList()); pages = createPages(query); - hashMode = GroupByHash.selectGroupByHashMode(false, false, types); + hashMode = GroupByHash.selectGroupByHashMode(false, types); } private List createPages(AggregationDefinition definition) diff --git a/core/trino-main/src/test/java/io/trino/operator/BenchmarkHashAndStreamingAggregationOperators.java b/core/trino-main/src/test/java/io/trino/operator/BenchmarkHashAndStreamingAggregationOperators.java index 8daf3c09e3b5..2c4713959589 100644 --- a/core/trino-main/src/test/java/io/trino/operator/BenchmarkHashAndStreamingAggregationOperators.java +++ b/core/trino-main/src/test/java/io/trino/operator/BenchmarkHashAndStreamingAggregationOperators.java @@ -203,7 +203,7 @@ public void setup() pages = pagesBuilder.build(); if (hashAggregation) { - operatorFactory = createHashAggregationOperatorFactory(pagesBuilder.getHashChannel(), hashTypes, hashChannels, sumChannel); + operatorFactory = createHashAggregationOperatorFactory(hashTypes, hashChannels, sumChannel); } else { operatorFactory = createStreamingAggregationOperatorFactory(hashTypes, hashChannels, sumChannel); @@ -235,7 +235,6 @@ private OperatorFactory createStreamingAggregationOperatorFactory( } private OperatorFactory createHashAggregationOperatorFactory( - Optional hashChannel, List hashTypes, 
List hashChannels, int sumChannel) @@ -253,7 +252,6 @@ private OperatorFactory createHashAggregationOperatorFactory( ImmutableList.of( COUNT.createAggregatorFactory(SINGLE, ImmutableList.of(0), OptionalInt.empty()), LONG_SUM.createAggregatorFactory(SINGLE, ImmutableList.of(sumChannel), OptionalInt.empty())), - hashChannel, Optional.empty(), 100_000, Optional.of(DataSize.of(16, MEGABYTE)), diff --git a/core/trino-main/src/test/java/io/trino/operator/TestDistinctLimitOperator.java b/core/trino-main/src/test/java/io/trino/operator/TestDistinctLimitOperator.java index 42faa8734c3a..3562c17928dd 100644 --- a/core/trino-main/src/test/java/io/trino/operator/TestDistinctLimitOperator.java +++ b/core/trino-main/src/test/java/io/trino/operator/TestDistinctLimitOperator.java @@ -27,7 +27,6 @@ import org.junit.jupiter.api.parallel.Execution; import java.util.List; -import java.util.Optional; import java.util.concurrent.ExecutorService; import java.util.concurrent.ScheduledExecutorService; @@ -84,7 +83,6 @@ public void testDistinctLimit(boolean hashEnabled) rowPagesBuilder.getTypes(), Ints.asList(0), 5, - rowPagesBuilder.getHashChannel(), hashStrategyCompiler); MaterializedResult expected = resultBuilder(driverContext.getSession(), BIGINT) @@ -120,7 +118,6 @@ public void testDistinctLimitWithPageAlignment(boolean hashEnabled) rowPagesBuilder.getTypes(), Ints.asList(0), 3, - rowPagesBuilder.getHashChannel(), hashStrategyCompiler); MaterializedResult expected = resultBuilder(driverContext.getSession(), BIGINT) @@ -155,7 +152,6 @@ public void testDistinctLimitValuesLessThanLimit(boolean hashEnabled) rowPagesBuilder.getTypes(), Ints.asList(0), 5, - rowPagesBuilder.getHashChannel(), hashStrategyCompiler); MaterializedResult expected = resultBuilder(driverContext.getSession(), BIGINT) @@ -185,7 +181,6 @@ public void testMemoryReservationYield(Type type) ImmutableList.of(type, BIGINT), ImmutableList.of(0), Integer.MAX_VALUE, - Optional.of(1), hashStrategyCompiler); GroupByHashYieldAssertion.GroupByHashYieldResult result = finishOperatorWithYieldingGroupByHash(input, type, operatorFactory, operator -> ((DistinctLimitOperator) operator).getCapacity(), 450_000); diff --git a/core/trino-main/src/test/java/io/trino/operator/TestGroupByHash.java b/core/trino-main/src/test/java/io/trino/operator/TestGroupByHash.java index 9225f193de6a..0f927dd1cbab 100644 --- a/core/trino-main/src/test/java/io/trino/operator/TestGroupByHash.java +++ b/core/trino-main/src/test/java/io/trino/operator/TestGroupByHash.java @@ -44,7 +44,6 @@ import static io.trino.operator.UpdateMemory.NOOP; import static io.trino.spi.type.BigintType.BIGINT; import static io.trino.spi.type.VarcharType.VARCHAR; -import static io.trino.type.TypeTestUtils.getHashBlock; import static org.assertj.core.api.Assertions.assertThat; public class TestGroupByHash @@ -68,10 +67,10 @@ public GroupByHash createGroupByHash() public GroupByHash createGroupByHash(int expectedSize, UpdateMemory updateMemory) { return switch (this) { - case BIGINT -> new BigintGroupByHash(true, expectedSize, updateMemory); + case BIGINT -> new BigintGroupByHash(expectedSize, updateMemory); case FLAT -> new FlatGroupByHash( ImmutableList.of(BigintType.BIGINT), - GroupByHashMode.PRECOMPUTED, + GroupByHashMode.ON_DEMAND, expectedSize, true, new FlatHashStrategyCompiler(new TypeOperators()), @@ -88,8 +87,7 @@ public void testAddPage() for (int tries = 0; tries < 2; tries++) { for (int value = 0; value < MAX_GROUP_ID; value++) { Block block = createLongsBlock(value); - Block hashBlock = 
getHashBlock(ImmutableList.of(BIGINT), block); - Page page = new Page(block, hashBlock); + Page page = new Page(block); for (int addValuesTries = 0; addValuesTries < 10; addValuesTries++) { groupByHash.addPage(page).process(); assertThat(groupByHash.getGroupCount()).isEqualTo(tries == 0 ? value + 1 : MAX_GROUP_ID); @@ -114,10 +112,7 @@ public void testRunLengthEncodedInputPage() for (GroupByHashType groupByHashType : GroupByHashType.values()) { GroupByHash groupByHash = groupByHashType.createGroupByHash(); Block block = createLongsBlock(0L); - Block hashBlock = getHashBlock(ImmutableList.of(BIGINT), block); - Page page = new Page( - RunLengthEncodedBlock.create(block, 2), - RunLengthEncodedBlock.create(hashBlock, 2)); + Page page = new Page(RunLengthEncodedBlock.create(block, 2)); groupByHash.addPage(page).process(); @@ -146,11 +141,8 @@ public void testDictionaryInputPage() for (GroupByHashType groupByHashType : GroupByHashType.values()) { GroupByHash groupByHash = groupByHashType.createGroupByHash(); Block block = createLongsBlock(0L, 1L); - Block hashBlock = getHashBlock(ImmutableList.of(BIGINT), block); int[] ids = new int[] {0, 0, 1, 1}; - Page page = new Page( - DictionaryBlock.create(ids.length, block, ids), - DictionaryBlock.create(ids.length, hashBlock, ids)); + Page page = new Page(DictionaryBlock.create(ids.length, block, ids)); groupByHash.addPage(page).process(); @@ -173,22 +165,19 @@ public void testNullGroup() GroupByHash groupByHash = groupByHashType.createGroupByHash(); Block block = createLongsBlock(0L, null); - Block hashBlock = getHashBlock(ImmutableList.of(BIGINT), block); - Page page = new Page(block, hashBlock); + Page page = new Page(block); // assign null a groupId (which is one since it is the second value added) assertThat(getGroupIds(groupByHash, page)) .containsExactly(0, 1); // Add enough values to force a rehash block = createLongSequenceBlock(1, 132748); - hashBlock = getHashBlock(ImmutableList.of(BIGINT), block); - page = new Page(block, hashBlock); + page = new Page(block); groupByHash.addPage(page).process(); block = createLongsBlock((Long) null); - hashBlock = getHashBlock(ImmutableList.of(BIGINT), block); // null groupId will be 1 (as set above) - assertThat(getGroupIds(groupByHash, new Page(block, hashBlock))) + assertThat(getGroupIds(groupByHash, new Page(block))) .containsExactly(1); } } @@ -200,9 +189,7 @@ public void testGetGroupIds() GroupByHash groupByHash = groupByHashType.createGroupByHash(); for (int tries = 0; tries < 2; tries++) { for (int value = 0; value < MAX_GROUP_ID; value++) { - Block block = createLongsBlock(value); - Block hashBlock = getHashBlock(ImmutableList.of(BIGINT), block); - Page page = new Page(block, hashBlock); + Page page = new Page(createLongsBlock(value)); for (int addValuesTries = 0; addValuesTries < 10; addValuesTries++) { int[] groupIds = getGroupIds(groupByHash, page); assertThat(groupByHash.getGroupCount()).isEqualTo(tries == 0 ?
value + 1 : MAX_GROUP_ID); @@ -220,16 +207,15 @@ public void testAppendTo() { for (GroupByHashType groupByHashType : GroupByHashType.values()) { Block valuesBlock = createLongSequenceBlock(0, 100); - Block hashBlock = getHashBlock(ImmutableList.of(BIGINT), valuesBlock); GroupByHash groupByHash = groupByHashType.createGroupByHash(); - int[] groupIds = getGroupIds(groupByHash, new Page(valuesBlock, hashBlock)); + int[] groupIds = getGroupIds(groupByHash, new Page(valuesBlock)); for (int i = 0; i < valuesBlock.getPositionCount(); i++) { assertThat(groupIds[i]).isEqualTo(i); } assertThat(groupByHash.getGroupCount()).isEqualTo(100); - PageBuilder pageBuilder = new PageBuilder(ImmutableList.of(BIGINT, BIGINT)); + PageBuilder pageBuilder = new PageBuilder(ImmutableList.of(BIGINT)); for (int i = 0; i < groupByHash.getGroupCount(); i++) { pageBuilder.declarePosition(); groupByHash.appendValuesTo(i, pageBuilder); @@ -241,7 +227,6 @@ public void testAppendTo() } assertThat(page.getPositionCount()).isEqualTo(100); BlockAssertions.assertBlockEquals(BIGINT, page.getBlock(0), valuesBlock); - BlockAssertions.assertBlockEquals(BIGINT, page.getBlock(1), hashBlock); } } @@ -254,13 +239,12 @@ public void testAppendToMultipleTuplesPerGroup() values.add(i % 50); } Block valuesBlock = createLongsBlock(values); - Block hashBlock = getHashBlock(ImmutableList.of(BIGINT), valuesBlock); GroupByHash groupByHash = groupByHashType.createGroupByHash(); - groupByHash.getGroupIds(new Page(valuesBlock, hashBlock)).process(); + groupByHash.getGroupIds(new Page(valuesBlock)).process(); assertThat(groupByHash.getGroupCount()).isEqualTo(50); - PageBuilder pageBuilder = new PageBuilder(ImmutableList.of(BIGINT, BIGINT)); + PageBuilder pageBuilder = new PageBuilder(ImmutableList.of(BIGINT)); for (int i = 0; i < groupByHash.getGroupCount(); i++) { pageBuilder.declarePosition(); groupByHash.appendValuesTo(i, pageBuilder); @@ -277,15 +261,14 @@ public void testForceRehash() for (GroupByHashType groupByHashType : GroupByHashType.values()) { // Create a page with positionCount >> expected size of groupByHash Block valuesBlock = createLongSequenceBlock(0, 100); - Block hashBlock = getHashBlock(ImmutableList.of(BIGINT), valuesBlock); // Create GroupByHash with tiny size GroupByHash groupByHash = groupByHashType.createGroupByHash(4, NOOP); - groupByHash.getGroupIds(new Page(valuesBlock, hashBlock)).process(); + groupByHash.getGroupIds(new Page(valuesBlock)).process(); // Ensure that all groups are present in GroupByHash int groupCount = groupByHash.getGroupCount(); - for (int groupId : getGroupIds(groupByHash, new Page(valuesBlock, hashBlock))) { + for (int groupId : getGroupIds(groupByHash, new Page(valuesBlock))) { assertThat(groupId).isLessThan(groupCount); } } @@ -298,15 +281,14 @@ public void testUpdateMemoryVarchar() // Create a page with positionCount >> expected size of groupByHash Block valuesBlock = createStringSequenceBlock(0, 1_000_000); - Block hashBlock = getHashBlock(ImmutableList.of(type), valuesBlock); // Create GroupByHash with tiny size AtomicInteger rehashCount = new AtomicInteger(); - GroupByHash groupByHash = createGroupByHash(ImmutableList.of(type), selectGroupByHashMode(true, false, ImmutableList.of(type)), 1, false, new FlatHashStrategyCompiler(new TypeOperators()), () -> { + GroupByHash groupByHash = createGroupByHash(ImmutableList.of(type), selectGroupByHashMode(false, ImmutableList.of(type)), 1, false, new FlatHashStrategyCompiler(new TypeOperators()), () -> { rehashCount.incrementAndGet(); return true; }); - 
groupByHash.addPage(new Page(valuesBlock, hashBlock)).process(); + groupByHash.addPage(new Page(valuesBlock)).process(); // assert we call update memory twice every time we rehash; the rehash count = log2(length / FILL_RATIO) assertThat(rehashCount.get()).isEqualTo(2 * VARCHAR_EXPECTED_REHASH); @@ -319,15 +301,14 @@ public void testUpdateMemoryBigint() // Create a page with positionCount >> expected size of groupByHash Block valuesBlock = createLongSequenceBlock(0, 1_000_000); - Block hashBlock = getHashBlock(ImmutableList.of(type), valuesBlock); // Create GroupByHash with tiny size AtomicInteger rehashCount = new AtomicInteger(); - GroupByHash groupByHash = createGroupByHash(ImmutableList.of(type), selectGroupByHashMode(true, false, ImmutableList.of(type)), 1, false, new FlatHashStrategyCompiler(new TypeOperators()), () -> { + GroupByHash groupByHash = createGroupByHash(ImmutableList.of(type), selectGroupByHashMode(false, ImmutableList.of(type)), 1, false, new FlatHashStrategyCompiler(new TypeOperators()), () -> { rehashCount.incrementAndGet(); return true; }); - groupByHash.addPage(new Page(valuesBlock, hashBlock)).process(); + groupByHash.addPage(new Page(valuesBlock)).process(); // assert we call update memory twice every time we rehash; the rehash count = log2(length / FILL_RATIO) assertThat(rehashCount.get()).isEqualTo(2 * BIGINT_EXPECTED_REHASH); @@ -344,8 +325,7 @@ public void testMemoryReservationYield() private static void testMemoryReservationYield(Type type, Block valuesBlock, int length, int expectedRehash) { - Block hashBlock = getHashBlock(ImmutableList.of(type), valuesBlock); - Page page = new Page(valuesBlock, hashBlock); + Page page = new Page(valuesBlock); AtomicInteger currentQuota = new AtomicInteger(0); AtomicInteger allowedQuota = new AtomicInteger(6); UpdateMemory updateMemory = () -> { @@ -358,7 +338,7 @@ private static void testMemoryReservationYield(Type type, Block valuesBlock, int int yields = 0; // test addPage - GroupByHash groupByHash = createGroupByHash(ImmutableList.of(type), selectGroupByHashMode(true, false, ImmutableList.of(type)), 1, false, new FlatHashStrategyCompiler(new TypeOperators()), updateMemory); + GroupByHash groupByHash = createGroupByHash(ImmutableList.of(type), selectGroupByHashMode(false, ImmutableList.of(type)), 1, false, new FlatHashStrategyCompiler(new TypeOperators()), updateMemory); boolean finish = false; Work addPageWork = groupByHash.addPage(page); while (!finish) { @@ -384,7 +364,7 @@ private static void testMemoryReservationYield(Type type, Block valuesBlock, int currentQuota.set(0); allowedQuota.set(6); yields = 0; - groupByHash = createGroupByHash(ImmutableList.of(type), selectGroupByHashMode(true, false, ImmutableList.of(type)), 1, false, new FlatHashStrategyCompiler(new TypeOperators()), updateMemory); + groupByHash = createGroupByHash(ImmutableList.of(type), selectGroupByHashMode(false, ImmutableList.of(type)), 1, false, new FlatHashStrategyCompiler(new TypeOperators()), updateMemory); finish = false; Work getGroupIdsWork = groupByHash.getGroupIds(page); @@ -416,8 +396,7 @@ public void testMemoryReservationYieldWithDictionary() int length = 2_000_000; int[] ids = IntStream.range(0, dictionaryLength).toArray(); Block valuesBlock = DictionaryBlock.create(dictionaryLength, createLongSequenceBlock(0, length), ids); - Block hashBlock = DictionaryBlock.create(dictionaryLength, getHashBlock(ImmutableList.of(BIGINT), valuesBlock), ids); - Page page = new Page(valuesBlock, hashBlock); + Page page = new Page(valuesBlock); 
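// A minimal sketch of the call shape after the hash channel's removal, assuming only helpers
// already used in this test (createLongSequenceBlock, NOOP, FlatHashStrategyCompiler) and flag
// values mirroring the calls in this file: input pages now carry just the value blocks, and the
// hash mode is selected without the dropped precomputed-hash argument.
//
//     GroupByHash groupByHash = GroupByHash.createGroupByHash(
//             ImmutableList.of(BIGINT),
//             GroupByHash.selectGroupByHashMode(false, ImmutableList.of(BIGINT)),
//             100,
//             false,
//             new FlatHashStrategyCompiler(new TypeOperators()),
//             NOOP);
//     groupByHash.addPage(new Page(createLongSequenceBlock(0, 100))).process(); // hashes derived on demand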
AtomicInteger currentQuota = new AtomicInteger(0); AtomicInteger allowedQuota = new AtomicInteger(6); UpdateMemory updateMemory = () -> { @@ -451,7 +430,7 @@ public void testMemoryReservationYieldWithDictionary() // assert we yield for every 3 rehashes // currentQuota is essentially the count we have successfully rehashed multiplied by 2 (as updateMemory is called twice per rehash) // the rehash count is 10 = log2(1_000 / 0.75) - assertThat(currentQuota.get()).isEqualTo(2 * (groupByHashType == GroupByHashType.FLAT ? 4 : 13)); + assertThat(currentQuota.get()).isEqualTo(2 * (groupByHashType == GroupByHashType.FLAT ? 10 : 13)); assertThat(currentQuota.get() / 3 / 2).isEqualTo(yields); // test getGroupIds @@ -480,7 +459,7 @@ public void testMemoryReservationYieldWithDictionary() // assert we yield for every 3 rehashes // currentQuota is essentially the count we have successfully rehashed multiplied by 2 (as updateMemory is called twice per rehash) // the rehash count is 10 = log2(1_000 / 0.75) - assertThat(currentQuota.get()).isEqualTo(2 * (groupByHashType == GroupByHashType.FLAT ? 4 : 13)); + assertThat(currentQuota.get()).isEqualTo(2 * (groupByHashType == GroupByHashType.FLAT ? 10 : 13)); assertThat(currentQuota.get() / 3 / 2).isEqualTo(yields); } } @@ -492,7 +471,6 @@ public void testLowCardinalityDictionariesAddPage() TEST_SESSION, ImmutableList.of(BIGINT, BIGINT), false, - false, 100, new FlatHashStrategyCompiler(new TypeOperators()), NOOP); @@ -521,7 +499,6 @@ public void testLowCardinalityDictionariesGetGroupIds() TEST_SESSION, ImmutableList.of(BIGINT, BIGINT, BIGINT, BIGINT, BIGINT), false, - false, 100, new FlatHashStrategyCompiler(new TypeOperators()), NOOP); @@ -530,7 +507,6 @@ public void testLowCardinalityDictionariesGetGroupIds() TEST_SESSION, ImmutableList.of(BIGINT, BIGINT, BIGINT, BIGINT), false, - false, 100, new FlatHashStrategyCompiler(new TypeOperators()), NOOP); @@ -564,7 +540,6 @@ public void testLowCardinalityDictionariesProperGroupIdOrder() TEST_SESSION, ImmutableList.of(BIGINT, BIGINT), false, - false, 100, new FlatHashStrategyCompiler(new TypeOperators()), NOOP); @@ -631,7 +606,7 @@ public void testProperWorkTypesSelected() private static void assertGroupByHashWork(Page page, List types, Class clazz) { - GroupByHash groupByHash = createGroupByHash(types, selectGroupByHashMode(false, false, types), 100, true, new FlatHashStrategyCompiler(new TypeOperators()), NOOP); + GroupByHash groupByHash = createGroupByHash(types, selectGroupByHashMode(false, types), 100, true, new FlatHashStrategyCompiler(new TypeOperators()), NOOP); Work work = groupByHash.getGroupIds(page); // Compare by name since classes are private assertThat(work).isInstanceOf(clazz); diff --git a/core/trino-main/src/test/java/io/trino/operator/TestGroupedTopNRankBuilder.java b/core/trino-main/src/test/java/io/trino/operator/TestGroupedTopNRankBuilder.java index 6b43e15b5e87..71485c8a531e 100644 --- a/core/trino-main/src/test/java/io/trino/operator/TestGroupedTopNRankBuilder.java +++ b/core/trino-main/src/test/java/io/trino/operator/TestGroupedTopNRankBuilder.java @@ -258,7 +258,7 @@ private GroupByHash createGroupByHash(Type partitionType, UpdateMemory updateMem { return GroupByHash.createGroupByHash( ImmutableList.of(partitionType), - GroupByHash.selectGroupByHashMode(false, false, ImmutableList.of(partitionType)), + GroupByHash.selectGroupByHashMode(false, ImmutableList.of(partitionType)), 1, false, new FlatHashStrategyCompiler(typeOperators), diff --git
a/core/trino-main/src/test/java/io/trino/operator/TestGroupedTopNRowNumberBuilder.java b/core/trino-main/src/test/java/io/trino/operator/TestGroupedTopNRowNumberBuilder.java index 6835661fd69a..8137ae5f4e36 100644 --- a/core/trino-main/src/test/java/io/trino/operator/TestGroupedTopNRowNumberBuilder.java +++ b/core/trino-main/src/test/java/io/trino/operator/TestGroupedTopNRowNumberBuilder.java @@ -237,7 +237,7 @@ private static GroupByHash createGroupByHash(List partitionTypes, UpdateMe { return GroupByHash.createGroupByHash( partitionTypes, - GroupByHash.selectGroupByHashMode(false, false, partitionTypes), + GroupByHash.selectGroupByHashMode(false, partitionTypes), 1, false, new FlatHashStrategyCompiler(new TypeOperators()), diff --git a/core/trino-main/src/test/java/io/trino/operator/TestHashAggregationOperator.java b/core/trino-main/src/test/java/io/trino/operator/TestHashAggregationOperator.java index 09fab9cea37a..acba5f72b28f 100644 --- a/core/trino-main/src/test/java/io/trino/operator/TestHashAggregationOperator.java +++ b/core/trino-main/src/test/java/io/trino/operator/TestHashAggregationOperator.java @@ -76,7 +76,6 @@ import static io.trino.operator.GroupByHashYieldAssertion.finishOperatorWithYieldingGroupByHash; import static io.trino.operator.OperatorAssertion.assertOperatorEqualsIgnoreOrder; import static io.trino.operator.OperatorAssertion.assertPagesEqualIgnoreOrder; -import static io.trino.operator.OperatorAssertion.dropChannel; import static io.trino.operator.OperatorAssertion.toMaterializedResult; import static io.trino.operator.OperatorAssertion.toPages; import static io.trino.operator.SpillMetrics.SPILL_COUNT_METRIC_NAME; @@ -126,18 +125,16 @@ public void tearDown() @Test public void testHashAggregation() { - testHashAggregation(true, true, true, 8, Integer.MAX_VALUE); - testHashAggregation(true, true, false, 8, Integer.MAX_VALUE); - testHashAggregation(false, false, false, 0, 0); - testHashAggregation(false, true, true, 0, 0); - testHashAggregation(false, true, false, 0, 0); - testHashAggregation(false, true, true, 8, 0); - testHashAggregation(false, true, false, 8, 0); - testHashAggregation(false, true, true, 8, Integer.MAX_VALUE); - testHashAggregation(false, true, false, 8, Integer.MAX_VALUE); + testHashAggregation(true, true, 8, Integer.MAX_VALUE); + testHashAggregation(true, false, 8, Integer.MAX_VALUE); + testHashAggregation(false, false, 0, 0); + testHashAggregation(true, true, 0, 0); + testHashAggregation(true, false, 0, 0); + testHashAggregation(true, true, 8, 0); + testHashAggregation(true, false, 8, 0); } - private void testHashAggregation(boolean hashEnabled, boolean spillEnabled, boolean revokeMemoryWhenAddingPages, long memoryLimitForMerge, long memoryLimitForMergeWithMemory) + private void testHashAggregation(boolean spillEnabled, boolean revokeMemoryWhenAddingPages, long memoryLimitForMerge, long memoryLimitForMergeWithMemory) { DummySpillerFactory spillerFactory = new DummySpillerFactory(); @@ -147,7 +144,7 @@ private void testHashAggregation(boolean hashEnabled, boolean spillEnabled, bool TestingAggregationFunction countBooleanColumn = FUNCTION_RESOLUTION.getAggregateFunction("count", fromTypes(BOOLEAN)); TestingAggregationFunction maxVarcharColumn = FUNCTION_RESOLUTION.getAggregateFunction("max", fromTypes(VARCHAR)); List hashChannels = Ints.asList(1); - RowPagesBuilder rowPagesBuilder = rowPagesBuilder(hashEnabled, hashChannels, VARCHAR, VARCHAR, VARCHAR, BIGINT, BOOLEAN); + RowPagesBuilder rowPagesBuilder = rowPagesBuilder(false, hashChannels, 
VARCHAR, VARCHAR, VARCHAR, BIGINT, BOOLEAN); List input = rowPagesBuilder .addSequencePage(numberOfRows, 100, 0, 100_000, 0, 500) .addSequencePage(numberOfRows, 100, 0, 200_000, 0, 500) @@ -168,7 +165,6 @@ private void testHashAggregation(boolean hashEnabled, boolean spillEnabled, bool maxVarcharColumn.createAggregatorFactory(SINGLE, ImmutableList.of(2), OptionalInt.empty()), countVarcharColumn.createAggregatorFactory(SINGLE, ImmutableList.of(0), OptionalInt.empty()), countBooleanColumn.createAggregatorFactory(SINGLE, ImmutableList.of(4), OptionalInt.empty())), - rowPagesBuilder.getHashChannel(), Optional.empty(), 100_000, Optional.of(DataSize.of(16, MEGABYTE)), @@ -189,7 +185,7 @@ private void testHashAggregation(boolean hashEnabled, boolean spillEnabled, bool List pages = toPages(operatorFactory, driverContext, input, revokeMemoryWhenAddingPages); assertThat(pages).as("Expected more than one output page").hasSizeGreaterThan(1); - assertPagesEqualIgnoreOrder(driverContext, pages, expected, hashEnabled, Optional.of(hashChannels.size())); + assertPagesEqualIgnoreOrder(driverContext, pages, expected, false, Optional.of(hashChannels.size())); assertThat(spillEnabled == (spillerFactory.getSpillsCount() > 0)) .describedAs(format("Spill state mismatch. Expected spill: %s, spill count: %s", spillEnabled, spillerFactory.getSpillsCount())) @@ -199,18 +195,16 @@ private void testHashAggregation(boolean hashEnabled, boolean spillEnabled, bool @Test public void testHashAggregationWithGlobals() { - testHashAggregationWithGlobals(true, true, true, 8, Integer.MAX_VALUE); - testHashAggregationWithGlobals(true, true, false, 8, Integer.MAX_VALUE); - testHashAggregationWithGlobals(false, false, false, 0, 0); - testHashAggregationWithGlobals(false, true, true, 0, 0); - testHashAggregationWithGlobals(false, true, false, 0, 0); - testHashAggregationWithGlobals(false, true, true, 8, 0); - testHashAggregationWithGlobals(false, true, false, 8, 0); - testHashAggregationWithGlobals(false, true, true, 8, Integer.MAX_VALUE); - testHashAggregationWithGlobals(false, true, false, 8, Integer.MAX_VALUE); + testHashAggregationWithGlobals(true, true, 8, Integer.MAX_VALUE); + testHashAggregationWithGlobals(true, false, 8, Integer.MAX_VALUE); + testHashAggregationWithGlobals(false, false, 0, 0); + testHashAggregationWithGlobals(true, true, 0, 0); + testHashAggregationWithGlobals(true, false, 0, 0); + testHashAggregationWithGlobals(true, true, 8, 0); + testHashAggregationWithGlobals(true, false, 8, 0); } - private void testHashAggregationWithGlobals(boolean hashEnabled, boolean spillEnabled, boolean revokeMemoryWhenAddingPages, long memoryLimitForMerge, long memoryLimitForMergeWithMemory) + private void testHashAggregationWithGlobals(boolean spillEnabled, boolean revokeMemoryWhenAddingPages, long memoryLimitForMerge, long memoryLimitForMergeWithMemory) { DummySpillerFactory spillerFactory = new DummySpillerFactory(); @@ -221,7 +215,7 @@ private void testHashAggregationWithGlobals(boolean hashEnabled, boolean spillEn Optional groupIdChannel = Optional.of(1); List groupByChannels = Ints.asList(1, 2); List globalAggregationGroupIds = Ints.asList(42, 49); - RowPagesBuilder rowPagesBuilder = rowPagesBuilder(hashEnabled, groupByChannels, VARCHAR, VARCHAR, VARCHAR, BIGINT, BIGINT, BOOLEAN); + RowPagesBuilder rowPagesBuilder = rowPagesBuilder(false, groupByChannels, VARCHAR, VARCHAR, VARCHAR, BIGINT, BIGINT, BOOLEAN); List input = rowPagesBuilder.build(); HashAggregationOperatorFactory operatorFactory = new 
HashAggregationOperatorFactory( @@ -238,7 +232,6 @@ private void testHashAggregationWithGlobals(boolean hashEnabled, boolean spillEn maxVarcharColumn.createAggregatorFactory(SINGLE, ImmutableList.of(2), OptionalInt.empty()), countVarcharColumn.createAggregatorFactory(SINGLE, ImmutableList.of(0), OptionalInt.empty()), countBooleanColumn.createAggregatorFactory(SINGLE, ImmutableList.of(5), OptionalInt.empty())), - rowPagesBuilder.getHashChannel(), groupIdChannel, 100_000, Optional.of(DataSize.of(16, MEGABYTE)), @@ -255,31 +248,29 @@ private void testHashAggregationWithGlobals(boolean hashEnabled, boolean spillEn .row(null, 49L, 0L, null, null, null, 0L, 0L) .build(); - assertOperatorEqualsIgnoreOrder(operatorFactory, driverContext, input, expected, hashEnabled, Optional.of(groupByChannels.size()), revokeMemoryWhenAddingPages); + assertOperatorEqualsIgnoreOrder(operatorFactory, driverContext, input, expected, false, Optional.of(groupByChannels.size()), revokeMemoryWhenAddingPages); } @Test public void testHashAggregationMemoryReservation() { - testHashAggregationMemoryReservation(true, true, true, 8, Integer.MAX_VALUE); - testHashAggregationMemoryReservation(true, true, false, 8, Integer.MAX_VALUE); - testHashAggregationMemoryReservation(false, false, false, 0, 0); - testHashAggregationMemoryReservation(false, true, true, 0, 0); - testHashAggregationMemoryReservation(false, true, false, 0, 0); - testHashAggregationMemoryReservation(false, true, true, 8, 0); - testHashAggregationMemoryReservation(false, true, false, 8, 0); - testHashAggregationMemoryReservation(false, true, true, 8, Integer.MAX_VALUE); - testHashAggregationMemoryReservation(false, true, false, 8, Integer.MAX_VALUE); + testHashAggregationMemoryReservation(true, true, 8, Integer.MAX_VALUE); + testHashAggregationMemoryReservation(true, false, 8, Integer.MAX_VALUE); + testHashAggregationMemoryReservation(false, false, 0, 0); + testHashAggregationMemoryReservation(true, true, 0, 0); + testHashAggregationMemoryReservation(true, false, 0, 0); + testHashAggregationMemoryReservation(true, true, 8, 0); + testHashAggregationMemoryReservation(true, false, 8, 0); } - private void testHashAggregationMemoryReservation(boolean hashEnabled, boolean spillEnabled, boolean revokeMemoryWhenAddingPages, long memoryLimitForMerge, long memoryLimitForMergeWithMemory) + private void testHashAggregationMemoryReservation(boolean spillEnabled, boolean revokeMemoryWhenAddingPages, long memoryLimitForMerge, long memoryLimitForMergeWithMemory) { DummySpillerFactory spillerFactory = new DummySpillerFactory(); TestingAggregationFunction arrayAggColumn = FUNCTION_RESOLUTION.getAggregateFunction("array_agg", fromTypes(BIGINT)); List hashChannels = Ints.asList(1); - RowPagesBuilder rowPagesBuilder = rowPagesBuilder(hashEnabled, hashChannels, BIGINT, BIGINT); + RowPagesBuilder rowPagesBuilder = rowPagesBuilder(false, hashChannels, BIGINT, BIGINT); List input = rowPagesBuilder .addSequencePage(10, 100, 0) .addSequencePage(10, 200, 0) @@ -299,7 +290,6 @@ private void testHashAggregationMemoryReservation(boolean hashEnabled, boolean s SINGLE, true, ImmutableList.of(arrayAggColumn.createAggregatorFactory(SINGLE, ImmutableList.of(0), OptionalInt.empty())), - rowPagesBuilder.getHashChannel(), Optional.empty(), 100_000, Optional.of(DataSize.of(16, MEGABYTE)), @@ -320,67 +310,57 @@ private void testHashAggregationMemoryReservation(boolean hashEnabled, boolean s @Test public void testMemoryLimit() { - assertThatThrownBy(() -> testMemoryLimit(true)) - 
.isInstanceOf(ExceededMemoryLimitException.class) - .hasMessageMatching("Query exceeded per-node memory limit of 10B.*"); + assertThatThrownBy(() -> { + TestingAggregationFunction maxVarcharColumn = FUNCTION_RESOLUTION.getAggregateFunction("max", fromTypes(VARCHAR)); + + List hashChannels = Ints.asList(1); + RowPagesBuilder rowPagesBuilder = rowPagesBuilder(false, hashChannels, VARCHAR, BIGINT, VARCHAR, BIGINT); + List input = rowPagesBuilder + .addSequencePage(10, 100, 0, 100, 0) + .addSequencePage(10, 100, 0, 200, 0) + .addSequencePage(10, 100, 0, 300, 0) + .build(); - assertThatThrownBy(() -> testMemoryLimit(false)) + DriverContext driverContext = createTaskContext(executor, scheduledExecutor, TEST_SESSION, DataSize.ofBytes(10)) + .addPipelineContext(0, true, true, false) + .addDriverContext(); + + HashAggregationOperatorFactory operatorFactory = new HashAggregationOperatorFactory( + 0, + new PlanNodeId("test"), + ImmutableList.of(BIGINT), + hashChannels, + ImmutableList.of(), + SINGLE, + ImmutableList.of(COUNT.createAggregatorFactory(SINGLE, ImmutableList.of(0), OptionalInt.empty()), + LONG_MIN.createAggregatorFactory(SINGLE, ImmutableList.of(3), OptionalInt.empty()), + LONG_AVERAGE.createAggregatorFactory(SINGLE, ImmutableList.of(3), OptionalInt.empty()), + maxVarcharColumn.createAggregatorFactory(SINGLE, ImmutableList.of(2), OptionalInt.empty())), + Optional.empty(), + 100_000, + Optional.of(DataSize.of(16, MEGABYTE)), + hashStrategyCompiler, + Optional.empty()); + + toPages(operatorFactory, driverContext, input); + }) .isInstanceOf(ExceededMemoryLimitException.class) .hasMessageMatching("Query exceeded per-node memory limit of 10B.*"); } - private void testMemoryLimit(boolean hashEnabled) - { - TestingAggregationFunction maxVarcharColumn = FUNCTION_RESOLUTION.getAggregateFunction("max", fromTypes(VARCHAR)); - - List hashChannels = Ints.asList(1); - RowPagesBuilder rowPagesBuilder = rowPagesBuilder(hashEnabled, hashChannels, VARCHAR, BIGINT, VARCHAR, BIGINT); - List input = rowPagesBuilder - .addSequencePage(10, 100, 0, 100, 0) - .addSequencePage(10, 100, 0, 200, 0) - .addSequencePage(10, 100, 0, 300, 0) - .build(); - - DriverContext driverContext = createTaskContext(executor, scheduledExecutor, TEST_SESSION, DataSize.ofBytes(10)) - .addPipelineContext(0, true, true, false) - .addDriverContext(); - - HashAggregationOperatorFactory operatorFactory = new HashAggregationOperatorFactory( - 0, - new PlanNodeId("test"), - ImmutableList.of(BIGINT), - hashChannels, - ImmutableList.of(), - SINGLE, - ImmutableList.of(COUNT.createAggregatorFactory(SINGLE, ImmutableList.of(0), OptionalInt.empty()), - LONG_MIN.createAggregatorFactory(SINGLE, ImmutableList.of(3), OptionalInt.empty()), - LONG_AVERAGE.createAggregatorFactory(SINGLE, ImmutableList.of(3), OptionalInt.empty()), - maxVarcharColumn.createAggregatorFactory(SINGLE, ImmutableList.of(2), OptionalInt.empty())), - rowPagesBuilder.getHashChannel(), - Optional.empty(), - 100_000, - Optional.of(DataSize.of(16, MEGABYTE)), - hashStrategyCompiler, - Optional.empty()); - - toPages(operatorFactory, driverContext, input); - } - @Test public void testHashBuilderResize() { - testHashBuilderResize(true, true, true, 8, Integer.MAX_VALUE); - testHashBuilderResize(true, true, false, 8, Integer.MAX_VALUE); - testHashBuilderResize(false, false, false, 0, 0); - testHashBuilderResize(false, true, true, 0, 0); - testHashBuilderResize(false, true, false, 0, 0); - testHashBuilderResize(false, true, true, 8, 0); - testHashBuilderResize(false, true, false, 8, 0); - 
testHashBuilderResize(false, true, true, 8, Integer.MAX_VALUE); - testHashBuilderResize(false, true, false, 8, Integer.MAX_VALUE); + testHashBuilderResize(true, true, 8, Integer.MAX_VALUE); + testHashBuilderResize(true, false, 8, Integer.MAX_VALUE); + testHashBuilderResize(false, false, 0, 0); + testHashBuilderResize(true, true, 0, 0); + testHashBuilderResize(true, false, 0, 0); + testHashBuilderResize(true, true, 8, 0); + testHashBuilderResize(true, false, 8, 0); } - private void testHashBuilderResize(boolean hashEnabled, boolean spillEnabled, boolean revokeMemoryWhenAddingPages, long memoryLimitForMerge, long memoryLimitForMergeWithMemory) + private void testHashBuilderResize(boolean spillEnabled, boolean revokeMemoryWhenAddingPages, long memoryLimitForMerge, long memoryLimitForMergeWithMemory) { DummySpillerFactory spillerFactory = new DummySpillerFactory(); @@ -389,7 +369,7 @@ private void testHashBuilderResize(boolean hashEnabled, boolean spillEnabled, bo builder.build(); List hashChannels = Ints.asList(0); - RowPagesBuilder rowPagesBuilder = rowPagesBuilder(hashEnabled, hashChannels, VARCHAR); + RowPagesBuilder rowPagesBuilder = rowPagesBuilder(false, hashChannels, VARCHAR); List input = rowPagesBuilder .addSequencePage(10, 100) .addBlocksPage(builder.build()) @@ -407,7 +387,6 @@ private void testHashBuilderResize(boolean hashEnabled, boolean spillEnabled, bo SINGLE, false, ImmutableList.of(COUNT.createAggregatorFactory(SINGLE, ImmutableList.of(0), OptionalInt.empty())), - rowPagesBuilder.getHashChannel(), Optional.empty(), 100_000, Optional.of(DataSize.of(16, MEGABYTE)), @@ -440,7 +419,6 @@ public void testMemoryReservationYield(Type type) SINGLE, ImmutableList.of(COUNT.createAggregatorFactory(SINGLE, ImmutableList.of(0), OptionalInt.empty())), Optional.of(1), - Optional.empty(), 1, Optional.of(DataSize.of(16, MEGABYTE)), hashStrategyCompiler, @@ -454,10 +432,10 @@ public void testMemoryReservationYield(Type type) int count = 0; for (Page page : result.getOutput()) { - // value + hash + aggregation result - assertThat(page.getChannelCount()).isEqualTo(3); + // value + aggregation result + assertThat(page.getChannelCount()).isEqualTo(2); for (int i = 0; i < page.getPositionCount(); i++) { - assertThat(BIGINT.getLong(page.getBlock(2), i)).isEqualTo(1); + assertThat(BIGINT.getLong(page.getBlock(1), i)).isEqualTo(1); count++; } } @@ -467,59 +445,45 @@ public void testMemoryReservationYield(Type type) @Test public void testHashBuilderResizeLimit() { - assertThatThrownBy(() -> testHashBuilderResizeLimit(true)) - .isInstanceOf(ExceededMemoryLimitException.class) - .hasMessageMatching("Query exceeded per-node memory limit of 3MB.*"); + assertThatThrownBy(() -> { + BlockBuilder builder = VARCHAR.createBlockBuilder(null, 1, MAX_BLOCK_SIZE_IN_BYTES); + VARCHAR.writeSlice(builder, Slices.allocate(5_000_000)); // this must be larger than MAX_BLOCK_SIZE_IN_BYTES, 64K + builder.build(); + + List hashChannels = Ints.asList(0); + RowPagesBuilder rowPagesBuilder = rowPagesBuilder(false, hashChannels, VARCHAR); + List input = rowPagesBuilder + .addSequencePage(10, 100) + .addBlocksPage(builder.build()) + .addSequencePage(10, 100) + .build(); - assertThatThrownBy(() -> testHashBuilderResizeLimit(false)) + DriverContext driverContext = createTaskContext(executor, scheduledExecutor, TEST_SESSION, DataSize.of(3, MEGABYTE)) + .addPipelineContext(0, true, true, false) + .addDriverContext(); + + HashAggregationOperatorFactory operatorFactory = new HashAggregationOperatorFactory( + 0, + new 
PlanNodeId("test"), + ImmutableList.of(VARCHAR), + hashChannels, + ImmutableList.of(), + SINGLE, + ImmutableList.of(COUNT.createAggregatorFactory(SINGLE, ImmutableList.of(0), OptionalInt.empty())), + Optional.empty(), + 100_000, + Optional.of(DataSize.of(16, MEGABYTE)), + hashStrategyCompiler, + Optional.empty()); + + toPages(operatorFactory, driverContext, input); + }) .isInstanceOf(ExceededMemoryLimitException.class) .hasMessageMatching("Query exceeded per-node memory limit of 3MB.*"); } - private void testHashBuilderResizeLimit(boolean hashEnabled) - { - BlockBuilder builder = VARCHAR.createBlockBuilder(null, 1, MAX_BLOCK_SIZE_IN_BYTES); - VARCHAR.writeSlice(builder, Slices.allocate(5_000_000)); // this must be larger than MAX_BLOCK_SIZE_IN_BYTES, 64K - builder.build(); - - List hashChannels = Ints.asList(0); - RowPagesBuilder rowPagesBuilder = rowPagesBuilder(hashEnabled, hashChannels, VARCHAR); - List input = rowPagesBuilder - .addSequencePage(10, 100) - .addBlocksPage(builder.build()) - .addSequencePage(10, 100) - .build(); - - DriverContext driverContext = createTaskContext(executor, scheduledExecutor, TEST_SESSION, DataSize.of(3, MEGABYTE)) - .addPipelineContext(0, true, true, false) - .addDriverContext(); - - HashAggregationOperatorFactory operatorFactory = new HashAggregationOperatorFactory( - 0, - new PlanNodeId("test"), - ImmutableList.of(VARCHAR), - hashChannels, - ImmutableList.of(), - SINGLE, - ImmutableList.of(COUNT.createAggregatorFactory(SINGLE, ImmutableList.of(0), OptionalInt.empty())), - rowPagesBuilder.getHashChannel(), - Optional.empty(), - 100_000, - Optional.of(DataSize.of(16, MEGABYTE)), - hashStrategyCompiler, - Optional.empty()); - - toPages(operatorFactory, driverContext, input); - } - @Test public void testMultiSliceAggregationOutput() - { - testMultiSliceAggregationOutput(true); - testMultiSliceAggregationOutput(false); - } - - private void testMultiSliceAggregationOutput(boolean hashEnabled) { // estimate the number of entries required to create 1.5 pages of results // See InMemoryHashAggregationBuilder.buildTypes() @@ -528,7 +492,7 @@ private void testMultiSliceAggregationOutput(boolean hashEnabled) int multiSlicePositionCount = (int) (1.5 * PageBuilderStatus.DEFAULT_MAX_PAGE_SIZE_IN_BYTES / fixedWidthSize); List hashChannels = Ints.asList(1); - RowPagesBuilder rowPagesBuilder = rowPagesBuilder(hashEnabled, hashChannels, BIGINT, BIGINT); + RowPagesBuilder rowPagesBuilder = rowPagesBuilder(false, hashChannels, BIGINT, BIGINT); List input = rowPagesBuilder .addSequencePage(multiSlicePositionCount, 0, 0) .build(); @@ -542,7 +506,6 @@ private void testMultiSliceAggregationOutput(boolean hashEnabled) SINGLE, ImmutableList.of(COUNT.createAggregatorFactory(SINGLE, ImmutableList.of(0), OptionalInt.empty()), LONG_AVERAGE.createAggregatorFactory(SINGLE, ImmutableList.of(1), OptionalInt.empty())), - rowPagesBuilder.getHashChannel(), Optional.empty(), 100_000, Optional.of(DataSize.of(16, MEGABYTE)), @@ -555,16 +518,9 @@ private void testMultiSliceAggregationOutput(boolean hashEnabled) @Test public void testMultiplePartialFlushes() throws Exception - { - testMultiplePartialFlushes(true); - testMultiplePartialFlushes(false); - } - - private void testMultiplePartialFlushes(boolean hashEnabled) - throws Exception { List hashChannels = Ints.asList(0); - RowPagesBuilder rowPagesBuilder = rowPagesBuilder(hashEnabled, hashChannels, BIGINT); + RowPagesBuilder rowPagesBuilder = rowPagesBuilder(false, hashChannels, BIGINT); List input = rowPagesBuilder .addSequencePage(500, 0) 
.addSequencePage(500, 500) @@ -580,7 +536,6 @@ private void testMultiplePartialFlushes(boolean hashEnabled) ImmutableList.of(), PARTIAL, ImmutableList.of(LONG_MIN.createAggregatorFactory(PARTIAL, ImmutableList.of(0), OptionalInt.empty())), - rowPagesBuilder.getHashChannel(), Optional.empty(), 100_000, Optional.of(DataSize.of(1, KILOBYTE)), @@ -625,12 +580,7 @@ private void testMultiplePartialFlushes(boolean hashEnabled) // Now, drive the operator to completion outputPages.addAll(toPages(operator, inputIterator)); - MaterializedResult actual; - if (hashEnabled) { - // Drop the hashChannel for all pages - outputPages = dropChannel(outputPages, ImmutableList.of(1)); - } - actual = toMaterializedResult(operator.getOperatorContext().getSession(), expected.getTypes(), outputPages); + MaterializedResult actual = toMaterializedResult(operator.getOperatorContext().getSession(), expected.getTypes(), outputPages); assertThat(actual.getTypes()).isEqualTo(expected.getTypes()); assertThat(actual.getMaterializedRows()).containsExactlyInAnyOrderElementsOf(expected.getMaterializedRows()); @@ -663,7 +613,6 @@ public void testMergeWithMemorySpill() SINGLE, false, ImmutableList.of(LONG_MIN.createAggregatorFactory(SINGLE, ImmutableList.of(0), OptionalInt.empty())), - rowPagesBuilder.getHashChannel(), Optional.empty(), 1, Optional.of(DataSize.of(16, MEGABYTE)), @@ -710,7 +659,6 @@ public void testSpillMetricsRecorded() SINGLE, false, ImmutableList.of(LONG_MIN.createAggregatorFactory(SINGLE, ImmutableList.of(0), OptionalInt.empty())), - pages.getHashChannel(), Optional.empty(), 10, Optional.of(DataSize.of(16, MEGABYTE)), @@ -779,7 +727,6 @@ public void testSpillerFailure() LONG_MIN.createAggregatorFactory(SINGLE, ImmutableList.of(3), OptionalInt.empty()), LONG_AVERAGE.createAggregatorFactory(SINGLE, ImmutableList.of(3), OptionalInt.empty()), maxVarcharColumn.createAggregatorFactory(SINGLE, ImmutableList.of(2), OptionalInt.empty())), - rowPagesBuilder.getHashChannel(), Optional.empty(), 100_000, Optional.of(DataSize.of(16, MEGABYTE)), @@ -812,7 +759,6 @@ public void testMemoryTracking() ImmutableList.of(), SINGLE, ImmutableList.of(LONG_MIN.createAggregatorFactory(SINGLE, ImmutableList.of(0), OptionalInt.empty())), - rowPagesBuilder.getHashChannel(), Optional.empty(), 100_000, Optional.of(DataSize.of(16, MEGABYTE)), @@ -850,7 +796,6 @@ public void testAdaptivePartialAggregation() PARTIAL, ImmutableList.of(LONG_MIN.createAggregatorFactory(PARTIAL, ImmutableList.of(0), OptionalInt.empty())), Optional.empty(), - Optional.empty(), 100, Optional.of(maxPartialMemory), // this setting makes the operator flush after each page hashStrategyCompiler, @@ -931,7 +876,6 @@ public void testAdaptivePartialAggregationTriggeredOnlyOnFlush() PARTIAL, ImmutableList.of(LONG_MIN.createAggregatorFactory(PARTIAL, ImmutableList.of(0), OptionalInt.empty())), Optional.empty(), - Optional.empty(), 10, Optional.of(DataSize.of(16, MEGABYTE)), // this setting makes the operator flush only after all pages hashStrategyCompiler, @@ -1001,7 +945,6 @@ public void testAsyncSpillBlocksAndUnblocksDriver() false, ImmutableList.of(COUNT.createAggregatorFactory(SINGLE, ImmutableList.of(0), OptionalInt.empty())), Optional.empty(), - Optional.empty(), /* expectedGroups */ 1, Optional.of(DataSize.of(16, MEGABYTE)), /* spill enabled */ true, @@ -1066,7 +1009,6 @@ public void testRevocableMemoryConvertedAfterAsyncSpill() ImmutableList.of(0), OptionalInt.empty())), Optional.empty(), - Optional.empty(), 10, Optional.of(DataSize.of(16, MEGABYTE)), /* spill enabled
*/ true, diff --git a/core/trino-main/src/test/java/io/trino/operator/TestMarkDistinctOperator.java b/core/trino-main/src/test/java/io/trino/operator/TestMarkDistinctOperator.java index 74d9673daccf..ca753f218c5c 100644 --- a/core/trino-main/src/test/java/io/trino/operator/TestMarkDistinctOperator.java +++ b/core/trino-main/src/test/java/io/trino/operator/TestMarkDistinctOperator.java @@ -87,7 +87,6 @@ private void testMarkDistinct(boolean hashEnabled, DriverContext driverContext) new PlanNodeId("test"), rowPagesBuilder.getTypes(), ImmutableList.of(0), - rowPagesBuilder.getHashChannel(), hashStrategyCompiler); MaterializedResult.Builder expected = resultBuilder(driverContext.getSession(), BIGINT, BOOLEAN); @@ -124,7 +123,6 @@ private void testRleDistinctMask(boolean hashEnabled, DriverContext driverContex new PlanNodeId("test"), rowPagesBuilder.getTypes(), ImmutableList.of(0), - rowPagesBuilder.getHashChannel(), hashStrategyCompiler); int maskChannel = firstInput.getChannelCount(); // mask channel is appended to the input @@ -180,7 +178,7 @@ private void testMemoryReservationYield(Type type) { List input = createPagesWithDistinctHashKeys(type, 6_000, 600); - OperatorFactory operatorFactory = new MarkDistinctOperatorFactory(0, new PlanNodeId("test"), ImmutableList.of(type), ImmutableList.of(0), Optional.of(1), hashStrategyCompiler); + OperatorFactory operatorFactory = new MarkDistinctOperatorFactory(0, new PlanNodeId("test"), ImmutableList.of(type), ImmutableList.of(0), hashStrategyCompiler); // get result with yield; pick a relatively small buffer for partitionRowCount's memory usage GroupByHashYieldAssertion.GroupByHashYieldResult result = finishOperatorWithYieldingGroupByHash(input, type, operatorFactory, operator -> ((MarkDistinctOperator) operator).getCapacity(), 450_000); diff --git a/core/trino-main/src/test/java/io/trino/operator/TestRowNumberOperator.java b/core/trino-main/src/test/java/io/trino/operator/TestRowNumberOperator.java index 3b9d96a3a4c3..fe8776db139a 100644 --- a/core/trino-main/src/test/java/io/trino/operator/TestRowNumberOperator.java +++ b/core/trino-main/src/test/java/io/trino/operator/TestRowNumberOperator.java @@ -113,7 +113,6 @@ public void testRowNumberUnpartitioned() Ints.asList(), ImmutableList.of(), Optional.empty(), - Optional.empty(), 10, hashStrategyCompiler); @@ -152,7 +151,6 @@ public void testMemoryReservationYield() ImmutableList.of(0), ImmutableList.of(0), ImmutableList.of(type), - Optional.empty(), Optional.of(1), 1, hashStrategyCompiler); @@ -164,9 +162,9 @@ public void testMemoryReservationYield() int count = 0; for (Page page : result.getOutput()) { - assertThat(page.getChannelCount()).isEqualTo(3); + assertThat(page.getChannelCount()).isEqualTo(2); for (int i = 0; i < page.getPositionCount(); i++) { - assertThat(BIGINT.getLong(page.getBlock(2), i)).isEqualTo(1); + assertThat(BIGINT.getLong(page.getBlock(1), i)).isEqualTo(1); count++; } } @@ -177,141 +175,135 @@ public void testMemoryReservationYield() @Test public void testRowNumberPartitioned() { - for (boolean hashEnabled : Arrays.asList(true, false)) { - DriverContext driverContext = getDriverContext(); - RowPagesBuilder rowPagesBuilder = rowPagesBuilder(hashEnabled, Ints.asList(0), BIGINT, DOUBLE); - List input = rowPagesBuilder - .row(1L, 0.3) - .row(2L, 0.2) - .row(3L, 0.1) - .row(3L, 0.19) - .pageBreak() - .row(1L, 0.4) - .pageBreak() - .row(1L, 0.5) - .row(1L, 0.6) - .row(2L, 0.7) - .row(2L, 0.8) - .row(2L, 0.9) - .build(); - - RowNumberOperator.RowNumberOperatorFactory operatorFactory = 
new RowNumberOperator.RowNumberOperatorFactory( - 0, - new PlanNodeId("test"), - ImmutableList.of(BIGINT, DOUBLE), - Ints.asList(1, 0), - Ints.asList(0), - ImmutableList.of(BIGINT), - Optional.of(10), - rowPagesBuilder.getHashChannel(), - 10, - hashStrategyCompiler); + DriverContext driverContext = getDriverContext(); + RowPagesBuilder rowPagesBuilder = rowPagesBuilder(false, Ints.asList(0), BIGINT, DOUBLE); + List input = rowPagesBuilder + .row(1L, 0.3) + .row(2L, 0.2) + .row(3L, 0.1) + .row(3L, 0.19) + .pageBreak() + .row(1L, 0.4) + .pageBreak() + .row(1L, 0.5) + .row(1L, 0.6) + .row(2L, 0.7) + .row(2L, 0.8) + .row(2L, 0.9) + .build(); - MaterializedResult expectedPartition1 = resultBuilder(driverContext.getSession(), DOUBLE, BIGINT) - .row(0.3, 1L) - .row(0.4, 1L) - .row(0.5, 1L) - .row(0.6, 1L) - .build(); - - MaterializedResult expectedPartition2 = resultBuilder(driverContext.getSession(), DOUBLE, BIGINT) - .row(0.2, 2L) - .row(0.7, 2L) - .row(0.8, 2L) - .row(0.9, 2L) - .build(); - - MaterializedResult expectedPartition3 = resultBuilder(driverContext.getSession(), DOUBLE, BIGINT) - .row(0.1, 3L) - .row(0.19, 3L) - .build(); - - List pages = toPages(operatorFactory, driverContext, input); - Block rowNumberColumn = getRowNumberColumn(pages); - assertThat(rowNumberColumn.getPositionCount()).isEqualTo(10); - - pages = stripRowNumberColumn(pages); - MaterializedResult actual = toMaterializedResult(driverContext.getSession(), ImmutableList.of(DOUBLE, BIGINT), pages); - Set actualSet = ImmutableSet.copyOf(actual.getMaterializedRows()); - Set expectedPartition1Set = ImmutableSet.copyOf(expectedPartition1.getMaterializedRows()); - Set expectedPartition2Set = ImmutableSet.copyOf(expectedPartition2.getMaterializedRows()); - Set expectedPartition3Set = ImmutableSet.copyOf(expectedPartition3.getMaterializedRows()); - assertThat(Sets.intersection(expectedPartition1Set, actualSet)).hasSize(4); - assertThat(Sets.intersection(expectedPartition2Set, actualSet)).hasSize(4); - assertThat(Sets.intersection(expectedPartition3Set, actualSet)).hasSize(2); - } + RowNumberOperator.RowNumberOperatorFactory operatorFactory = new RowNumberOperator.RowNumberOperatorFactory( + 0, + new PlanNodeId("test"), + ImmutableList.of(BIGINT, DOUBLE), + Ints.asList(1, 0), + Ints.asList(0), + ImmutableList.of(BIGINT), + Optional.of(10), + 10, + hashStrategyCompiler); + + MaterializedResult expectedPartition1 = resultBuilder(driverContext.getSession(), DOUBLE, BIGINT) + .row(0.3, 1L) + .row(0.4, 1L) + .row(0.5, 1L) + .row(0.6, 1L) + .build(); + + MaterializedResult expectedPartition2 = resultBuilder(driverContext.getSession(), DOUBLE, BIGINT) + .row(0.2, 2L) + .row(0.7, 2L) + .row(0.8, 2L) + .row(0.9, 2L) + .build(); + + MaterializedResult expectedPartition3 = resultBuilder(driverContext.getSession(), DOUBLE, BIGINT) + .row(0.1, 3L) + .row(0.19, 3L) + .build(); + + List pages = toPages(operatorFactory, driverContext, input); + Block rowNumberColumn = getRowNumberColumn(pages); + assertThat(rowNumberColumn.getPositionCount()).isEqualTo(10); + + pages = stripRowNumberColumn(pages); + MaterializedResult actual = toMaterializedResult(driverContext.getSession(), ImmutableList.of(DOUBLE, BIGINT), pages); + Set actualSet = ImmutableSet.copyOf(actual.getMaterializedRows()); + Set expectedPartition1Set = ImmutableSet.copyOf(expectedPartition1.getMaterializedRows()); + Set expectedPartition2Set = ImmutableSet.copyOf(expectedPartition2.getMaterializedRows()); + Set expectedPartition3Set = 
ImmutableSet.copyOf(expectedPartition3.getMaterializedRows()); + assertThat(Sets.intersection(expectedPartition1Set, actualSet)).hasSize(4); + assertThat(Sets.intersection(expectedPartition2Set, actualSet)).hasSize(4); + assertThat(Sets.intersection(expectedPartition3Set, actualSet)).hasSize(2); } @Test public void testRowNumberPartitionedLimit() { - for (boolean hashEnabled : Arrays.asList(true, false)) { - DriverContext driverContext = getDriverContext(); - RowPagesBuilder rowPagesBuilder = rowPagesBuilder(hashEnabled, Ints.asList(0), BIGINT, DOUBLE); - List input = rowPagesBuilder - .row(1L, 0.3) - .row(2L, 0.2) - .row(3L, 0.1) - .row(3L, 0.19) - .pageBreak() - .row(1L, 0.4) - .pageBreak() - .row(1L, 0.5) - .row(1L, 0.6) - .row(2L, 0.7) - .row(2L, 0.8) - .row(2L, 0.9) - .build(); - - RowNumberOperator.RowNumberOperatorFactory operatorFactory = new RowNumberOperator.RowNumberOperatorFactory( - 0, - new PlanNodeId("test"), - ImmutableList.of(BIGINT, DOUBLE), - Ints.asList(1, 0), - Ints.asList(0), - ImmutableList.of(BIGINT), - Optional.of(3), - Optional.empty(), - 10, - hashStrategyCompiler); + DriverContext driverContext = getDriverContext(); + RowPagesBuilder rowPagesBuilder = rowPagesBuilder(false, Ints.asList(0), BIGINT, DOUBLE); + List input = rowPagesBuilder + .row(1L, 0.3) + .row(2L, 0.2) + .row(3L, 0.1) + .row(3L, 0.19) + .pageBreak() + .row(1L, 0.4) + .pageBreak() + .row(1L, 0.5) + .row(1L, 0.6) + .row(2L, 0.7) + .row(2L, 0.8) + .row(2L, 0.9) + .build(); - MaterializedResult expectedPartition1 = resultBuilder(driverContext.getSession(), DOUBLE, BIGINT) - .row(0.3, 1L) - .row(0.4, 1L) - .row(0.5, 1L) - .row(0.6, 1L) - .build(); - - MaterializedResult expectedPartition2 = resultBuilder(driverContext.getSession(), DOUBLE, BIGINT) - .row(0.2, 2L) - .row(0.7, 2L) - .row(0.8, 2L) - .row(0.9, 2L) - .build(); - - MaterializedResult expectedPartition3 = resultBuilder(driverContext.getSession(), DOUBLE, BIGINT) - .row(0.1, 3L) - .row(0.19, 3L) - .build(); - - List pages = toPages(operatorFactory, driverContext, input); - Block rowNumberColumn = getRowNumberColumn(pages); - assertThat(rowNumberColumn.getPositionCount()).isEqualTo(8); - // Check that all row numbers generated are <= 3 - for (int i = 0; i < rowNumberColumn.getPositionCount(); i++) { - assertThat(BIGINT.getLong(rowNumberColumn, i) <= 3).isTrue(); - } + RowNumberOperator.RowNumberOperatorFactory operatorFactory = new RowNumberOperator.RowNumberOperatorFactory( + 0, + new PlanNodeId("test"), + ImmutableList.of(BIGINT, DOUBLE), + Ints.asList(1, 0), + Ints.asList(0), + ImmutableList.of(BIGINT), + Optional.of(3), + 10, + hashStrategyCompiler); + + MaterializedResult expectedPartition1 = resultBuilder(driverContext.getSession(), DOUBLE, BIGINT) + .row(0.3, 1L) + .row(0.4, 1L) + .row(0.5, 1L) + .row(0.6, 1L) + .build(); + + MaterializedResult expectedPartition2 = resultBuilder(driverContext.getSession(), DOUBLE, BIGINT) + .row(0.2, 2L) + .row(0.7, 2L) + .row(0.8, 2L) + .row(0.9, 2L) + .build(); + + MaterializedResult expectedPartition3 = resultBuilder(driverContext.getSession(), DOUBLE, BIGINT) + .row(0.1, 3L) + .row(0.19, 3L) + .build(); - pages = stripRowNumberColumn(pages); - MaterializedResult actual = toMaterializedResult(driverContext.getSession(), ImmutableList.of(DOUBLE, BIGINT), pages); - Set actualSet = ImmutableSet.copyOf(actual.getMaterializedRows()); - Set expectedPartition1Set = ImmutableSet.copyOf(expectedPartition1.getMaterializedRows()); - Set expectedPartition2Set = 
ImmutableSet.copyOf(expectedPartition2.getMaterializedRows()); - Set expectedPartition3Set = ImmutableSet.copyOf(expectedPartition3.getMaterializedRows()); - assertThat(Sets.intersection(expectedPartition1Set, actualSet)).hasSize(3); - assertThat(Sets.intersection(expectedPartition2Set, actualSet)).hasSize(3); - assertThat(Sets.intersection(expectedPartition3Set, actualSet)).hasSize(2); + List pages = toPages(operatorFactory, driverContext, input); + Block rowNumberColumn = getRowNumberColumn(pages); + assertThat(rowNumberColumn.getPositionCount()).isEqualTo(8); + // Check that all row numbers generated are <= 3 + for (int i = 0; i < rowNumberColumn.getPositionCount(); i++) { + assertThat(BIGINT.getLong(rowNumberColumn, i) <= 3).isTrue(); } + + pages = stripRowNumberColumn(pages); + MaterializedResult actual = toMaterializedResult(driverContext.getSession(), ImmutableList.of(DOUBLE, BIGINT), pages); + Set actualSet = ImmutableSet.copyOf(actual.getMaterializedRows()); + Set expectedPartition1Set = ImmutableSet.copyOf(expectedPartition1.getMaterializedRows()); + Set expectedPartition2Set = ImmutableSet.copyOf(expectedPartition2.getMaterializedRows()); + Set expectedPartition3Set = ImmutableSet.copyOf(expectedPartition3.getMaterializedRows()); + assertThat(Sets.intersection(expectedPartition1Set, actualSet)).hasSize(3); + assertThat(Sets.intersection(expectedPartition2Set, actualSet)).hasSize(3); + assertThat(Sets.intersection(expectedPartition3Set, actualSet)).hasSize(2); } @Test @@ -341,7 +333,6 @@ public void testRowNumberUnpartitionedLimit() Ints.asList(), ImmutableList.of(), Optional.of(3), - Optional.empty(), 10, hashStrategyCompiler); diff --git a/core/trino-main/src/test/java/io/trino/operator/TestTopNRankingOperator.java b/core/trino-main/src/test/java/io/trino/operator/TestTopNRankingOperator.java index 880a84d30398..648b766033c1 100644 --- a/core/trino-main/src/test/java/io/trino/operator/TestTopNRankingOperator.java +++ b/core/trino-main/src/test/java/io/trino/operator/TestTopNRankingOperator.java @@ -110,7 +110,6 @@ public void testPartitioned() Ints.asList(1), 3, false, - Optional.empty(), 10, Optional.empty(), hashStrategyCompiler, @@ -165,7 +164,6 @@ public void testUnPartitioned() Ints.asList(1), 3, partial, - Optional.empty(), 10, partial ? Optional.of(DataSize.ofBytes(1)) : Optional.empty(), hashStrategyCompiler, @@ -230,7 +228,6 @@ public void testPartialFlush() Ints.asList(1), 3, partial, - Optional.empty(), 10, partial ? 
Optional.of(DataSize.of(1, DataSize.Unit.BYTE)) : Optional.empty(), hashStrategyCompiler, @@ -273,7 +270,6 @@ public void testMemoryReservationYield() Ints.asList(0), 3, false, - Optional.empty(), 10, Optional.empty(), hashStrategyCompiler, @@ -333,7 +329,6 @@ public void testRankNullAndNan() Ints.asList(1), 3, false, - Optional.empty(), 10, Optional.empty(), hashStrategyCompiler, diff --git a/core/trino-main/src/test/java/io/trino/sql/planner/assertions/DistinctLimitMatcher.java b/core/trino-main/src/test/java/io/trino/sql/planner/assertions/DistinctLimitMatcher.java index 651164485f5c..dc555906a395 100644 --- a/core/trino-main/src/test/java/io/trino/sql/planner/assertions/DistinctLimitMatcher.java +++ b/core/trino-main/src/test/java/io/trino/sql/planner/assertions/DistinctLimitMatcher.java @@ -22,7 +22,6 @@ import io.trino.sql.planner.plan.PlanNode; import java.util.List; -import java.util.Optional; import static com.google.common.base.MoreObjects.toStringHelper; import static com.google.common.base.Preconditions.checkState; @@ -35,13 +34,11 @@ public class DistinctLimitMatcher { private final long limit; private final List distinctSymbols; - private final Optional hashSymbol; - public DistinctLimitMatcher(long limit, List distinctSymbols, Optional hashSymbol) + public DistinctLimitMatcher(long limit, List distinctSymbols) { this.limit = limit; this.distinctSymbols = ImmutableList.copyOf(requireNonNull(distinctSymbols, "distinctSymbols is null")); - this.hashSymbol = requireNonNull(hashSymbol, "hashSymbol is null"); } @Override @@ -60,10 +57,6 @@ public MatchResult detailMatches(PlanNode node, StatsProvider stats, Session ses return NO_MATCH; } - if (!distinctLimitNode.getHashSymbol().equals(hashSymbol.map(alias -> alias.toSymbol(symbolAliases)))) { - return NO_MATCH; - } - return new MatchResult(ImmutableSet.copyOf(distinctLimitNode.getDistinctSymbols()) .equals(distinctSymbols.stream().map(alias -> alias.toSymbol(symbolAliases)).collect(toImmutableSet()))); } @@ -74,7 +67,6 @@ public String toString() return toStringHelper(this) .add("limit", limit) .add("distinctSymbols", distinctSymbols) - .add("hashSymbol", hashSymbol) .toString(); } } diff --git a/core/trino-main/src/test/java/io/trino/sql/planner/assertions/MarkDistinctMatcher.java b/core/trino-main/src/test/java/io/trino/sql/planner/assertions/MarkDistinctMatcher.java index 9e35d2695911..841c942eaa8c 100644 --- a/core/trino-main/src/test/java/io/trino/sql/planner/assertions/MarkDistinctMatcher.java +++ b/core/trino-main/src/test/java/io/trino/sql/planner/assertions/MarkDistinctMatcher.java @@ -22,7 +22,6 @@ import io.trino.sql.planner.plan.PlanNode; import java.util.List; -import java.util.Optional; import static com.google.common.base.MoreObjects.toStringHelper; import static com.google.common.base.Preconditions.checkState; @@ -36,13 +35,11 @@ public class MarkDistinctMatcher { private final PlanTestSymbol markerSymbol; private final List distinctSymbols; - private final Optional hashSymbol; - public MarkDistinctMatcher(PlanTestSymbol markerSymbol, List distinctSymbols, Optional hashSymbol) + public MarkDistinctMatcher(PlanTestSymbol markerSymbol, List distinctSymbols) { this.markerSymbol = requireNonNull(markerSymbol, "markerSymbol is null"); this.distinctSymbols = ImmutableList.copyOf(distinctSymbols); - this.hashSymbol = requireNonNull(hashSymbol, "hashSymbol is null"); } @Override @@ -57,10 +54,6 @@ public MatchResult detailMatches(PlanNode node, StatsProvider stats, Session ses checkState(shapeMatches(node), "Plan testing 
framework error: shapeMatches returned false in detailMatches in %s", this.getClass().getName()); MarkDistinctNode markDistinctNode = (MarkDistinctNode) node; - if (!markDistinctNode.getHashSymbol().equals(hashSymbol.map(alias -> alias.toSymbol(symbolAliases)))) { - return NO_MATCH; - } - if (!ImmutableSet.copyOf(markDistinctNode.getDistinctSymbols()) .equals(distinctSymbols.stream().map(alias -> alias.toSymbol(symbolAliases)).collect(toImmutableSet()))) { return NO_MATCH; @@ -75,7 +68,6 @@ public String toString() return toStringHelper(this) .add("markerSymbol", markerSymbol) .add("distinctSymbols", distinctSymbols) - .add("hashSymbol", hashSymbol) .toString(); } } diff --git a/core/trino-main/src/test/java/io/trino/sql/planner/assertions/PlanMatchPattern.java b/core/trino-main/src/test/java/io/trino/sql/planner/assertions/PlanMatchPattern.java index 03e0120a3889..8247c5024fbe 100644 --- a/core/trino-main/src/test/java/io/trino/sql/planner/assertions/PlanMatchPattern.java +++ b/core/trino-main/src/test/java/io/trino/sql/planner/assertions/PlanMatchPattern.java @@ -360,39 +360,17 @@ public static PlanMatchPattern distinctLimit(long limit, List distinctSy { return node(DistinctLimitNode.class, source).with(new DistinctLimitMatcher( limit, - toSymbolAliases(distinctSymbols), - Optional.empty())); - } - - public static PlanMatchPattern distinctLimit(long limit, List distinctSymbols, String hashSymbol, PlanMatchPattern source) - { - return node(DistinctLimitNode.class, source).with(new DistinctLimitMatcher( - limit, - toSymbolAliases(distinctSymbols), - Optional.of(new SymbolAlias(hashSymbol)))); - } - - public static PlanMatchPattern markDistinct( - String markerSymbol, - List distinctSymbols, - PlanMatchPattern source) - { - return node(MarkDistinctNode.class, source).with(new MarkDistinctMatcher( - new SymbolAlias(markerSymbol), - toSymbolAliases(distinctSymbols), - Optional.empty())); + toSymbolAliases(distinctSymbols))); } public static PlanMatchPattern markDistinct( String markerSymbol, List distinctSymbols, - String hashSymbol, PlanMatchPattern source) { return node(MarkDistinctNode.class, source).with(new MarkDistinctMatcher( new SymbolAlias(markerSymbol), - toSymbolAliases(distinctSymbols), - Optional.of(new SymbolAlias(hashSymbol)))); + toSymbolAliases(distinctSymbols))); } public static PlanMatchPattern window(Consumer handler, PlanMatchPattern source) diff --git a/core/trino-main/src/test/java/io/trino/sql/planner/assertions/RowNumberMatcher.java b/core/trino-main/src/test/java/io/trino/sql/planner/assertions/RowNumberMatcher.java index e72b77c20163..d586ac7e9281 100644 --- a/core/trino-main/src/test/java/io/trino/sql/planner/assertions/RowNumberMatcher.java +++ b/core/trino-main/src/test/java/io/trino/sql/planner/assertions/RowNumberMatcher.java @@ -37,20 +37,17 @@ public class RowNumberMatcher private final Optional> partitionBy; private final Optional> maxRowCountPerPartition; private final Optional rowNumberSymbol; - private final Optional> hashSymbol; private final Optional orderSensitive; private RowNumberMatcher( Optional> partitionBy, Optional> maxRowCountPerPartition, Optional rowNumberSymbol, - Optional> hashSymbol, Optional orderSensitive) { this.partitionBy = requireNonNull(partitionBy, "partitionBy is null"); this.maxRowCountPerPartition = requireNonNull(maxRowCountPerPartition, "maxRowCountPerPartition is null"); this.rowNumberSymbol = requireNonNull(rowNumberSymbol, "rowNumberSymbol is null"); - this.hashSymbol = requireNonNull(hashSymbol, "hashSymbol is null"); 
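// A usage sketch under the slimmed-down matcher API, assuming a source PlanMatchPattern named
// `source` and the alias name "row_number" (both placeholders, not taken from the patch); only
// builder methods this patch keeps are exercised, since no hash-symbol expectation remains.
//
//     PlanMatchPattern pattern = new RowNumberMatcher.Builder(source)
//             .rowNumberSymbol(new SymbolAlias("row_number"))
//             .orderSensitive(false)
//             .build();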
this.orderSensitive = requireNonNull(orderSensitive, "orderSensitive is null"); } @@ -90,13 +87,6 @@ public MatchResult detailMatches(PlanNode node, StatsProvider stats, Session ses } } - if (hashSymbol.isPresent()) { - Optional expected = hashSymbol.get().map(alias -> alias.toSymbol(symbolAliases)); - if (!expected.equals(rowNumberNode.getHashSymbol())) { - return NO_MATCH; - } - } - if (orderSensitive.isPresent()) { if (!orderSensitive.get().equals(rowNumberNode.isOrderSensitive())) { return NO_MATCH; @@ -113,7 +103,6 @@ public String toString() .add("partitionBy", partitionBy) .add("maxRowCountPerPartition", maxRowCountPerPartition) .add("rowNumberSymbol", rowNumberSymbol) - .add("hashSymbol", hashSymbol) .add("orderSensitive", orderSensitive) .toString(); } @@ -129,7 +118,6 @@ public static class Builder private Optional> partitionBy = Optional.empty(); private Optional> maxRowCountPerPartition = Optional.empty(); private Optional rowNumberSymbol = Optional.empty(); - private Optional> hashSymbol = Optional.empty(); private Optional orderSensitive = Optional.empty(); Builder(PlanMatchPattern source) @@ -158,13 +146,6 @@ public Builder rowNumberSymbol(SymbolAlias rowNumberSymbol) return this; } - public Builder hashSymbol(Optional hashSymbol) - { - requireNonNull(hashSymbol, "hashSymbol is null"); - this.hashSymbol = Optional.of(hashSymbol.map(SymbolAlias::new)); - return this; - } - public Builder orderSensitive(boolean isOrderSensitive) { this.orderSensitive = Optional.of(isOrderSensitive); @@ -178,7 +159,6 @@ PlanMatchPattern build() partitionBy, maxRowCountPerPartition, rowNumberSymbol, - hashSymbol, orderSensitive)); } } diff --git a/core/trino-main/src/test/java/io/trino/sql/planner/assertions/TopNRankingMatcher.java b/core/trino-main/src/test/java/io/trino/sql/planner/assertions/TopNRankingMatcher.java index f11bfed8d472..1dacf3fbea66 100644 --- a/core/trino-main/src/test/java/io/trino/sql/planner/assertions/TopNRankingMatcher.java +++ b/core/trino-main/src/test/java/io/trino/sql/planner/assertions/TopNRankingMatcher.java @@ -42,22 +42,19 @@ public class TopNRankingMatcher private final Optional rankingType; private final Optional maxRankingPerPartition; private final Optional partial; - private final Optional> hashSymbol; private TopNRankingMatcher( Optional> specification, Optional rankingSymbol, Optional rankingType, Optional maxRankingPerPartition, - Optional partial, - Optional> hashSymbol) + Optional partial) { this.specification = requireNonNull(specification, "specification is null"); this.rankingSymbol = requireNonNull(rankingSymbol, "rankingSymbol is null"); this.rankingType = requireNonNull(rankingType, "rankingType is null"); this.maxRankingPerPartition = requireNonNull(maxRankingPerPartition, "maxRankingPerPartition is null"); this.partial = requireNonNull(partial, "partial is null"); - this.hashSymbol = requireNonNull(hashSymbol, "hashSymbol is null"); } @Override @@ -105,13 +102,6 @@ public MatchResult detailMatches(PlanNode node, StatsProvider stats, Session ses } } - if (hashSymbol.isPresent()) { - Optional expected = hashSymbol.get().map(alias -> alias.toSymbol(symbolAliases)); - if (!expected.equals(topNRankingNode.getHashSymbol())) { - return NO_MATCH; - } - } - return match(); } @@ -124,7 +114,6 @@ public String toString() .add("rankingType", rankingType) .add("maxRankingPerPartition", maxRankingPerPartition) .add("partial", partial) - .add("hashSymbol", hashSymbol) .toString(); } @@ -136,7 +125,6 @@ public static class Builder private Optional rankingType = 
Optional.empty(); private Optional maxRankingPerPartition = Optional.empty(); private Optional partial = Optional.empty(); - private Optional> hashSymbol = Optional.empty(); Builder(PlanMatchPattern source) { @@ -173,12 +161,6 @@ public Builder partial(boolean partial) return this; } - public Builder hashSymbol(Optional hashSymbol) - { - this.hashSymbol = Optional.of(requireNonNull(hashSymbol, "hashSymbol is null")); - return this; - } - PlanMatchPattern build() { return node(TopNRankingNode.class, source).with( @@ -187,8 +169,7 @@ PlanMatchPattern build() rankingSymbol, rankingType, maxRankingPerPartition, - partial, - hashSymbol)); + partial)); } } } diff --git a/core/trino-main/src/test/java/io/trino/sql/planner/iterative/rule/TestMultipleDistinctAggregationsToSubqueries.java b/core/trino-main/src/test/java/io/trino/sql/planner/iterative/rule/TestMultipleDistinctAggregationsToSubqueries.java index 7bad5ebb6d33..be92f5c5afd1 100644 --- a/core/trino-main/src/test/java/io/trino/sql/planner/iterative/rule/TestMultipleDistinctAggregationsToSubqueries.java +++ b/core/trino-main/src/test/java/io/trino/sql/planner/iterative/rule/TestMultipleDistinctAggregationsToSubqueries.java @@ -172,27 +172,6 @@ public void testDoesNotFire() }) .doesNotFire(); - // hash symbol - ruleTester.assertThat(newMultipleDistinctAggregationsToSubqueries(ruleTester)) - .setSystemProperty(DISTINCT_AGGREGATIONS_STRATEGY, "split_to_subqueries") - .on(p -> { - Symbol input1Symbol = p.symbol("input1Symbol", BIGINT); - Symbol input2Symbol = p.symbol("input2Symbol", BIGINT); - return p.aggregation(builder -> builder - .globalGrouping() - .addAggregation(p.symbol("output1", BIGINT), PlanBuilder.aggregation("count", true, ImmutableList.of(new Reference(BIGINT, "input1Symbol"))), ImmutableList.of(BIGINT)) - .addAggregation(p.symbol("output2", BIGINT), PlanBuilder.aggregation("sum", true, ImmutableList.of(new Reference(BIGINT, "input2Symbol"))), ImmutableList.of(BIGINT)) - .hashSymbol(p.symbol("hashSymbol", BIGINT)) - .source( - p.tableScan( - testTableHandle(ruleTester), - ImmutableList.of(input1Symbol, input2Symbol), - ImmutableMap.of( - input1Symbol, COLUMN_1_HANDLE, - input2Symbol, COLUMN_2_HANDLE)))); - }) - .doesNotFire(); - // non-distinct ruleTester.assertThat(newMultipleDistinctAggregationsToSubqueries(ruleTester)) .setSystemProperty(DISTINCT_AGGREGATIONS_STRATEGY, "split_to_subqueries") diff --git a/core/trino-main/src/test/java/io/trino/sql/planner/iterative/rule/TestPruneAggregationSourceColumns.java b/core/trino-main/src/test/java/io/trino/sql/planner/iterative/rule/TestPruneAggregationSourceColumns.java index c10ce38ba60a..28b1bc98e01f 100644 --- a/core/trino-main/src/test/java/io/trino/sql/planner/iterative/rule/TestPruneAggregationSourceColumns.java +++ b/core/trino-main/src/test/java/io/trino/sql/planner/iterative/rule/TestPruneAggregationSourceColumns.java @@ -60,9 +60,8 @@ public void testNotAllInputsReferenced() ImmutableMap.of( "input", expression(new Reference(BIGINT, "input")), "key", expression(new Reference(BIGINT, "key")), - "keyHash", expression(new Reference(BIGINT, "keyHash")), "mask", expression(new Reference(BOOLEAN, "mask"))), - values("input", "key", "keyHash", "mask", "unused")))); + values("input", "key", "mask", "unused")))); } @Test @@ -78,14 +77,12 @@ private AggregationNode buildAggregation(PlanBuilder planBuilder, Predicate sourceSymbols = ImmutableList.of(input, key, keyHash, mask, unused); + List sourceSymbols = ImmutableList.of(input, key, mask, unused); return 
planBuilder.aggregation(aggregationBuilder -> aggregationBuilder .singleGroupingSet(key) .addAggregation(avg, PlanBuilder.aggregation("avg", ImmutableList.of(new Reference(BIGINT, "input"))), ImmutableList.of(BIGINT), mask) - .hashSymbol(keyHash) .source( planBuilder.values( sourceSymbols.stream() diff --git a/core/trino-main/src/test/java/io/trino/sql/planner/iterative/rule/TestPruneDistinctLimitSourceColumns.java b/core/trino-main/src/test/java/io/trino/sql/planner/iterative/rule/TestPruneDistinctLimitSourceColumns.java index ba23b0e281d7..ec6adbccef69 100644 --- a/core/trino-main/src/test/java/io/trino/sql/planner/iterative/rule/TestPruneDistinctLimitSourceColumns.java +++ b/core/trino-main/src/test/java/io/trino/sql/planner/iterative/rule/TestPruneDistinctLimitSourceColumns.java @@ -20,8 +20,6 @@ import io.trino.sql.planner.iterative.rule.test.BaseRuleTest; import org.junit.jupiter.api.Test; -import java.util.Optional; - import static io.trino.spi.type.BigintType.BIGINT; import static io.trino.sql.planner.assertions.PlanMatchPattern.distinctLimit; import static io.trino.sql.planner.assertions.PlanMatchPattern.expression; @@ -55,21 +53,18 @@ public void testPruneInputColumn() .on(p -> { Symbol a = p.symbol("a"); Symbol b = p.symbol("b"); - Symbol hashSymbol = p.symbol("hash_symbol"); return p.distinctLimit( 5, ImmutableList.of(a), - Optional.of(hashSymbol), - p.values(a, b, hashSymbol)); + p.values(a, b)); }) .matches( distinctLimit( 5, ImmutableList.of("a"), - "hash_symbol", strictProject( - ImmutableMap.of("a", expression(new Reference(BIGINT, "a")), "hash_symbol", expression(new Reference(BIGINT, "hash_symbol"))), - values("a", "b", "hash_symbol")))); + ImmutableMap.of("a", expression(new Reference(BIGINT, "a"))), + values("a", "b")))); } @Test @@ -79,12 +74,10 @@ public void allInputsNeeded() .on(p -> { Symbol a = p.symbol("a"); Symbol b = p.symbol("b"); - Symbol hashSymbol = p.symbol("hash_symbol"); return p.distinctLimit( 5, ImmutableList.of(a, b), - Optional.of(hashSymbol), - p.values(a, b, hashSymbol)); + p.values(a, b)); }) .doesNotFire(); } diff --git a/core/trino-main/src/test/java/io/trino/sql/planner/iterative/rule/TestPruneMarkDistinctColumns.java b/core/trino-main/src/test/java/io/trino/sql/planner/iterative/rule/TestPruneMarkDistinctColumns.java index 2fd737e0f357..d1773a2bafb3 100644 --- a/core/trino-main/src/test/java/io/trino/sql/planner/iterative/rule/TestPruneMarkDistinctColumns.java +++ b/core/trino-main/src/test/java/io/trino/sql/planner/iterative/rule/TestPruneMarkDistinctColumns.java @@ -57,25 +57,22 @@ public void testSourceSymbolNotReferenced() .on(p -> { Symbol key = p.symbol("key"); Symbol mark = p.symbol("mark"); - Symbol hash = p.symbol("hash"); Symbol unused = p.symbol("unused"); return p.project( Assignments.identity(mark), p.markDistinct( mark, ImmutableList.of(key), - hash, - p.values(key, hash, unused))); + p.values(key, unused))); }) .matches( strictProject( ImmutableMap.of("mark", expression(new Reference(BOOLEAN, "mark"))), - markDistinct("mark", ImmutableList.of("key"), "hash", + markDistinct("mark", ImmutableList.of("key"), strictProject( ImmutableMap.of( - "key", expression(new Reference(BIGINT, "key")), - "hash", expression(new Reference(BIGINT, "hash"))), - values(ImmutableList.of("key", "hash", "unused")))))); + "key", expression(new Reference(BIGINT, "key"))), + values(ImmutableList.of("key", "unused")))))); } @Test diff --git a/core/trino-main/src/test/java/io/trino/sql/planner/iterative/rule/TestPruneOrderByInAggregation.java 
b/core/trino-main/src/test/java/io/trino/sql/planner/iterative/rule/TestPruneOrderByInAggregation.java index 40fe48829dbc..384e2ed6fcca 100644 --- a/core/trino-main/src/test/java/io/trino/sql/planner/iterative/rule/TestPruneOrderByInAggregation.java +++ b/core/trino-main/src/test/java/io/trino/sql/planner/iterative/rule/TestPruneOrderByInAggregation.java @@ -62,7 +62,7 @@ public void testBasics() ImmutableList.of("mask"), Optional.empty(), SINGLE, - values("input", "key", "keyHash", "mask"))); + values("input", "key", "mask"))); } private AggregationNode buildAggregation(PlanBuilder planBuilder) @@ -71,9 +71,8 @@ private AggregationNode buildAggregation(PlanBuilder planBuilder) Symbol arrayAgg = planBuilder.symbol("array_agg"); Symbol input = planBuilder.symbol("input"); Symbol key = planBuilder.symbol("key"); - Symbol keyHash = planBuilder.symbol("keyHash"); Symbol mask = planBuilder.symbol("mask"); - List sourceSymbols = ImmutableList.of(input, key, keyHash, mask); + List sourceSymbols = ImmutableList.of(input, key, mask); return planBuilder.aggregation(aggregationBuilder -> aggregationBuilder .singleGroupingSet(key) .addAggregation(avg, PlanBuilder.aggregation( @@ -92,7 +91,6 @@ private AggregationNode buildAggregation(PlanBuilder planBuilder) ImmutableMap.of(new Symbol(BIGINT, "input"), SortOrder.ASC_NULLS_LAST))), ImmutableList.of(BIGINT), mask) - .hashSymbol(keyHash) .source(planBuilder.values(sourceSymbols, ImmutableList.of()))); } } diff --git a/core/trino-main/src/test/java/io/trino/sql/planner/iterative/rule/TestPruneRowNumberColumns.java b/core/trino-main/src/test/java/io/trino/sql/planner/iterative/rule/TestPruneRowNumberColumns.java index 15cc786a1945..af00e9967960 100644 --- a/core/trino-main/src/test/java/io/trino/sql/planner/iterative/rule/TestPruneRowNumberColumns.java +++ b/core/trino-main/src/test/java/io/trino/sql/planner/iterative/rule/TestPruneRowNumberColumns.java @@ -124,10 +124,9 @@ public void testDoNotPruneHashSymbol() .on(p -> { Symbol a = p.symbol("a"); Symbol rowNumber = p.symbol("row_number"); - Symbol hash = p.symbol("hash"); return p.project( Assignments.identity(a, rowNumber), - p.rowNumber(ImmutableList.of(a), Optional.empty(), rowNumber, Optional.of(hash), p.values(a, hash))); + p.rowNumber(ImmutableList.of(a), Optional.empty(), rowNumber, p.values(a))); }) .doesNotFire(); } diff --git a/core/trino-main/src/test/java/io/trino/sql/planner/iterative/rule/TestPruneTopNRankingColumns.java b/core/trino-main/src/test/java/io/trino/sql/planner/iterative/rule/TestPruneTopNRankingColumns.java index 28216993db95..51616c27ac5b 100644 --- a/core/trino-main/src/test/java/io/trino/sql/planner/iterative/rule/TestPruneTopNRankingColumns.java +++ b/core/trino-main/src/test/java/io/trino/sql/planner/iterative/rule/TestPruneTopNRankingColumns.java @@ -54,7 +54,6 @@ public void testDoNotPrunePartitioningSymbol() ROW_NUMBER, 5, ranking, - Optional.empty(), p.values(a, b))); }) .doesNotFire(); @@ -76,7 +75,6 @@ public void testDoNotPruneOrderingSymbol() ROW_NUMBER, 5, ranking, - Optional.empty(), p.values(a))); }) .doesNotFire(); @@ -88,7 +86,6 @@ public void testDoNotPruneHashSymbol() tester().assertThat(new PruneTopNRankingColumns()) .on(p -> { Symbol a = p.symbol("a"); - Symbol hash = p.symbol("hash"); Symbol ranking = p.symbol("ranking"); return p.project( Assignments.identity(a, ranking), @@ -99,8 +96,7 @@ public void testDoNotPruneHashSymbol() ROW_NUMBER, 5, ranking, - Optional.of(hash), - p.values(a, hash))); + p.values(a))); }) .doesNotFire(); } @@ -122,7 +118,6 @@ 
public void testSourceSymbolNotReferenced() ROW_NUMBER, 5, ranking, - Optional.empty(), p.values(a, b))); }) .matches( @@ -159,7 +154,6 @@ public void testAllSymbolsReferenced() ROW_NUMBER, 5, ranking, - Optional.empty(), p.values(a, b))); }) .doesNotFire(); @@ -181,7 +175,6 @@ public void testRankingSymbolNotReferenced() ROW_NUMBER, 5, ranking, - Optional.empty(), p.values(a))); }) .doesNotFire(); diff --git a/core/trino-main/src/test/java/io/trino/sql/planner/iterative/rule/TestPushDownDereferencesRules.java b/core/trino-main/src/test/java/io/trino/sql/planner/iterative/rule/TestPushDownDereferencesRules.java index 6e174fbc17d4..d0fbddcfe57a 100644 --- a/core/trino-main/src/test/java/io/trino/sql/planner/iterative/rule/TestPushDownDereferencesRules.java +++ b/core/trino-main/src/test/java/io/trino/sql/planner/iterative/rule/TestPushDownDereferencesRules.java @@ -550,7 +550,6 @@ public void testPushdownDereferenceThroughTopNRanking() ROW_NUMBER, 5, p.symbol("ranking"), - Optional.empty(), p.values(p.symbol("msg1", ROW_TYPE), p.symbol("msg2", ROW_TYPE), p.symbol("msg3", ROW_TYPE))))) .matches( strictProject( diff --git a/core/trino-main/src/test/java/io/trino/sql/planner/iterative/rule/test/PlanBuilder.java b/core/trino-main/src/test/java/io/trino/sql/planner/iterative/rule/test/PlanBuilder.java index df55b1f056f7..f4e0f04b2c9c 100644 --- a/core/trino-main/src/test/java/io/trino/sql/planner/iterative/rule/test/PlanBuilder.java +++ b/core/trino-main/src/test/java/io/trino/sql/planner/iterative/rule/test/PlanBuilder.java @@ -353,12 +353,7 @@ public ProjectNode project(Assignments assignments, PlanNode source) public MarkDistinctNode markDistinct(Symbol markerSymbol, List distinctSymbols, PlanNode source) { - return new MarkDistinctNode(idAllocator.getNextId(), source, markerSymbol, distinctSymbols, Optional.empty()); - } - - public MarkDistinctNode markDistinct(Symbol markerSymbol, List distinctSymbols, Symbol hashSymbol, PlanNode source) - { - return new MarkDistinctNode(idAllocator.getNextId(), source, markerSymbol, distinctSymbols, Optional.of(hashSymbol)); + return new MarkDistinctNode(idAllocator.getNextId(), source, markerSymbol, distinctSymbols); } public FilterNode filter(Expression predicate, PlanNode source) @@ -399,19 +394,13 @@ public GroupIdNode groupId(List> groupingSets, Map } public DistinctLimitNode distinctLimit(long count, List distinctSymbols, PlanNode source) - { - return distinctLimit(count, distinctSymbols, Optional.empty(), source); - } - - public DistinctLimitNode distinctLimit(long count, List distinctSymbols, Optional hashSymbol, PlanNode source) { return new DistinctLimitNode( idAllocator.getNextId(), source, count, false, - distinctSymbols, - hashSymbol); + distinctSymbols); } public class AggregationBuilder @@ -421,7 +410,6 @@ public class AggregationBuilder private AggregationNode.GroupingSetDescriptor groupingSets; private List preGroupedSymbols = new ArrayList<>(); private Step step = Step.SINGLE; - private Optional hashSymbol = Optional.empty(); private Optional groupIdSymbol = Optional.empty(); private Optional nodeId = Optional.empty(); private Optional exchangeInputAggregation = Optional.empty(); @@ -492,12 +480,6 @@ public AggregationBuilder step(Step step) return this; } - public AggregationBuilder hashSymbol(Symbol hashSymbol) - { - this.hashSymbol = Optional.of(hashSymbol); - return this; - } - public AggregationBuilder nodeId(PlanNodeId nodeId) { this.nodeId = Optional.of(nodeId); @@ -520,7 +502,6 @@ protected AggregationNode build() groupingSets, 
preGroupedSymbols, step, - hashSymbol, groupIdSymbol, exchangeInputAggregation); } @@ -1377,11 +1358,6 @@ public WindowNode window(DataOrganizationSpecification specification, Map partitionBy, Optional maxRowCountPerPartition, Symbol rowNumberSymbol, PlanNode source) - { - return rowNumber(partitionBy, maxRowCountPerPartition, rowNumberSymbol, Optional.empty(), source); - } - - public RowNumberNode rowNumber(List partitionBy, Optional maxRowCountPerPartition, Symbol rowNumberSymbol, Optional hashSymbol, PlanNode source) { return new RowNumberNode( idAllocator.getNextId(), @@ -1389,11 +1365,10 @@ public RowNumberNode rowNumber(List partitionBy, Optional maxRo partitionBy, false, rowNumberSymbol, - maxRowCountPerPartition, - hashSymbol); + maxRowCountPerPartition); } - public TopNRankingNode topNRanking(DataOrganizationSpecification specification, RankingType rankingType, int maxRankingPerPartition, Symbol rankingSymbol, Optional hashSymbol, PlanNode source) + public TopNRankingNode topNRanking(DataOrganizationSpecification specification, RankingType rankingType, int maxRankingPerPartition, Symbol rankingSymbol, PlanNode source) { return new TopNRankingNode( idAllocator.getNextId(), @@ -1402,8 +1377,7 @@ public TopNRankingNode topNRanking(DataOrganizationSpecification specification, rankingType, rankingSymbol, maxRankingPerPartition, - false, - hashSymbol); + false); } public PatternRecognitionNode patternRecognition(Consumer consumer) diff --git a/core/trino-main/src/test/java/io/trino/sql/planner/planprinter/TestAnonymizeJsonRepresentation.java b/core/trino-main/src/test/java/io/trino/sql/planner/planprinter/TestAnonymizeJsonRepresentation.java index 46db3c2add6b..b073ada5889b 100644 --- a/core/trino-main/src/test/java/io/trino/sql/planner/planprinter/TestAnonymizeJsonRepresentation.java +++ b/core/trino-main/src/test/java/io/trino/sql/planner/planprinter/TestAnonymizeJsonRepresentation.java @@ -115,8 +115,7 @@ public void testAggregationPlan() "Aggregate", ImmutableMap.of( "type", "FINAL", - "keys", "[symbol_1, symbol_2]", - "hash", "[]"), + "keys", "[symbol_1, symbol_2]"), ImmutableList.of( new Symbol(BIGINT, "symbol_1"), new Symbol(BIGINT, "symbol_2"), diff --git a/core/trino-main/src/test/java/io/trino/sql/planner/planprinter/TestJsonRepresentation.java b/core/trino-main/src/test/java/io/trino/sql/planner/planprinter/TestJsonRepresentation.java index 61f61a9ac34b..2d8c3cce3b99 100644 --- a/core/trino-main/src/test/java/io/trino/sql/planner/planprinter/TestJsonRepresentation.java +++ b/core/trino-main/src/test/java/io/trino/sql/planner/planprinter/TestJsonRepresentation.java @@ -138,8 +138,7 @@ public void testAggregationPlan() "Aggregate", ImmutableMap.of( "type", "FINAL", - "keys", "[y, z]", - "hash", "[]"), + "keys", "[y, z]"), ImmutableList.of( new Symbol(BIGINT, "y"), new Symbol(BIGINT, "z"), From 21a2dd2e0fb50b483c783118f683893384f5637c Mon Sep 17 00:00:00 2001 From: Raunaq Morarka Date: Mon, 16 Jun 2025 00:50:24 +0530 Subject: [PATCH 2/6] Remove unused code for pre-computed hashes in window node --- .../java/io/trino/sql/planner/PlanCopier.java | 2 +- .../io/trino/sql/planner/QueryPlanner.java | 2 - .../iterative/rule/DecorrelateUnnest.java | 1 - .../iterative/rule/GatherAndMergeWindows.java | 1 - .../rule/ImplementLimitWithTies.java | 1 - .../rule/ImplementTableFunctionSource.java | 1 - .../rule/PruneOrderByInWindowAggregation.java | 1 - .../iterative/rule/PruneWindowColumns.java | 2 - .../PushDownDereferencesThroughWindow.java | 1 - .../rule/SetOperationNodeTranslator.java | 1 
- .../planner/optimizations/AddExchanges.java | 2 +- .../optimizations/AddLocalExchanges.java | 1 - .../planner/optimizations/SymbolMapper.java | 1 - .../io/trino/sql/planner/plan/WindowNode.java | 12 +----- .../sql/planner/planprinter/PlanPrinter.java | 2 +- .../TestEffectivePredicateExtractor.java | 1 - .../trino/sql/planner/TestTypeValidator.java | 3 -- .../sql/planner/assertions/WindowMatcher.java | 37 +------------------ .../rule/TestPruneWindowColumns.java | 12 ++---- .../iterative/rule/test/PlanBuilder.java | 13 ------- .../sql/planner/plan/TestWindowNode.java | 3 -- 21 files changed, 10 insertions(+), 90 deletions(-) diff --git a/core/trino-main/src/main/java/io/trino/sql/planner/PlanCopier.java b/core/trino-main/src/main/java/io/trino/sql/planner/PlanCopier.java index 08f1eaf97860..b505d9f76925 100644 --- a/core/trino-main/src/main/java/io/trino/sql/planner/PlanCopier.java +++ b/core/trino-main/src/main/java/io/trino/sql/planner/PlanCopier.java @@ -190,7 +190,7 @@ public PlanNode visitSort(SortNode node, RewriteContext context) @Override public PlanNode visitWindow(WindowNode node, RewriteContext context) { - return new WindowNode(idAllocator.getNextId(), context.rewrite(node.getSource()), node.getSpecification(), node.getWindowFunctions(), node.getHashSymbol(), node.getPrePartitionedInputs(), node.getPreSortedOrderPrefix()); + return new WindowNode(idAllocator.getNextId(), context.rewrite(node.getSource()), node.getSpecification(), node.getWindowFunctions(), node.getPrePartitionedInputs(), node.getPreSortedOrderPrefix()); } @Override diff --git a/core/trino-main/src/main/java/io/trino/sql/planner/QueryPlanner.java b/core/trino-main/src/main/java/io/trino/sql/planner/QueryPlanner.java index 2c9a7a763186..d3829ecda844 100644 --- a/core/trino-main/src/main/java/io/trino/sql/planner/QueryPlanner.java +++ b/core/trino-main/src/main/java/io/trino/sql/planner/QueryPlanner.java @@ -324,7 +324,6 @@ public RelationPlan planExpand(Query query) checkConvergenceStep.getNode(), new DataOrganizationSpecification(ImmutableList.of(), Optional.empty()), ImmutableMap.of(countSymbol, countFunction), - Optional.empty(), ImmutableSet.of(), 0); @@ -1808,7 +1807,6 @@ private PlanBuilder planWindow( subPlan.getRoot(), specification, functions.buildOrThrow(), - Optional.empty(), ImmutableSet.of(), 0)); } diff --git a/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/DecorrelateUnnest.java b/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/DecorrelateUnnest.java index 425eff468890..691cdf318829 100644 --- a/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/DecorrelateUnnest.java +++ b/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/DecorrelateUnnest.java @@ -471,7 +471,6 @@ public RewriteResult visitTopN(TopNNode node, Void context) source.getPlan(), new DataOrganizationSpecification(ImmutableList.of(uniqueSymbol), Optional.of(node.getOrderingScheme())), ImmutableMap.of(rowNumberSymbol, rowNumberFunction), - Optional.empty(), ImmutableSet.of(), 0); diff --git a/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/GatherAndMergeWindows.java b/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/GatherAndMergeWindows.java index 30ac43aa54ee..686c406d1c76 100644 --- a/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/GatherAndMergeWindows.java +++ b/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/GatherAndMergeWindows.java @@ -194,7 +194,6 @@ protected Optional 
manipulateAdjacentWindowNodes(WindowNode parent, Wi child.getSource(), parent.getSpecification(), functionsBuilder.buildOrThrow(), - parent.getHashSymbol(), parent.getPrePartitionedInputs(), parent.getPreSortedOrderPrefix()); diff --git a/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/ImplementLimitWithTies.java b/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/ImplementLimitWithTies.java index 09f90cffa96f..911f20a79c04 100644 --- a/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/ImplementLimitWithTies.java +++ b/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/ImplementLimitWithTies.java @@ -133,7 +133,6 @@ public static PlanNode rewriteLimitWithTiesWithPartitioning(LimitNode limitNode, source, new DataOrganizationSpecification(partitionBy, limitNode.getTiesResolvingScheme()), ImmutableMap.of(rankSymbol, rankFunction), - Optional.empty(), ImmutableSet.of(), 0); diff --git a/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/ImplementTableFunctionSource.java b/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/ImplementTableFunctionSource.java index 418825f20251..6aa1474c62ea 100644 --- a/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/ImplementTableFunctionSource.java +++ b/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/ImplementTableFunctionSource.java @@ -319,7 +319,6 @@ private static NodeWithSymbols planWindowFunctionsForSource( ImmutableMap.of( rowNumber, new WindowNode.Function(rowNumberFunction, ImmutableList.of(), Optional.empty(), FULL_FRAME, false, false), partitionSize, new WindowNode.Function(countFunction, ImmutableList.of(), Optional.empty(), FULL_FRAME, false, false)), - Optional.empty(), ImmutableSet.of(), 0); diff --git a/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/PruneOrderByInWindowAggregation.java b/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/PruneOrderByInWindowAggregation.java index c2a97fe97bca..16145329daf0 100644 --- a/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/PruneOrderByInWindowAggregation.java +++ b/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/PruneOrderByInWindowAggregation.java @@ -77,7 +77,6 @@ public Result apply(WindowNode node, Captures captures, Context context) node.getSource(), node.getSpecification(), rewritten.buildOrThrow(), - node.getHashSymbol(), node.getPrePartitionedInputs(), node.getPreSortedOrderPrefix())); } diff --git a/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/PruneWindowColumns.java b/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/PruneWindowColumns.java index b1a95f72410a..b2b77d50ab58 100644 --- a/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/PruneWindowColumns.java +++ b/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/PruneWindowColumns.java @@ -56,7 +56,6 @@ protected Optional pushDownProjectOff(Context context, WindowNode wind orderingScheme -> orderingScheme .orderBy() .forEach(referencedInputs::add)); - windowNode.getHashSymbol().ifPresent(referencedInputs::add); for (WindowNode.Function windowFunction : referencedFunctions.values()) { windowFunction.getOrderingScheme().ifPresent(orderingScheme -> referencedInputs.addAll(orderingScheme.orderBy())); @@ -69,7 +68,6 @@ protected Optional pushDownProjectOff(Context context, WindowNode wind .orElse(windowNode.getSource()), windowNode.getSpecification(), referencedFunctions, - 
windowNode.getHashSymbol(), windowNode.getPrePartitionedInputs(), windowNode.getPreSortedOrderPrefix()); diff --git a/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/PushDownDereferencesThroughWindow.java b/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/PushDownDereferencesThroughWindow.java index fd72a157eda4..f84c894bb35e 100644 --- a/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/PushDownDereferencesThroughWindow.java +++ b/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/PushDownDereferencesThroughWindow.java @@ -144,7 +144,6 @@ public Result apply(ProjectNode projectNode, Captures captures, Context context) oldFunction.isIgnoreNulls(), oldFunction.isDistinct()); })), - windowNode.getHashSymbol(), windowNode.getPrePartitionedInputs(), windowNode.getPreSortedOrderPrefix()), newAssignments)); diff --git a/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/SetOperationNodeTranslator.java b/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/SetOperationNodeTranslator.java index e9351ecc7b1c..5ee2201f5be2 100644 --- a/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/SetOperationNodeTranslator.java +++ b/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/SetOperationNodeTranslator.java @@ -213,7 +213,6 @@ private WindowNode appendCounts(UnionNode sourceNode, List originalColum sourceNode, new DataOrganizationSpecification(originalColumns, Optional.empty()), functions.buildOrThrow(), - Optional.empty(), ImmutableSet.of(), 0); } diff --git a/core/trino-main/src/main/java/io/trino/sql/planner/optimizations/AddExchanges.java b/core/trino-main/src/main/java/io/trino/sql/planner/optimizations/AddExchanges.java index da8ff68bd480..4a1754a6af50 100644 --- a/core/trino-main/src/main/java/io/trino/sql/planner/optimizations/AddExchanges.java +++ b/core/trino-main/src/main/java/io/trino/sql/planner/optimizations/AddExchanges.java @@ -389,7 +389,7 @@ public PlanWithProperties visitWindow(WindowNode node, PreferredProperties prefe } else { child = withDerivedProperties( - partitionedExchange(idAllocator.getNextId(), REMOTE, child.getNode(), node.getPartitionBy(), node.getHashSymbol()), + partitionedExchange(idAllocator.getNextId(), REMOTE, child.getNode(), node.getPartitionBy(), Optional.empty()), child.getProperties()); } } diff --git a/core/trino-main/src/main/java/io/trino/sql/planner/optimizations/AddLocalExchanges.java b/core/trino-main/src/main/java/io/trino/sql/planner/optimizations/AddLocalExchanges.java index 041055d9b1d7..9586158ac967 100644 --- a/core/trino-main/src/main/java/io/trino/sql/planner/optimizations/AddLocalExchanges.java +++ b/core/trino-main/src/main/java/io/trino/sql/planner/optimizations/AddLocalExchanges.java @@ -418,7 +418,6 @@ public PlanWithProperties visitWindow(WindowNode node, StreamPreferredProperties child.getNode(), node.getSpecification(), node.getWindowFunctions(), - node.getHashSymbol(), prePartitionedInputs, preSortedOrderPrefix); diff --git a/core/trino-main/src/main/java/io/trino/sql/planner/optimizations/SymbolMapper.java b/core/trino-main/src/main/java/io/trino/sql/planner/optimizations/SymbolMapper.java index a2af75202d71..91bb6a955f75 100644 --- a/core/trino-main/src/main/java/io/trino/sql/planner/optimizations/SymbolMapper.java +++ b/core/trino-main/src/main/java/io/trino/sql/planner/optimizations/SymbolMapper.java @@ -260,7 +260,6 @@ public WindowNode map(WindowNode node, PlanNode source) source, 
newSpecification.specification(), newFunctions.buildOrThrow(), - node.getHashSymbol().map(this::map), node.getPrePartitionedInputs().stream() .map(this::map) .collect(toImmutableSet()), diff --git a/core/trino-main/src/main/java/io/trino/sql/planner/plan/WindowNode.java b/core/trino-main/src/main/java/io/trino/sql/planner/plan/WindowNode.java index 03cbd058f5ec..606158239394 100644 --- a/core/trino-main/src/main/java/io/trino/sql/planner/plan/WindowNode.java +++ b/core/trino-main/src/main/java/io/trino/sql/planner/plan/WindowNode.java @@ -48,7 +48,6 @@ public class WindowNode private final DataOrganizationSpecification specification; private final int preSortedOrderPrefix; private final Map windowFunctions; - private final Optional hashSymbol; @JsonCreator public WindowNode( @@ -56,7 +55,6 @@ public WindowNode( @JsonProperty("source") PlanNode source, @JsonProperty("specification") DataOrganizationSpecification specification, @JsonProperty("windowFunctions") Map windowFunctions, - @JsonProperty("hashSymbol") Optional hashSymbol, @JsonProperty("prePartitionedInputs") Set prePartitionedInputs, @JsonProperty("preSortedOrderPrefix") int preSortedOrderPrefix) { @@ -65,7 +63,6 @@ public WindowNode( requireNonNull(source, "source is null"); requireNonNull(specification, "specification is null"); requireNonNull(windowFunctions, "windowFunctions is null"); - requireNonNull(hashSymbol, "hashSymbol is null"); requireNonNull(prePartitionedInputs, "prePartitionedInputs is null"); // Make the defensive copy eagerly, so it can be used for both the validation checks and assigned directly to the field afterwards prePartitionedInputs = ImmutableSet.copyOf(prePartitionedInputs); @@ -80,7 +77,6 @@ public WindowNode( this.prePartitionedInputs = prePartitionedInputs; this.specification = specification; this.windowFunctions = ImmutableMap.copyOf(windowFunctions); - this.hashSymbol = hashSymbol; this.preSortedOrderPrefix = preSortedOrderPrefix; } @@ -136,12 +132,6 @@ public List getFrames() .collect(toImmutableList()); } - @JsonProperty - public Optional getHashSymbol() - { - return hashSymbol; - } - @JsonProperty public Set getPrePartitionedInputs() { @@ -163,7 +153,7 @@ public R accept(PlanVisitor visitor, C context) @Override public PlanNode replaceChildren(List newChildren) { - return new WindowNode(getId(), Iterables.getOnlyElement(newChildren), specification, windowFunctions, hashSymbol, prePartitionedInputs, preSortedOrderPrefix); + return new WindowNode(getId(), Iterables.getOnlyElement(newChildren), specification, windowFunctions, prePartitionedInputs, preSortedOrderPrefix); } @Immutable diff --git a/core/trino-main/src/main/java/io/trino/sql/planner/planprinter/PlanPrinter.java b/core/trino-main/src/main/java/io/trino/sql/planner/planprinter/PlanPrinter.java index 3190edd42ac8..ca2bb31c933e 100644 --- a/core/trino-main/src/main/java/io/trino/sql/planner/planprinter/PlanPrinter.java +++ b/core/trino-main/src/main/java/io/trino/sql/planner/planprinter/PlanPrinter.java @@ -930,7 +930,7 @@ public Void visitWindow(WindowNode node, Context context) NodeRepresentation nodeOutput = addNode( node, "Window", - descriptor.put("hash", formatHash(node.getHashSymbol())).buildOrThrow(), + descriptor.buildOrThrow(), context); for (Entry entry : node.getWindowFunctions().entrySet()) { diff --git a/core/trino-main/src/test/java/io/trino/sql/planner/TestEffectivePredicateExtractor.java b/core/trino-main/src/test/java/io/trino/sql/planner/TestEffectivePredicateExtractor.java index 50b8ece37f2b..f93904161eba 100644 --- 
a/core/trino-main/src/test/java/io/trino/sql/planner/TestEffectivePredicateExtractor.java +++ b/core/trino-main/src/test/java/io/trino/sql/planner/TestEffectivePredicateExtractor.java @@ -416,7 +416,6 @@ public void testWindow() ImmutableList.of(new Symbol(BIGINT, "a")), ImmutableMap.of(new Symbol(BIGINT, "a"), SortOrder.ASC_NULLS_LAST)))), ImmutableMap.of(), - Optional.empty(), ImmutableSet.of(), 0); diff --git a/core/trino-main/src/test/java/io/trino/sql/planner/TestTypeValidator.java b/core/trino-main/src/test/java/io/trino/sql/planner/TestTypeValidator.java index 62f2a156e8e2..0e1f9f9968fb 100644 --- a/core/trino-main/src/test/java/io/trino/sql/planner/TestTypeValidator.java +++ b/core/trino-main/src/test/java/io/trino/sql/planner/TestTypeValidator.java @@ -152,7 +152,6 @@ public void testValidWindow() baseTableScan, specification, ImmutableMap.of(windowSymbol, function), - Optional.empty(), ImmutableSet.of(), 0); @@ -247,7 +246,6 @@ public void testInvalidWindowFunctionCall() baseTableScan, specification, ImmutableMap.of(windowSymbol, function), - Optional.empty(), ImmutableSet.of(), 0); @@ -280,7 +278,6 @@ public void testInvalidWindowFunctionSignature() baseTableScan, specification, ImmutableMap.of(windowSymbol, function), - Optional.empty(), ImmutableSet.of(), 0); diff --git a/core/trino-main/src/test/java/io/trino/sql/planner/assertions/WindowMatcher.java b/core/trino-main/src/test/java/io/trino/sql/planner/assertions/WindowMatcher.java index fed6e8633e8a..dfde6cf7f3bb 100644 --- a/core/trino-main/src/test/java/io/trino/sql/planner/assertions/WindowMatcher.java +++ b/core/trino-main/src/test/java/io/trino/sql/planner/assertions/WindowMatcher.java @@ -44,18 +44,15 @@ public final class WindowMatcher private final Optional> prePartitionedInputs; private final Optional> specification; private final Optional preSortedOrderPrefix; - private final Optional> hashSymbol; private WindowMatcher( Optional> prePartitionedInputs, Optional> specification, - Optional preSortedOrderPrefix, - Optional> hashSymbol) + Optional preSortedOrderPrefix) { this.prePartitionedInputs = requireNonNull(prePartitionedInputs, "prePartitionedInputs is null"); this.specification = requireNonNull(specification, "specification is null"); this.preSortedOrderPrefix = requireNonNull(preSortedOrderPrefix, "preSortedOrderPrefix is null"); - this.hashSymbol = requireNonNull(hashSymbol, "hashSymbol is null"); } @Override @@ -94,14 +91,6 @@ public MatchResult detailMatches(PlanNode node, StatsProvider stats, Session ses return NO_MATCH; } - if (!hashSymbol - .map(expectedHashSymbol -> expectedHashSymbol - .map(alias -> alias.toSymbol(symbolAliases)) - .equals(windowNode.getHashSymbol())) - .orElse(true)) { - return NO_MATCH; - } - /* * Window functions produce a symbol (the result of the function call) that we might * want to bind to an alias so we can reference it further up the tree. 
As such, @@ -119,7 +108,6 @@ public String toString() .add("prePartitionedInputs", prePartitionedInputs.orElse(null)) .add("specification", specification.orElse(null)) .add("preSortedOrderPrefix", preSortedOrderPrefix.orElse(null)) - .add("hashSymbol", hashSymbol.orElse(null)) .toString(); } @@ -135,7 +123,6 @@ public static class Builder private Optional> specification = Optional.empty(); private Optional preSortedOrderPrefix = Optional.empty(); private final List windowFunctionMatchers = new LinkedList<>(); - private Optional> hashSymbol = Optional.empty(); Builder(PlanMatchPattern source) { @@ -191,33 +178,13 @@ public Builder addFunction(String outputAlias, ExpectedValueProvider inputSymbolNameList = - ImmutableList.of("orderKey", "partitionKey", "hash", "startValue1", "startValue2", "endValue1", "endValue2", "input1", "input2", "aggOrderInput1", "aggOrderInput2", "unused"); + ImmutableList.of("orderKey", "partitionKey", "startValue1", "startValue2", "endValue1", "endValue2", "input1", "input2", "aggOrderInput1", "aggOrderInput2", "unused"); private static final Set inputSymbolNameSet = ImmutableSet.copyOf(inputSymbolNameList); private static final WindowNode.Frame FRAME1 = new WindowNode.Frame( @@ -112,8 +112,7 @@ public void testOneFunctionNotNeeded() ImmutableList.of("orderKey"), ImmutableMap.of("orderKey", ASC_NULLS_FIRST)) .preSortedOrderPrefix(0) - .addFunction("output2", windowFunction("min", ImmutableList.of("input2"), FRAME2, List.of(sort("aggOrderInput2", ASCENDING, FIRST)))) - .hashSymbol("hash"), + .addFunction("output2", windowFunction("min", ImmutableList.of("input2"), FRAME2, List.of(sort("aggOrderInput2", ASCENDING, FIRST)))), strictProject( Maps.asMap( Sets.difference(inputSymbolNameSet, ImmutableSet.of("input1", "startValue1", "endValue1", "aggOrderInput1")), @@ -165,8 +164,7 @@ public void testUnusedInputNotNeeded() ImmutableMap.of("orderKey", ASC_NULLS_FIRST)) .preSortedOrderPrefix(0) .addFunction("output1", windowFunction("min", ImmutableList.of("input1"), FRAME1, List.of(sort("aggOrderInput1", ASCENDING, FIRST)))) - .addFunction("output2", windowFunction("min", ImmutableList.of("input2"), FRAME2, List.of(sort("aggOrderInput2", ASCENDING, FIRST)))) - .hashSymbol("hash"), + .addFunction("output2", windowFunction("min", ImmutableList.of("input2"), FRAME2, List.of(sort("aggOrderInput2", ASCENDING, FIRST)))), strictProject( Maps.asMap( Sets.filter(inputSymbolNameSet, symbolName -> !symbolName.equals("unused")), @@ -181,7 +179,6 @@ private static PlanNode buildProjectedWindow( { Symbol orderKey = p.symbol("orderKey"); Symbol partitionKey = p.symbol("partitionKey"); - Symbol hash = p.symbol("hash"); Symbol startValue1 = p.symbol("startValue1"); Symbol startValue2 = p.symbol("startValue2"); Symbol endValue1 = p.symbol("endValue1"); @@ -194,7 +191,7 @@ private static PlanNode buildProjectedWindow( Symbol output1 = p.symbol("output1"); Symbol output2 = p.symbol("output2"); - List inputs = ImmutableList.of(orderKey, partitionKey, hash, startValue1, startValue2, endValue1, endValue2, input1, input2, aggOrderInput1, aggOrderInput2, unused); + List inputs = ImmutableList.of(orderKey, partitionKey, startValue1, startValue2, endValue1, endValue2, input1, input2, aggOrderInput1, aggOrderInput2, unused); List outputs = ImmutableList.builder().addAll(inputs).add(output1, output2).build(); return p.project( @@ -239,7 +236,6 @@ private static PlanNode buildProjectedWindow( Optional.of(orderKey)), false, false)), - hash, p.values( inputs.stream() .filter(sourceFilter) diff --git 
a/core/trino-main/src/test/java/io/trino/sql/planner/iterative/rule/test/PlanBuilder.java b/core/trino-main/src/test/java/io/trino/sql/planner/iterative/rule/test/PlanBuilder.java index f4e0f04b2c9c..774270686b43 100644 --- a/core/trino-main/src/test/java/io/trino/sql/planner/iterative/rule/test/PlanBuilder.java +++ b/core/trino-main/src/test/java/io/trino/sql/planner/iterative/rule/test/PlanBuilder.java @@ -1340,19 +1340,6 @@ public WindowNode window(DataOrganizationSpecification specification, Map functions, Symbol hashSymbol, PlanNode source) - { - return new WindowNode( - idAllocator.getNextId(), - source, - specification, - ImmutableMap.copyOf(functions), - Optional.of(hashSymbol), ImmutableSet.of(), 0); } diff --git a/core/trino-main/src/test/java/io/trino/sql/planner/plan/TestWindowNode.java b/core/trino-main/src/test/java/io/trino/sql/planner/plan/TestWindowNode.java index dff244e82c8b..90575f66651b 100644 --- a/core/trino-main/src/test/java/io/trino/sql/planner/plan/TestWindowNode.java +++ b/core/trino-main/src/test/java/io/trino/sql/planner/plan/TestWindowNode.java @@ -94,14 +94,12 @@ public void testSerializationRoundtrip() ImmutableList.of(columnB), ImmutableMap.of(columnB, SortOrder.ASC_NULLS_FIRST)))); Map functions = ImmutableMap.of(windowSymbol, new WindowNode.Function(resolvedFunction, ImmutableList.of(columnC.toSymbolReference()), Optional.empty(), frame, false, false)); - Optional hashSymbol = Optional.of(columnB); Set prePartitionedInputs = ImmutableSet.of(columnA); WindowNode windowNode = new WindowNode( id, sourceNode, specification, functions, - hashSymbol, prePartitionedInputs, 0); @@ -113,7 +111,6 @@ public void testSerializationRoundtrip() assertThat(actualNode.getSpecification()).isEqualTo(windowNode.getSpecification()); assertThat(actualNode.getWindowFunctions()).isEqualTo(windowNode.getWindowFunctions()); assertThat(actualNode.getFrames()).isEqualTo(windowNode.getFrames()); - assertThat(actualNode.getHashSymbol()).isEqualTo(windowNode.getHashSymbol()); assertThat(actualNode.getPrePartitionedInputs()).isEqualTo(windowNode.getPrePartitionedInputs()); assertThat(actualNode.getPreSortedOrderPrefix()).isEqualTo(windowNode.getPreSortedOrderPrefix()); } From 419f1d5c8317734591204f0fc514c7c3e8251d2b Mon Sep 17 00:00:00 2001 From: Raunaq Morarka Date: Mon, 16 Jun 2025 01:08:31 +0530 Subject: [PATCH 3/6] Remove unused code for pre-computed hashes in PatternRecognitionNode --- .../main/java/io/trino/sql/planner/PlanCopier.java | 1 - .../main/java/io/trino/sql/planner/QueryPlanner.java | 2 -- .../java/io/trino/sql/planner/RelationPlanner.java | 1 - .../iterative/rule/ExpressionRewriteRuleSet.java | 1 - .../iterative/rule/MergePatternRecognitionNodes.java | 1 - .../planner/iterative/rule/OptimizeRowPattern.java | 1 - .../iterative/rule/PrunePattenRecognitionColumns.java | 2 -- .../rule/PrunePatternRecognitionSourceColumns.java | 1 - .../PushDownProjectionsFromPatternRecognition.java | 1 - .../trino/sql/planner/optimizations/AddExchanges.java | 2 +- .../sql/planner/optimizations/AddLocalExchanges.java | 1 - .../trino/sql/planner/optimizations/SymbolMapper.java | 1 - .../sql/planner/plan/PatternRecognitionNode.java | 11 ----------- .../io/trino/sql/planner/planprinter/PlanPrinter.java | 2 +- .../rule/test/PatternRecognitionBuilder.java | 1 - .../plan/TestPatternRecognitionNodeSerialization.java | 1 - 16 files changed, 2 insertions(+), 28 deletions(-) diff --git a/core/trino-main/src/main/java/io/trino/sql/planner/PlanCopier.java 
b/core/trino-main/src/main/java/io/trino/sql/planner/PlanCopier.java index b505d9f76925..782b08c8905d 100644 --- a/core/trino-main/src/main/java/io/trino/sql/planner/PlanCopier.java +++ b/core/trino-main/src/main/java/io/trino/sql/planner/PlanCopier.java @@ -200,7 +200,6 @@ public PlanNode visitPatternRecognition(PatternRecognitionNode node, RewriteCont idAllocator.getNextId(), context.rewrite(node.getSource()), node.getSpecification(), - node.getHashSymbol(), node.getPrePartitionedInputs(), node.getPreSortedOrderPrefix(), node.getWindowFunctions(), diff --git a/core/trino-main/src/main/java/io/trino/sql/planner/QueryPlanner.java b/core/trino-main/src/main/java/io/trino/sql/planner/QueryPlanner.java index d3829ecda844..15e0fbaef771 100644 --- a/core/trino-main/src/main/java/io/trino/sql/planner/QueryPlanner.java +++ b/core/trino-main/src/main/java/io/trino/sql/planner/QueryPlanner.java @@ -1898,7 +1898,6 @@ private PlanBuilder planPatternRecognition( idAllocator.getNextId(), subPlan.getRoot(), specification, - Optional.empty(), ImmutableSet.of(), 0, functions.buildOrThrow(), @@ -2041,7 +2040,6 @@ private PlanBuilder planPatternRecognition( idAllocator.getNextId(), subPlan.getRoot(), specification, - Optional.empty(), ImmutableSet.of(), 0, ImmutableMap.of(), diff --git a/core/trino-main/src/main/java/io/trino/sql/planner/RelationPlanner.java b/core/trino-main/src/main/java/io/trino/sql/planner/RelationPlanner.java index 724bea63b14a..1f695bb4bedf 100644 --- a/core/trino-main/src/main/java/io/trino/sql/planner/RelationPlanner.java +++ b/core/trino-main/src/main/java/io/trino/sql/planner/RelationPlanner.java @@ -658,7 +658,6 @@ protected RelationPlan visitPatternRecognitionRelation(PatternRecognitionRelatio idAllocator.getNextId(), planBuilder.getRoot(), specification, - Optional.empty(), ImmutableSet.of(), 0, ImmutableMap.of(), diff --git a/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/ExpressionRewriteRuleSet.java b/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/ExpressionRewriteRuleSet.java index 9659f5c09a69..66f631a3945c 100644 --- a/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/ExpressionRewriteRuleSet.java +++ b/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/ExpressionRewriteRuleSet.java @@ -383,7 +383,6 @@ public Result apply(PatternRecognitionNode node, Captures captures, Context cont node.getId(), node.getSource(), node.getSpecification(), - node.getHashSymbol(), node.getPrePartitionedInputs(), node.getPreSortedOrderPrefix(), node.getWindowFunctions(), diff --git a/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/MergePatternRecognitionNodes.java b/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/MergePatternRecognitionNodes.java index 796b99174e60..6d3150415bec 100644 --- a/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/MergePatternRecognitionNodes.java +++ b/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/MergePatternRecognitionNodes.java @@ -279,7 +279,6 @@ private static PatternRecognitionNode merge(PatternRecognitionNode parent, Patte parent.getId(), child.getSource(), parent.getSpecification(), - parent.getHashSymbol(), parent.getPrePartitionedInputs(), parent.getPreSortedOrderPrefix(), windowFunctions.buildOrThrow(), diff --git a/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/OptimizeRowPattern.java b/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/OptimizeRowPattern.java index 
9e78ba631198..9df47bea3cf1 100644 --- a/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/OptimizeRowPattern.java +++ b/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/OptimizeRowPattern.java @@ -47,7 +47,6 @@ public Result apply(PatternRecognitionNode node, Captures captures, Context cont node.getId(), node.getSource(), node.getSpecification(), - node.getHashSymbol(), node.getPrePartitionedInputs(), node.getPreSortedOrderPrefix(), node.getWindowFunctions(), diff --git a/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/PrunePattenRecognitionColumns.java b/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/PrunePattenRecognitionColumns.java index abbdf937aa76..96d01fa923aa 100644 --- a/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/PrunePattenRecognitionColumns.java +++ b/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/PrunePattenRecognitionColumns.java @@ -64,7 +64,6 @@ protected Optional pushDownProjectOff(Context context, PatternRecognit .forEach(referencedInputs::add); referencedInputs.addAll(patternRecognitionNode.getPartitionBy()); patternRecognitionNode.getOrderingScheme().ifPresent(orderingScheme -> referencedInputs.addAll(orderingScheme.orderBy())); - patternRecognitionNode.getHashSymbol().ifPresent(referencedInputs::add); referencedFunctions.values().stream() .map(SymbolsExtractor::extractUnique) .forEach(referencedInputs::addAll); @@ -89,7 +88,6 @@ protected Optional pushDownProjectOff(Context context, PatternRecognit patternRecognitionNode.getId(), prunedSource.orElse(patternRecognitionNode.getSource()), patternRecognitionNode.getSpecification(), - patternRecognitionNode.getHashSymbol(), patternRecognitionNode.getPrePartitionedInputs(), patternRecognitionNode.getPreSortedOrderPrefix(), referencedFunctions, diff --git a/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/PrunePatternRecognitionSourceColumns.java b/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/PrunePatternRecognitionSourceColumns.java index 8c618b449b81..4d0da1c9fb8a 100644 --- a/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/PrunePatternRecognitionSourceColumns.java +++ b/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/PrunePatternRecognitionSourceColumns.java @@ -65,7 +65,6 @@ public Result apply(PatternRecognitionNode node, Captures captures, Context cont referencedInputs.addAll(node.getPartitionBy()); node.getOrderingScheme().ifPresent(orderingScheme -> referencedInputs.addAll(orderingScheme.orderBy())); - node.getHashSymbol().ifPresent(referencedInputs::add); node.getMeasures().values().stream() .map(PatternRecognitionNode.Measure::getExpressionAndValuePointers) .map(ExpressionAndValuePointers::getInputSymbols) diff --git a/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/PushDownProjectionsFromPatternRecognition.java b/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/PushDownProjectionsFromPatternRecognition.java index 093af1b0e834..81f315ec8ae9 100644 --- a/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/PushDownProjectionsFromPatternRecognition.java +++ b/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/PushDownProjectionsFromPatternRecognition.java @@ -95,7 +95,6 @@ public Result apply(PatternRecognitionNode node, Captures captures, Context cont node.getId(), projectNode, node.getSpecification(), - node.getHashSymbol(), node.getPrePartitionedInputs(), 
node.getPreSortedOrderPrefix(), node.getWindowFunctions(), diff --git a/core/trino-main/src/main/java/io/trino/sql/planner/optimizations/AddExchanges.java b/core/trino-main/src/main/java/io/trino/sql/planner/optimizations/AddExchanges.java index 4a1754a6af50..d984fac7331b 100644 --- a/core/trino-main/src/main/java/io/trino/sql/planner/optimizations/AddExchanges.java +++ b/core/trino-main/src/main/java/io/trino/sql/planner/optimizations/AddExchanges.java @@ -420,7 +420,7 @@ public PlanWithProperties visitPatternRecognition(PatternRecognitionNode node, P } else { child = withDerivedProperties( - partitionedExchange(idAllocator.getNextId(), REMOTE, child.getNode(), node.getPartitionBy(), node.getHashSymbol()), + partitionedExchange(idAllocator.getNextId(), REMOTE, child.getNode(), node.getPartitionBy(), Optional.empty()), child.getProperties()); } } diff --git a/core/trino-main/src/main/java/io/trino/sql/planner/optimizations/AddLocalExchanges.java b/core/trino-main/src/main/java/io/trino/sql/planner/optimizations/AddLocalExchanges.java index 9586158ac967..f3cdb6b0e614 100644 --- a/core/trino-main/src/main/java/io/trino/sql/planner/optimizations/AddLocalExchanges.java +++ b/core/trino-main/src/main/java/io/trino/sql/planner/optimizations/AddLocalExchanges.java @@ -461,7 +461,6 @@ public PlanWithProperties visitPatternRecognition(PatternRecognitionNode node, S node.getId(), child.getNode(), node.getSpecification(), - node.getHashSymbol(), prePartitionedInputs, preSortedOrderPrefix, node.getWindowFunctions(), diff --git a/core/trino-main/src/main/java/io/trino/sql/planner/optimizations/SymbolMapper.java b/core/trino-main/src/main/java/io/trino/sql/planner/optimizations/SymbolMapper.java index 91bb6a955f75..bdabea08caa1 100644 --- a/core/trino-main/src/main/java/io/trino/sql/planner/optimizations/SymbolMapper.java +++ b/core/trino-main/src/main/java/io/trino/sql/planner/optimizations/SymbolMapper.java @@ -325,7 +325,6 @@ public PatternRecognitionNode map(PatternRecognitionNode node, PlanNode source) node.getId(), source, newSpecification.specification(), - node.getHashSymbol().map(this::map), node.getPrePartitionedInputs().stream() .map(this::map) .collect(toImmutableSet()), diff --git a/core/trino-main/src/main/java/io/trino/sql/planner/plan/PatternRecognitionNode.java b/core/trino-main/src/main/java/io/trino/sql/planner/plan/PatternRecognitionNode.java index 15087657d7f4..80f40eb48017 100644 --- a/core/trino-main/src/main/java/io/trino/sql/planner/plan/PatternRecognitionNode.java +++ b/core/trino-main/src/main/java/io/trino/sql/planner/plan/PatternRecognitionNode.java @@ -49,7 +49,6 @@ public class PatternRecognitionNode { private final PlanNode source; private final DataOrganizationSpecification specification; - private final Optional hashSymbol; private final Set prePartitionedInputs; private final int preSortedOrderPrefix; private final Map windowFunctions; @@ -77,7 +76,6 @@ public PatternRecognitionNode( @JsonProperty("id") PlanNodeId id, @JsonProperty("source") PlanNode source, @JsonProperty("specification") DataOrganizationSpecification specification, - @JsonProperty("hashSymbol") Optional hashSymbol, @JsonProperty("prePartitionedInputs") Set prePartitionedInputs, @JsonProperty("preSortedOrderPrefix") int preSortedOrderPrefix, @JsonProperty("windowFunctions") Map windowFunctions, @@ -95,7 +93,6 @@ public PatternRecognitionNode( requireNonNull(source, "source is null"); requireNonNull(specification, "specification is null"); - requireNonNull(hashSymbol, "hashSymbol is null"); 
checkArgument(specification.partitionBy().containsAll(prePartitionedInputs), "prePartitionedInputs must be contained in partitionBy"); Optional orderingScheme = specification.orderingScheme(); checkArgument(preSortedOrderPrefix == 0 || (orderingScheme.isPresent() && preSortedOrderPrefix <= orderingScheme.get().orderBy().size()), "Cannot have sorted more symbols than those requested"); @@ -115,7 +112,6 @@ public PatternRecognitionNode( this.source = source; this.specification = specification; - this.hashSymbol = hashSymbol; this.prePartitionedInputs = ImmutableSet.copyOf(prePartitionedInputs); this.preSortedOrderPrefix = preSortedOrderPrefix; this.windowFunctions = ImmutableMap.copyOf(windowFunctions); @@ -179,12 +175,6 @@ public Optional getOrderingScheme() return specification.orderingScheme(); } - @JsonProperty - public Optional getHashSymbol() - { - return hashSymbol; - } - @JsonProperty public Set getPrePartitionedInputs() { @@ -264,7 +254,6 @@ public PlanNode replaceChildren(List newChildren) getId(), Iterables.getOnlyElement(newChildren), specification, - hashSymbol, prePartitionedInputs, preSortedOrderPrefix, windowFunctions, diff --git a/core/trino-main/src/main/java/io/trino/sql/planner/planprinter/PlanPrinter.java b/core/trino-main/src/main/java/io/trino/sql/planner/planprinter/PlanPrinter.java index ca2bb31c933e..1ce6f9d4a305 100644 --- a/core/trino-main/src/main/java/io/trino/sql/planner/planprinter/PlanPrinter.java +++ b/core/trino-main/src/main/java/io/trino/sql/planner/planprinter/PlanPrinter.java @@ -983,7 +983,7 @@ public Void visitPatternRecognition(PatternRecognitionNode node, Context context NodeRepresentation nodeOutput = addNode( node, "PatternRecognition", - descriptor.put("hash", formatHash(node.getHashSymbol())).buildOrThrow(), + descriptor.buildOrThrow(), context); if (node.getCommonBaseFrame().isPresent()) { diff --git a/core/trino-main/src/test/java/io/trino/sql/planner/iterative/rule/test/PatternRecognitionBuilder.java b/core/trino-main/src/test/java/io/trino/sql/planner/iterative/rule/test/PatternRecognitionBuilder.java index d68249a31efa..7e22eeb96d1c 100644 --- a/core/trino-main/src/test/java/io/trino/sql/planner/iterative/rule/test/PatternRecognitionBuilder.java +++ b/core/trino-main/src/test/java/io/trino/sql/planner/iterative/rule/test/PatternRecognitionBuilder.java @@ -160,7 +160,6 @@ public PatternRecognitionNode build(PlanNodeIdAllocator idAllocator) idAllocator.getNextId(), source, new DataOrganizationSpecification(partitionBy, orderBy), - Optional.empty(), ImmutableSet.of(), 0, windowFunctions, diff --git a/core/trino-main/src/test/java/io/trino/sql/planner/plan/TestPatternRecognitionNodeSerialization.java b/core/trino-main/src/test/java/io/trino/sql/planner/plan/TestPatternRecognitionNodeSerialization.java index 321c87a068f5..34c48a5ba306 100644 --- a/core/trino-main/src/test/java/io/trino/sql/planner/plan/TestPatternRecognitionNodeSerialization.java +++ b/core/trino-main/src/test/java/io/trino/sql/planner/plan/TestPatternRecognitionNodeSerialization.java @@ -200,7 +200,6 @@ public void testPatternRecognitionNodeRoundtrip() new PlanNodeId("0"), new ValuesNode(new PlanNodeId("1"), 1), new DataOrganizationSpecification(ImmutableList.of(), Optional.empty()), - Optional.empty(), ImmutableSet.of(), 0, ImmutableMap.of( From 8789c2ea8eb7c41d984b79808fc5a67319fd20cf Mon Sep 17 00:00:00 2001 From: Raunaq Morarka Date: Mon, 16 Jun 2025 02:18:34 +0530 Subject: [PATCH 4/6] Remove unused code for pre-computed hashes in TableFunctionProcessorNode --- 
.../rule/ImplementTableFunctionSource.java | 3 --- .../PruneTableFunctionProcessorColumns.java | 1 - ...neTableFunctionProcessorSourceColumns.java | 3 --- .../planner/optimizations/AddExchanges.java | 2 +- .../optimizations/AddLocalExchanges.java | 1 - .../planner/optimizations/SymbolMapper.java | 1 - .../UnaliasSymbolReferences.java | 1 - .../plan/TableFunctionProcessorNode.java | 10 --------- .../sql/planner/planprinter/PlanPrinter.java | 2 +- .../TableFunctionProcessorMatcher.java | 21 ++----------------- ...neTableFunctionProcessorSourceColumns.java | 10 +++------ .../test/TableFunctionProcessorBuilder.java | 8 ------- 12 files changed, 7 insertions(+), 56 deletions(-) diff --git a/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/ImplementTableFunctionSource.java b/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/ImplementTableFunctionSource.java index 6aa1474c62ea..caf266363d56 100644 --- a/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/ImplementTableFunctionSource.java +++ b/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/ImplementTableFunctionSource.java @@ -165,7 +165,6 @@ public Result apply(TableFunctionNode node, Captures captures, Context context) Optional.empty(), ImmutableSet.of(), 0, - Optional.empty(), node.getHandle())); } @@ -187,7 +186,6 @@ public Result apply(TableFunctionNode node, Captures captures, Context context) sourceProperties.specification(), ImmutableSet.of(), 0, - Optional.empty(), node.getHandle())); } Map sources = mapSourcesByName(node.getSources(), node.getTableArgumentProperties()); @@ -282,7 +280,6 @@ public Result apply(TableFunctionNode node, Captures captures, Context context) Optional.of(new DataOrganizationSpecification(finalPartitionBy, finalOrderBy)), ImmutableSet.of(), 0, - Optional.empty(), node.getHandle())); } diff --git a/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/PruneTableFunctionProcessorColumns.java b/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/PruneTableFunctionProcessorColumns.java index 84d9cdf8406e..789942726a17 100644 --- a/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/PruneTableFunctionProcessorColumns.java +++ b/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/PruneTableFunctionProcessorColumns.java @@ -81,7 +81,6 @@ protected Optional pushDownProjectOff(Context context, TableFunctionPr node.getSpecification(), node.getPrePartitioned(), node.getPreSorted(), - node.getHashSymbol(), node.getHandle())); } } diff --git a/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/PruneTableFunctionProcessorSourceColumns.java b/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/PruneTableFunctionProcessorSourceColumns.java index fdf3b8811150..3d77128ad907 100644 --- a/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/PruneTableFunctionProcessorSourceColumns.java +++ b/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/PruneTableFunctionProcessorSourceColumns.java @@ -77,8 +77,6 @@ public Result apply(TableFunctionProcessorNode node, Captures captures, Context specification.orderingScheme().ifPresent(orderingScheme -> requiredInputs.addAll(orderingScheme.orderBy())); }); - node.getHashSymbol().ifPresent(requiredInputs::add); - Optional> updatedMarkerSymbols = node.getMarkerSymbols() .map(mapping -> filterKeys(mapping, requiredInputs.build()::contains)); @@ -97,7 +95,6 @@ public Result apply(TableFunctionProcessorNode node, Captures 
captures, Context node.getSpecification(), node.getPrePartitioned(), node.getPreSorted(), - node.getHashSymbol(), node.getHandle()))) .orElse(Result.empty()); } diff --git a/core/trino-main/src/main/java/io/trino/sql/planner/optimizations/AddExchanges.java b/core/trino-main/src/main/java/io/trino/sql/planner/optimizations/AddExchanges.java index d984fac7331b..bebc382c0586 100644 --- a/core/trino-main/src/main/java/io/trino/sql/planner/optimizations/AddExchanges.java +++ b/core/trino-main/src/main/java/io/trino/sql/planner/optimizations/AddExchanges.java @@ -472,7 +472,7 @@ else if (!isNodePartitionedOn(child.getProperties(), partitionBy)) { } else { child = withDerivedProperties( - partitionedExchange(idAllocator.getNextId(), REMOTE, child.getNode(), partitionBy, node.getHashSymbol()), + partitionedExchange(idAllocator.getNextId(), REMOTE, child.getNode(), partitionBy, Optional.empty()), child.getProperties()); } } diff --git a/core/trino-main/src/main/java/io/trino/sql/planner/optimizations/AddLocalExchanges.java b/core/trino-main/src/main/java/io/trino/sql/planner/optimizations/AddLocalExchanges.java index f3cdb6b0e614..c71a16a8e10e 100644 --- a/core/trino-main/src/main/java/io/trino/sql/planner/optimizations/AddLocalExchanges.java +++ b/core/trino-main/src/main/java/io/trino/sql/planner/optimizations/AddLocalExchanges.java @@ -546,7 +546,6 @@ public PlanWithProperties visitTableFunctionProcessor(TableFunctionProcessorNode node.getSpecification(), prePartitionedInputs, preSortedOrderPrefix, - node.getHashSymbol(), node.getHandle()); return deriveProperties(result, child.getProperties()); diff --git a/core/trino-main/src/main/java/io/trino/sql/planner/optimizations/SymbolMapper.java b/core/trino-main/src/main/java/io/trino/sql/planner/optimizations/SymbolMapper.java index bdabea08caa1..2b276d10a678 100644 --- a/core/trino-main/src/main/java/io/trino/sql/planner/optimizations/SymbolMapper.java +++ b/core/trino-main/src/main/java/io/trino/sql/planner/optimizations/SymbolMapper.java @@ -436,7 +436,6 @@ public TableFunctionProcessorNode map(TableFunctionProcessorNode node, PlanNode .map(this::map) .collect(toImmutableSet()), newSpecification.map(SpecificationWithPreSortedPrefix::preSorted).orElse(node.getPreSorted()), - node.getHashSymbol().map(this::map), node.getHandle()); } diff --git a/core/trino-main/src/main/java/io/trino/sql/planner/optimizations/UnaliasSymbolReferences.java b/core/trino-main/src/main/java/io/trino/sql/planner/optimizations/UnaliasSymbolReferences.java index ecc71a7bbffa..00160c2ae78e 100644 --- a/core/trino-main/src/main/java/io/trino/sql/planner/optimizations/UnaliasSymbolReferences.java +++ b/core/trino-main/src/main/java/io/trino/sql/planner/optimizations/UnaliasSymbolReferences.java @@ -365,7 +365,6 @@ public PlanAndMappings visitTableFunctionProcessor(TableFunctionProcessorNode no Optional.empty(), ImmutableSet.of(), 0, - node.getHashSymbol().map(mapper::map), node.getHandle()), mapping); } diff --git a/core/trino-main/src/main/java/io/trino/sql/planner/plan/TableFunctionProcessorNode.java b/core/trino-main/src/main/java/io/trino/sql/planner/plan/TableFunctionProcessorNode.java index 33de04c9f2b4..d69f2bd101b0 100644 --- a/core/trino-main/src/main/java/io/trino/sql/planner/plan/TableFunctionProcessorNode.java +++ b/core/trino-main/src/main/java/io/trino/sql/planner/plan/TableFunctionProcessorNode.java @@ -66,7 +66,6 @@ public class TableFunctionProcessorNode private final Optional specification; private final Set prePartitioned; private final int preSorted; - 
private final Optional hashSymbol; private final TableFunctionHandle handle; @@ -83,7 +82,6 @@ public TableFunctionProcessorNode( @JsonProperty("specification") Optional specification, @JsonProperty("prePartitioned") Set prePartitioned, @JsonProperty("preSorted") int preSorted, - @JsonProperty("hashSymbol") Optional hashSymbol, @JsonProperty("handle") TableFunctionHandle handle) { super(id); @@ -112,7 +110,6 @@ public TableFunctionProcessorNode( .orElse(0) >= preSorted, "the number of pre-sorted symbols cannot be greater than the number of all ordering symbols"); checkArgument(preSorted == 0 || partitionBy.equals(prePartitioned), "to specify pre-sorted symbols, it is required that all partitioning symbols are pre-partitioned"); - this.hashSymbol = requireNonNull(hashSymbol, "hashSymbol is null"); this.handle = requireNonNull(handle, "handle is null"); } @@ -176,12 +173,6 @@ public int getPreSorted() return preSorted; } - @JsonProperty - public Optional getHashSymbol() - { - return hashSymbol; - } - @JsonProperty public TableFunctionHandle getHandle() { @@ -233,7 +224,6 @@ public PlanNode replaceChildren(List newSources) specification, prePartitioned, preSorted, - hashSymbol, handle); } } diff --git a/core/trino-main/src/main/java/io/trino/sql/planner/planprinter/PlanPrinter.java b/core/trino-main/src/main/java/io/trino/sql/planner/planprinter/PlanPrinter.java index 1ce6f9d4a305..5a5e4c9426dd 100644 --- a/core/trino-main/src/main/java/io/trino/sql/planner/planprinter/PlanPrinter.java +++ b/core/trino-main/src/main/java/io/trino/sql/planner/planprinter/PlanPrinter.java @@ -1967,7 +1967,7 @@ public Void visitTableFunctionProcessor(TableFunctionProcessorNode node, Context specification.orderingScheme().ifPresent(orderingScheme -> descriptor.put("orderBy", formatOrderingScheme(orderingScheme, node.getPreSorted()))); }); - addNode(node, "TableFunctionProcessor", descriptor.put("hash", formatHash(node.getHashSymbol())).buildOrThrow(), context); + addNode(node, "TableFunctionProcessor", descriptor.buildOrThrow(), context); return processChildren(node, new Context(context.isInitialPlan())); } diff --git a/core/trino-main/src/test/java/io/trino/sql/planner/assertions/TableFunctionProcessorMatcher.java b/core/trino-main/src/test/java/io/trino/sql/planner/assertions/TableFunctionProcessorMatcher.java index b0b6329d019b..c7b6b5924dc4 100644 --- a/core/trino-main/src/test/java/io/trino/sql/planner/assertions/TableFunctionProcessorMatcher.java +++ b/core/trino-main/src/test/java/io/trino/sql/planner/assertions/TableFunctionProcessorMatcher.java @@ -48,7 +48,6 @@ public class TableFunctionProcessorMatcher private final List> requiredSymbols; private final Optional> markerSymbols; private final Optional> specification; - private final Optional hashSymbol; private TableFunctionProcessorMatcher( String name, @@ -56,8 +55,7 @@ private TableFunctionProcessorMatcher( List> passThroughSymbols, List> requiredSymbols, Optional> markerSymbols, - Optional> specification, - Optional hashSymbol) + Optional> specification) { this.name = requireNonNull(name, "name is null"); this.properOutputs = ImmutableList.copyOf(properOutputs); @@ -69,7 +67,6 @@ private TableFunctionProcessorMatcher( .collect(toImmutableList()); this.markerSymbols = markerSymbols.map(ImmutableMap::copyOf); this.specification = requireNonNull(specification, "specification is null"); - this.hashSymbol = requireNonNull(hashSymbol, "hashSymbol is null"); } @Override @@ -145,12 +142,6 @@ public MatchResult detailMatches(PlanNode node, StatsProvider 
stats, Session ses } } - if (hashSymbol.isPresent()) { - if (!hashSymbol.map(symbolAliases::get).equals(tableFunctionProcessorNode.getHashSymbol().map(Symbol::toSymbolReference))) { - return NO_MATCH; - } - } - ImmutableMap.Builder properOutputsMapping = ImmutableMap.builder(); for (int i = 0; i < properOutputs.size(); i++) { properOutputsMapping.put(properOutputs.get(i), tableFunctionProcessorNode.getProperOutputs().get(i).toSymbolReference()); @@ -173,7 +164,6 @@ public String toString() .add("requiredSymbols", requiredSymbols) .add("markerSymbols", markerSymbols) .add("specification", specification) - .add("hashSymbol", hashSymbol) .toString(); } @@ -186,7 +176,6 @@ public static class Builder private List> requiredSymbols = ImmutableList.of(); private Optional> markerSymbols = Optional.empty(); private Optional> specification = Optional.empty(); - private Optional hashSymbol = Optional.empty(); public Builder() { @@ -234,17 +223,11 @@ public Builder specification(ExpectedValueProvider new PlanMatchPattern[] {sourcePattern}).orElse(new PlanMatchPattern[] {}); return node(TableFunctionProcessorNode.class, sources) - .with(new TableFunctionProcessorMatcher(name, properOutputs, passThroughSymbols, requiredSymbols, markerSymbols, specification, hashSymbol)); + .with(new TableFunctionProcessorMatcher(name, properOutputs, passThroughSymbols, requiredSymbols, markerSymbols, specification)); } } } diff --git a/core/trino-main/src/test/java/io/trino/sql/planner/iterative/rule/TestPruneTableFunctionProcessorSourceColumns.java b/core/trino-main/src/test/java/io/trino/sql/planner/iterative/rule/TestPruneTableFunctionProcessorSourceColumns.java index 719a5c09076d..f70dce25c667 100644 --- a/core/trino-main/src/test/java/io/trino/sql/planner/iterative/rule/TestPruneTableFunctionProcessorSourceColumns.java +++ b/core/trino-main/src/test/java/io/trino/sql/planner/iterative/rule/TestPruneTableFunctionProcessorSourceColumns.java @@ -50,7 +50,6 @@ public void testPruneUnreferencedSymbol() Symbol c = p.symbol("c"); Symbol d = p.symbol("d"); Symbol unreferenced = p.symbol("unreferenced"); - Symbol hash = p.symbol("hash"); Symbol marker = p.symbol("marker"); return p.tableFunctionProcessor( builder -> builder @@ -65,8 +64,7 @@ public void testPruneUnreferencedSymbol() d, marker, unreferenced, marker)) .specification(new DataOrganizationSpecification(ImmutableList.of(c), Optional.of(new OrderingScheme(ImmutableList.of(d), ImmutableMap.of(d, ASC_NULLS_FIRST))))) - .hashSymbol(hash) - .source(p.values(a, b, c, d, unreferenced, hash, marker))); + .source(p.values(a, b, c, d, unreferenced, marker))); }) .matches(tableFunctionProcessor(builder -> builder .name("test_function") @@ -78,17 +76,15 @@ public void testPruneUnreferencedSymbol() "b", "marker", "c", "marker", "d", "marker")) - .specification(specification(ImmutableList.of("c"), ImmutableList.of("d"), ImmutableMap.of("d", ASC_NULLS_FIRST))) - .hashSymbol("hash"), + .specification(specification(ImmutableList.of("c"), ImmutableList.of("d"), ImmutableMap.of("d", ASC_NULLS_FIRST))), project( ImmutableMap.of( "a", expression(new Reference(BIGINT, "a")), "b", expression(new Reference(BIGINT, "b")), "c", expression(new Reference(BIGINT, "c")), "d", expression(new Reference(BIGINT, "d")), - "hash", expression(new Reference(BIGINT, "hash")), "marker", expression(new Reference(BIGINT, "marker"))), - values("a", "b", "c", "d", "unreferenced", "hash", "marker")))); + values("a", "b", "c", "d", "unreferenced", "marker")))); } @Test diff --git 
a/core/trino-main/src/test/java/io/trino/sql/planner/iterative/rule/test/TableFunctionProcessorBuilder.java b/core/trino-main/src/test/java/io/trino/sql/planner/iterative/rule/test/TableFunctionProcessorBuilder.java index 5fb7321d11fe..c90568e027d4 100644 --- a/core/trino-main/src/test/java/io/trino/sql/planner/iterative/rule/test/TableFunctionProcessorBuilder.java +++ b/core/trino-main/src/test/java/io/trino/sql/planner/iterative/rule/test/TableFunctionProcessorBuilder.java @@ -44,7 +44,6 @@ public class TableFunctionProcessorBuilder private Optional specification = Optional.empty(); private Set prePartitioned = ImmutableSet.of(); private int preSorted; - private Optional hashSymbol = Optional.empty(); private ConnectorTableFunctionHandle connectorHandle = new ConnectorTableFunctionHandle() {}; public TableFunctionProcessorBuilder() {} @@ -109,12 +108,6 @@ public TableFunctionProcessorBuilder preSorted(int preSorted) return this; } - public TableFunctionProcessorBuilder hashSymbol(Symbol hashSymbol) - { - this.hashSymbol = Optional.of(hashSymbol); - return this; - } - public TableFunctionProcessorBuilder connectorHandle(ConnectorTableFunctionHandle connectorHandle) { this.connectorHandle = connectorHandle; @@ -135,7 +128,6 @@ public TableFunctionProcessorNode build(PlanNodeIdAllocator idAllocator) specification, prePartitioned, preSorted, - hashSymbol, new TableFunctionHandle(TEST_CATALOG_HANDLE, connectorHandle, TestingTransactionHandle.create())); } } From 7ff99f7cc4100d735c4f6298b2aebc4faa3b1267 Mon Sep 17 00:00:00 2001 From: Raunaq Morarka Date: Mon, 16 Jun 2025 10:14:57 +0530 Subject: [PATCH 5/6] Remove unused code for pre-computed hashes in exchanges --- .../operator/exchange/LocalExchange.java | 20 ++---- .../sql/planner/LocalExecutionPlanner.java | 62 +++++++------------ .../io/trino/sql/planner/LogicalPlanner.java | 2 - .../sql/planner/NodePartitioningManager.java | 2 - .../trino/sql/planner/PartitioningScheme.java | 37 ++--------- .../io/trino/sql/planner/PlanFragmenter.java | 1 - .../sql/planner/SystemPartitioningHandle.java | 20 +++--- .../rule/AdaptiveReorderPartitionedJoin.java | 8 +-- ...wPartialAggregationOverGroupIdRuleSet.java | 1 - .../iterative/rule/PruneExchangeColumns.java | 2 - .../rule/PruneTableExecuteSourceColumns.java | 1 - .../rule/PruneTableWriterSourceColumns.java | 1 - ...PushPartialAggregationThroughExchange.java | 6 -- .../rule/PushProjectionThroughExchange.java | 11 ---- ...shRemoteExchangeThroughAssignUniqueId.java | 1 - .../planner/optimizations/AddExchanges.java | 35 +++++------ .../optimizations/AddLocalExchanges.java | 9 +-- .../planner/optimizations/SymbolMapper.java | 1 - .../trino/sql/planner/plan/ExchangeNode.java | 7 +-- .../sql/planner/planprinter/PlanPrinter.java | 15 ++--- .../io/trino/cost/TestCostCalculator.java | 8 +-- .../operator/exchange/TestLocalExchange.java | 18 ------ .../io/trino/operator/join/JoinTestUtils.java | 1 - .../join/unspilled/JoinTestUtils.java | 1 - .../operator/output/TestPagePartitioner.java | 2 +- .../rule/TestPruneExchangeColumns.java | 22 ------- .../TestPruneTableExecuteSourceColumns.java | 6 +- .../TestPruneTableWriterSourceColumns.java | 6 +- .../TestPushProjectionThroughExchange.java | 36 ++++------- .../iterative/rule/test/PlanBuilder.java | 16 +---- .../execution/TestEventListenerBasic.java | 1 - 31 files changed, 91 insertions(+), 268 deletions(-) diff --git a/core/trino-main/src/main/java/io/trino/operator/exchange/LocalExchange.java 
b/core/trino-main/src/main/java/io/trino/operator/exchange/LocalExchange.java index 0c596e7b4d24..76be2ced34d3 100644 --- a/core/trino-main/src/main/java/io/trino/operator/exchange/LocalExchange.java +++ b/core/trino-main/src/main/java/io/trino/operator/exchange/LocalExchange.java @@ -24,7 +24,6 @@ import io.trino.operator.BucketPartitionFunction; import io.trino.operator.HashGenerator; import io.trino.operator.PartitionFunction; -import io.trino.operator.PrecomputedHashGenerator; import io.trino.operator.output.SkewedPartitionRebalancer; import io.trino.spi.Page; import io.trino.spi.type.Type; @@ -37,7 +36,6 @@ import java.io.Closeable; import java.util.HashSet; import java.util.List; -import java.util.Optional; import java.util.Set; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicLong; @@ -94,7 +92,6 @@ public LocalExchange( PartitioningHandle partitioning, List partitionChannels, List partitionChannelTypes, - Optional partitionHashChannel, DataSize maxBufferedBytes, TypeOperators typeOperators, DataSize writerScalingMinDataProcessed, @@ -159,8 +156,7 @@ else if (isScaledWriterHashDistribution(partitioning)) { partitioning, partitionCount, partitionChannels, - partitionChannelTypes, - partitionHashChannel); + partitionChannelTypes); return new ScaleWriterPartitioningExchanger( asPageConsumers(sources), memoryManager, @@ -187,8 +183,7 @@ else if (partitioning.equals(FIXED_HASH_DISTRIBUTION) || partitioning.getCatalog partitioning, bufferCount, partitionChannels, - partitionChannelTypes, - partitionHashChannel); + partitionChannelTypes); return new PartitioningExchanger( asPageConsumers(sources), memoryManager, @@ -242,19 +237,12 @@ private static PartitionFunction createPartitionFunction( PartitioningHandle partitioning, int partitionCount, List partitionChannels, - List partitionChannelTypes, - Optional partitionHashChannel) + List partitionChannelTypes) { checkArgument(Integer.bitCount(partitionCount) == 1, "partitionCount must be a power of 2"); if (isSystemPartitioning(partitioning)) { - HashGenerator hashGenerator; - if (partitionHashChannel.isPresent()) { - hashGenerator = new PrecomputedHashGenerator(partitionHashChannel.get()); - } - else { - hashGenerator = createChannelsHashGenerator(partitionChannelTypes, Ints.toArray(partitionChannels), typeOperators); - } + HashGenerator hashGenerator = createChannelsHashGenerator(partitionChannelTypes, Ints.toArray(partitionChannels), typeOperators); return new LocalPartitionGenerator(hashGenerator, partitionCount); } diff --git a/core/trino-main/src/main/java/io/trino/sql/planner/LocalExecutionPlanner.java b/core/trino-main/src/main/java/io/trino/sql/planner/LocalExecutionPlanner.java index 2e9d1290f299..541dac738693 100644 --- a/core/trino-main/src/main/java/io/trino/sql/planner/LocalExecutionPlanner.java +++ b/core/trino-main/src/main/java/io/trino/sql/planner/LocalExecutionPlanner.java @@ -566,40 +566,30 @@ public LocalExecutionPlan plan( } // We can convert the symbols directly into channels, because the root must be a sink and therefore the layout is fixed - List partitionChannels; - List> partitionConstants; - List partitionChannelTypes; - if (partitioningScheme.getHashColumn().isPresent()) { - partitionChannels = ImmutableList.of(outputLayout.indexOf(partitioningScheme.getHashColumn().get())); - partitionConstants = ImmutableList.of(Optional.empty()); - partitionChannelTypes = ImmutableList.of(BIGINT); - } - else { - partitionChannels = 
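// (annotation, not part of the patch) The branch removed above special-cased a
// pre-computed hash column: when present, the exchange partitioned on that
// single BIGINT channel instead of the real partitioning arguments. Since no
// optimizer rule produces such a column anymore, the else-branch below is the
// only remaining path and is hoisted out unchanged, continuing with: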
partitioningScheme.getPartitioning().getArguments().stream() - .map(argument -> { - if (argument.isConstant()) { - return -1; - } - return outputLayout.indexOf(argument.getColumn()); - }) - .collect(toImmutableList()); - partitionConstants = partitioningScheme.getPartitioning().getArguments().stream() - .map(argument -> { - if (argument.isConstant()) { - return Optional.of(argument.getConstant()); - } - return Optional.empty(); - }) - .collect(toImmutableList()); - partitionChannelTypes = partitioningScheme.getPartitioning().getArguments().stream() - .map(argument -> { - if (argument.isConstant()) { - return argument.getConstant().getType(); - } - return argument.getColumn().type(); - }) - .collect(toImmutableList()); - } + List partitionChannels = partitioningScheme.getPartitioning().getArguments().stream() + .map(argument -> { + if (argument.isConstant()) { + return -1; + } + return outputLayout.indexOf(argument.getColumn()); + }) + .collect(toImmutableList()); + List> partitionConstants = partitioningScheme.getPartitioning().getArguments().stream() + .map(argument -> { + if (argument.isConstant()) { + return Optional.of(argument.getConstant()); + } + return Optional.empty(); + }) + .collect(toImmutableList()); + List partitionChannelTypes = partitioningScheme.getPartitioning().getArguments().stream() + .map(argument -> { + if (argument.isConstant()) { + return argument.getConstant().getType(); + } + return argument.getColumn().type(); + }) + .collect(toImmutableList()); PartitionFunction partitionFunction; Optional skewedPartitionRebalancer = Optional.empty(); @@ -3706,7 +3696,6 @@ private PhysicalOperation createLocalMerge(ExchangeNode node, LocalExecutionPlan node.getPartitioningScheme().getPartitioning().getHandle(), ImmutableList.of(), ImmutableList.of(), - Optional.empty(), maxLocalExchangeBufferSize, typeOperators, getWriterScalingMinDataProcessed(session), @@ -3761,8 +3750,6 @@ else if (context.getDriverInstanceCount().isPresent()) { List partitionChannels = node.getPartitioningScheme().getPartitioning().getArguments().stream() .map(argument -> node.getOutputSymbols().indexOf(argument.getColumn())) .collect(toImmutableList()); - Optional hashChannel = node.getPartitioningScheme().getHashColumn() - .map(symbol -> node.getOutputSymbols().indexOf(symbol)); List partitionChannelTypes = partitionChannels.stream() .map(types::get) .collect(toImmutableList()); @@ -3783,7 +3770,6 @@ else if (context.getDriverInstanceCount().isPresent()) { node.getPartitioningScheme().getPartitioning().getHandle(), partitionChannels, partitionChannelTypes, - hashChannel, maxLocalExchangeBufferSize, typeOperators, getWriterScalingMinDataProcessed(session), diff --git a/core/trino-main/src/main/java/io/trino/sql/planner/LogicalPlanner.java b/core/trino-main/src/main/java/io/trino/sql/planner/LogicalPlanner.java index 55289841d68a..c1d69a158f5a 100644 --- a/core/trino-main/src/main/java/io/trino/sql/planner/LogicalPlanner.java +++ b/core/trino-main/src/main/java/io/trino/sql/planner/LogicalPlanner.java @@ -722,7 +722,6 @@ else if (isUsePreferredWritePartitioning(session)) { partitioningScheme = Optional.of(new PartitioningScheme( Partitioning.create(FIXED_HASH_DISTRIBUTION, partitionFunctionArguments), outputLayout, - Optional.empty(), false, Optional.empty(), maxWritersNodesCount)); @@ -1015,7 +1014,6 @@ else if (isUsePreferredWritePartitioning(session)) { partitioningScheme = Optional.of(new PartitioningScheme( Partitioning.create(FIXED_HASH_DISTRIBUTION, partitionFunctionArguments), outputLayout, - 
Optional.empty(), false, Optional.empty(), maxWritersNodesCount)); diff --git a/core/trino-main/src/main/java/io/trino/sql/planner/NodePartitioningManager.java b/core/trino-main/src/main/java/io/trino/sql/planner/NodePartitioningManager.java index 01e8df9c35e0..9bfd504c84ba 100644 --- a/core/trino-main/src/main/java/io/trino/sql/planner/NodePartitioningManager.java +++ b/core/trino-main/src/main/java/io/trino/sql/planner/NodePartitioningManager.java @@ -89,7 +89,6 @@ public PartitionFunction getPartitionFunction( if (partitioningHandle.getConnectorHandle() instanceof SystemPartitioningHandle) { return ((SystemPartitioningHandle) partitioningHandle.getConnectorHandle()).getPartitionFunction( partitionChannelTypes, - partitioningScheme.getHashColumn().isPresent(), bucketToPartition, typeOperators); } @@ -111,7 +110,6 @@ public PartitionFunction getPartitionFunction(Session session, PartitioningSchem if (partitioningHandle.getConnectorHandle() instanceof SystemPartitioningHandle handle) { return handle.getPartitionFunction( partitionChannelTypes, - partitioningScheme.getHashColumn().isPresent(), bucketToPartition, typeOperators); } diff --git a/core/trino-main/src/main/java/io/trino/sql/planner/PartitioningScheme.java b/core/trino-main/src/main/java/io/trino/sql/planner/PartitioningScheme.java index 751f3cb580b8..f2ddb4e4f24d 100644 --- a/core/trino-main/src/main/java/io/trino/sql/planner/PartitioningScheme.java +++ b/core/trino-main/src/main/java/io/trino/sql/planner/PartitioningScheme.java @@ -31,7 +31,6 @@ public class PartitioningScheme { private final Partitioning partitioning; private final List outputLayout; - private final Optional hashColumn; private final boolean replicateNullsAndAny; private final Optional bucketToPartition; private final Optional partitionCount; @@ -41,18 +40,6 @@ public PartitioningScheme(Partitioning partitioning, List outputLayout) this( partitioning, outputLayout, - Optional.empty(), - false, - Optional.empty(), - Optional.empty()); - } - - public PartitioningScheme(Partitioning partitioning, List outputLayout, Optional hashColumn) - { - this( - partitioning, - outputLayout, - hashColumn, false, Optional.empty(), Optional.empty()); @@ -62,7 +49,6 @@ public PartitioningScheme(Partitioning partitioning, List outputLayout, public PartitioningScheme( @JsonProperty("partitioning") Partitioning partitioning, @JsonProperty("outputLayout") List outputLayout, - @JsonProperty("hashColumn") Optional hashColumn, @JsonProperty("replicateNullsAndAny") boolean replicateNullsAndAny, @JsonProperty("bucketToPartition") Optional bucketToPartition, @JsonProperty("partitionCount") Optional partitionCount) @@ -74,10 +60,6 @@ public PartitioningScheme( checkArgument(ImmutableSet.copyOf(outputLayout).containsAll(columns), "Output layout (%s) don't include all partition columns (%s)", outputLayout, columns); - this.hashColumn = requireNonNull(hashColumn, "hashColumn is null"); - hashColumn.ifPresent(column -> checkArgument(outputLayout.contains(column), - "Output layout (%s) don't include hash column (%s)", outputLayout, column)); - checkArgument(!replicateNullsAndAny || columns.size() <= 1, "Must have at most one partitioning column when nullPartition is REPLICATE."); this.replicateNullsAndAny = replicateNullsAndAny; this.bucketToPartition = requireNonNull(bucketToPartition, "bucketToPartition is null"); @@ -99,12 +81,6 @@ public List getOutputLayout() return outputLayout; } - @JsonProperty - public Optional getHashColumn() - { - return hashColumn; - } - @JsonProperty public boolean 
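// (annotation, not part of the patch) With the field, the constructor
// parameter, and the getHashColumn() accessor above all removed, a
// PartitioningScheme is fully described by its partitioning, output layout,
// replicateNullsAndAny flag, bucketToPartition mapping, and partition count.
// The next surviving accessor is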
isReplicateNullsAndAny() { @@ -125,18 +101,18 @@ public Optional getPartitionCount() public PartitioningScheme withBucketToPartition(Optional bucketToPartition) { - return new PartitioningScheme(partitioning, outputLayout, hashColumn, replicateNullsAndAny, bucketToPartition, partitionCount); + return new PartitioningScheme(partitioning, outputLayout, replicateNullsAndAny, bucketToPartition, partitionCount); } public PartitioningScheme withPartitioningHandle(PartitioningHandle partitioningHandle) { Partitioning newPartitioning = partitioning.withAlternativePartitioningHandle(partitioningHandle); - return new PartitioningScheme(newPartitioning, outputLayout, hashColumn, replicateNullsAndAny, bucketToPartition, partitionCount); + return new PartitioningScheme(newPartitioning, outputLayout, replicateNullsAndAny, bucketToPartition, partitionCount); } public PartitioningScheme withPartitionCount(Optional partitionCount) { - return new PartitioningScheme(partitioning, outputLayout, hashColumn, replicateNullsAndAny, bucketToPartition, partitionCount); + return new PartitioningScheme(partitioning, outputLayout, replicateNullsAndAny, bucketToPartition, partitionCount); } public PartitioningScheme translateOutputLayout(List newOutputLayout) @@ -147,11 +123,7 @@ public PartitioningScheme translateOutputLayout(List newOutputLayout) Partitioning newPartitioning = partitioning.translate(symbol -> newOutputLayout.get(outputLayout.indexOf(symbol))); - Optional newHashSymbol = hashColumn - .map(outputLayout::indexOf) - .map(newOutputLayout::get); - - return new PartitioningScheme(newPartitioning, newOutputLayout, newHashSymbol, replicateNullsAndAny, bucketToPartition, partitionCount); + return new PartitioningScheme(newPartitioning, newOutputLayout, replicateNullsAndAny, bucketToPartition, partitionCount); } @Override @@ -183,7 +155,6 @@ public String toString() return toStringHelper(this) .add("partitioning", partitioning) .add("outputLayout", outputLayout) - .add("hashChannel", hashColumn) .add("replicateNullsAndAny", replicateNullsAndAny) .add("bucketToPartition", bucketToPartition) .add("partitionCount", partitionCount) diff --git a/core/trino-main/src/main/java/io/trino/sql/planner/PlanFragmenter.java b/core/trino-main/src/main/java/io/trino/sql/planner/PlanFragmenter.java index 4f66b75b15c0..3d4eac0c0292 100644 --- a/core/trino-main/src/main/java/io/trino/sql/planner/PlanFragmenter.java +++ b/core/trino-main/src/main/java/io/trino/sql/planner/PlanFragmenter.java @@ -224,7 +224,6 @@ private SubPlan reassignPartitioningHandleIfNecessaryHelper(Session session, Sub new PartitioningScheme( newOutputPartitioning, outputPartitioningScheme.getOutputLayout(), - outputPartitioningScheme.getHashColumn(), outputPartitioningScheme.isReplicateNullsAndAny(), outputPartitioningScheme.getBucketToPartition(), outputPartitioningScheme.getPartitionCount()), diff --git a/core/trino-main/src/main/java/io/trino/sql/planner/SystemPartitioningHandle.java b/core/trino-main/src/main/java/io/trino/sql/planner/SystemPartitioningHandle.java index 1335edab0085..b60183f9b701 100644 --- a/core/trino-main/src/main/java/io/trino/sql/planner/SystemPartitioningHandle.java +++ b/core/trino-main/src/main/java/io/trino/sql/planner/SystemPartitioningHandle.java @@ -17,7 +17,6 @@ import com.fasterxml.jackson.annotation.JsonProperty; import io.trino.operator.BucketPartitionFunction; import io.trino.operator.PartitionFunction; -import io.trino.operator.PrecomputedHashGenerator; import io.trino.spi.Page; import 
io.trino.spi.connector.BucketFunction; import io.trino.spi.connector.ConnectorPartitioningHandle; @@ -136,12 +135,12 @@ public String toString() return partitioning.toString(); } - public PartitionFunction getPartitionFunction(List partitionChannelTypes, boolean isHashPrecomputed, int[] bucketToPartition, TypeOperators typeOperators) + public PartitionFunction getPartitionFunction(List partitionChannelTypes, int[] bucketToPartition, TypeOperators typeOperators) { requireNonNull(partitionChannelTypes, "partitionChannelTypes is null"); requireNonNull(bucketToPartition, "bucketToPartition is null"); - BucketFunction bucketFunction = function.createBucketFunction(partitionChannelTypes, isHashPrecomputed, bucketToPartition.length, typeOperators); + BucketFunction bucketFunction = function.createBucketFunction(partitionChannelTypes, bucketToPartition.length, typeOperators); return new BucketPartitionFunction(bucketFunction, bucketToPartition); } @@ -149,7 +148,7 @@ public enum SystemPartitionFunction { SINGLE { @Override - public BucketFunction createBucketFunction(List partitionChannelTypes, boolean isHashPrecomputed, int bucketCount, TypeOperators typeOperators) + public BucketFunction createBucketFunction(List partitionChannelTypes, int bucketCount, TypeOperators typeOperators) { checkArgument(bucketCount == 1, "Single partition can only have one bucket"); return new SingleBucketFunction(); @@ -157,39 +156,34 @@ public BucketFunction createBucketFunction(List partitionChannelTypes, boo }, HASH { @Override - public BucketFunction createBucketFunction(List partitionChannelTypes, boolean isHashPrecomputed, int bucketCount, TypeOperators typeOperators) + public BucketFunction createBucketFunction(List partitionChannelTypes, int bucketCount, TypeOperators typeOperators) { - if (isHashPrecomputed) { - return new HashBucketFunction(new PrecomputedHashGenerator(0), bucketCount); - } - return new HashBucketFunction(createPagePrefixHashGenerator(partitionChannelTypes, typeOperators), bucketCount); } }, ROUND_ROBIN { @Override - public BucketFunction createBucketFunction(List partitionChannelTypes, boolean isHashPrecomputed, int bucketCount, TypeOperators typeOperators) + public BucketFunction createBucketFunction(List partitionChannelTypes, int bucketCount, TypeOperators typeOperators) { return new RoundRobinBucketFunction(bucketCount); } }, BROADCAST { @Override - public BucketFunction createBucketFunction(List partitionChannelTypes, boolean isHashPrecomputed, int bucketCount, TypeOperators typeOperators) + public BucketFunction createBucketFunction(List partitionChannelTypes, int bucketCount, TypeOperators typeOperators) { throw new UnsupportedOperationException(); } }, UNKNOWN { @Override - public BucketFunction createBucketFunction(List partitionChannelTypes, boolean isHashPrecomputed, int bucketCount, TypeOperators typeOperators) + public BucketFunction createBucketFunction(List partitionChannelTypes, int bucketCount, TypeOperators typeOperators) { throw new UnsupportedOperationException(); } }; public abstract BucketFunction createBucketFunction(List partitionChannelTypes, - boolean isHashPrecomputed, int bucketCount, TypeOperators typeOperators); diff --git a/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/AdaptiveReorderPartitionedJoin.java b/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/AdaptiveReorderPartitionedJoin.java index bb56c151d88c..a2416101862f 100644 --- 
a/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/AdaptiveReorderPartitionedJoin.java +++ b/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/AdaptiveReorderPartitionedJoin.java @@ -152,10 +152,7 @@ public Result apply(JoinNode joinNode, Captures captures, Context context) private static boolean isBuildSideLocalExchangeNode(ExchangeNode exchangeNode, Set rightSymbols) { return exchangeNode.getScope() == LOCAL - && exchangeNode.getPartitioningScheme().getPartitioning().getColumns().equals(rightSymbols) - // TODO: Add support for local exchange with hash symbols. For now, it's not important since hash - // optimization is disabled by default - && exchangeNode.getPartitioningScheme().getHashColumn().isEmpty(); + && exchangeNode.getPartitioningScheme().getPartitioning().getColumns().equals(rightSymbols); } private static JoinNode flipJoinAndFixLocalExchanges( @@ -303,8 +300,7 @@ public PlanNode visitPlan(PlanNode node, RewriteContext ctx) context.getIdAllocator().getNextId(), LOCAL, node, - buildSymbols, - Optional.empty()); + buildSymbols); } @Override diff --git a/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/AddExchangesBelowPartialAggregationOverGroupIdRuleSet.java b/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/AddExchangesBelowPartialAggregationOverGroupIdRuleSet.java index 5beb9bcd11e0..f4f52fdd410e 100644 --- a/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/AddExchangesBelowPartialAggregationOverGroupIdRuleSet.java +++ b/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/AddExchangesBelowPartialAggregationOverGroupIdRuleSet.java @@ -299,7 +299,6 @@ protected Optional transform(AggregationNode aggregation, GroupIdNode new PartitioningScheme( Partitioning.create(FIXED_HASH_DISTRIBUTION, desiredHashSymbols), source.getOutputSymbols(), - Optional.empty(), false, Optional.empty(), // It's fine to reuse partitionCount since that is computed by considering all the expanding nodes and table scans in a query diff --git a/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/PruneExchangeColumns.java b/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/PruneExchangeColumns.java index e0b4ce11266f..e0265b6d55c2 100644 --- a/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/PruneExchangeColumns.java +++ b/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/PruneExchangeColumns.java @@ -74,7 +74,6 @@ protected Optional pushDownProjectOff(Context context, ExchangeNode ex ImmutableSet.Builder builder = ImmutableSet.builder(); builder.addAll(referencedOutputs); builder.addAll(exchangeNode.getPartitioningScheme().getPartitioning().getColumns()); - exchangeNode.getPartitioningScheme().getHashColumn().ifPresent(builder::add); exchangeNode.getOrderingScheme().ifPresent(orderingScheme -> builder.addAll(orderingScheme.orderBy())); Set outputsToRetain = builder.build(); @@ -103,7 +102,6 @@ protected Optional pushDownProjectOff(Context context, ExchangeNode ex PartitioningScheme newPartitioningScheme = new PartitioningScheme( exchangeNode.getPartitioningScheme().getPartitioning(), newOutputs.build(), - exchangeNode.getPartitioningScheme().getHashColumn(), exchangeNode.getPartitioningScheme().isReplicateNullsAndAny(), exchangeNode.getPartitioningScheme().getBucketToPartition(), exchangeNode.getPartitioningScheme().getPartitionCount()); diff --git a/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/PruneTableExecuteSourceColumns.java 
b/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/PruneTableExecuteSourceColumns.java index df535be17c06..7366ae56e6e4 100644 --- a/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/PruneTableExecuteSourceColumns.java +++ b/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/PruneTableExecuteSourceColumns.java @@ -44,7 +44,6 @@ public Result apply(TableExecuteNode tableExecuteNode, Captures captures, Contex if (tableExecuteNode.getPartitioningScheme().isPresent()) { PartitioningScheme partitioningScheme = tableExecuteNode.getPartitioningScheme().get(); partitioningScheme.getPartitioning().getColumns().forEach(requiredInputs::add); - partitioningScheme.getHashColumn().ifPresent(requiredInputs::add); } return restrictChildOutputs(context.getIdAllocator(), tableExecuteNode, requiredInputs.build()) diff --git a/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/PruneTableWriterSourceColumns.java b/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/PruneTableWriterSourceColumns.java index 47b4fd6299db..2e99df0f914a 100644 --- a/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/PruneTableWriterSourceColumns.java +++ b/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/PruneTableWriterSourceColumns.java @@ -46,7 +46,6 @@ public Result apply(TableWriterNode tableWriterNode, Captures captures, Context if (tableWriterNode.getPartitioningScheme().isPresent()) { PartitioningScheme partitioningScheme = tableWriterNode.getPartitioningScheme().get(); partitioningScheme.getPartitioning().getColumns().forEach(requiredInputs::add); - partitioningScheme.getHashColumn().ifPresent(requiredInputs::add); } if (tableWriterNode.getStatisticsAggregation().isPresent()) { diff --git a/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/PushPartialAggregationThroughExchange.java b/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/PushPartialAggregationThroughExchange.java index d46bfa1479a0..7c7346f8e928 100644 --- a/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/PushPartialAggregationThroughExchange.java +++ b/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/PushPartialAggregationThroughExchange.java @@ -128,11 +128,6 @@ public Result apply(AggregationNode aggregationNode, Captures captures, Context } } - // currently, we only support plans that don't use pre-computed hash functions - if (exchangeNode.getPartitioningScheme().getHashColumn().isPresent()) { - return Result.empty(); - } - return switch (aggregationNode.getStep()) { case SINGLE -> Result.ofPlanNode(split(aggregationNode, context)); // Split it into a FINAL on top of a PARTIAL and case PARTIAL -> Result.ofPlanNode(pushPartial(aggregationNode, exchangeNode, context)); // Push it underneath each branch of the exchange @@ -179,7 +174,6 @@ private PlanNode pushPartial(AggregationNode aggregation, ExchangeNode exchange, PartitioningScheme partitioning = new PartitioningScheme( exchange.getPartitioningScheme().getPartitioning(), aggregation.getOutputSymbols(), - exchange.getPartitioningScheme().getHashColumn(), exchange.getPartitioningScheme().isReplicateNullsAndAny(), exchange.getPartitioningScheme().getBucketToPartition(), exchange.getPartitioningScheme().getPartitionCount()); diff --git a/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/PushProjectionThroughExchange.java 
b/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/PushProjectionThroughExchange.java index f48527f2e8f4..19b69c33794e 100644 --- a/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/PushProjectionThroughExchange.java +++ b/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/PushProjectionThroughExchange.java @@ -100,14 +100,6 @@ public Result apply(ProjectNode project, Captures captures, Context context) inputs.add(inputSymbol); }); - // Need to retain the hash symbol for the exchange - exchange.getPartitioningScheme().getHashColumn() - .map(outputToInputMap::get) - .ifPresent(inputSymbol -> { - projections.putIdentity(inputSymbol); - inputs.add(inputSymbol); - }); - if (exchange.getOrderingScheme().isPresent()) { // Need to retain ordering columns for the exchange exchange.getOrderingScheme().get().orderBy().stream() @@ -122,7 +114,6 @@ public Result apply(ProjectNode project, Captures captures, Context context) ImmutableSet.Builder outputBuilder = ImmutableSet.builder(); partitioningColumns.forEach(outputBuilder::add); - exchange.getPartitioningScheme().getHashColumn().ifPresent(outputBuilder::add); exchange.getOrderingScheme().ifPresent(orderingScheme -> outputBuilder.addAll(orderingScheme.orderBy())); Set partitioningHashAndOrderingOutputs = outputBuilder.build(); @@ -146,7 +137,6 @@ public Result apply(ProjectNode project, Captures captures, Context context) // Construct the output symbols in the same order as the sources ImmutableList.Builder outputBuilder = ImmutableList.builder(); partitioningColumns.forEach(outputBuilder::add); - exchange.getPartitioningScheme().getHashColumn().ifPresent(outputBuilder::add); if (exchange.getOrderingScheme().isPresent()) { exchange.getOrderingScheme().get().orderBy().stream() // Do not duplicate symbols in outputs list (for consistency with inputs lists) @@ -168,7 +158,6 @@ public Result apply(ProjectNode project, Captures captures, Context context) PartitioningScheme partitioningScheme = new PartitioningScheme( exchange.getPartitioningScheme().getPartitioning(), outputBuilder.build(), - exchange.getPartitioningScheme().getHashColumn(), exchange.getPartitioningScheme().isReplicateNullsAndAny(), exchange.getPartitioningScheme().getBucketToPartition(), exchange.getPartitioningScheme().getPartitionCount()); diff --git a/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/PushRemoteExchangeThroughAssignUniqueId.java b/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/PushRemoteExchangeThroughAssignUniqueId.java index 44f5dcd887ff..6bbdbfb4122f 100644 --- a/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/PushRemoteExchangeThroughAssignUniqueId.java +++ b/core/trino-main/src/main/java/io/trino/sql/planner/iterative/rule/PushRemoteExchangeThroughAssignUniqueId.java @@ -78,7 +78,6 @@ public Result apply(ExchangeNode node, Captures captures, Context context) new PartitioningScheme( partitioningScheme.getPartitioning(), removeSymbol(partitioningScheme.getOutputLayout(), assignUniqueId.getIdColumn()), - partitioningScheme.getHashColumn(), partitioningScheme.isReplicateNullsAndAny(), partitioningScheme.getBucketToPartition(), partitioningScheme.getPartitionCount()), diff --git a/core/trino-main/src/main/java/io/trino/sql/planner/optimizations/AddExchanges.java b/core/trino-main/src/main/java/io/trino/sql/planner/optimizations/AddExchanges.java index bebc382c0586..7d5e8b81488a 100644 --- 
a/core/trino-main/src/main/java/io/trino/sql/planner/optimizations/AddExchanges.java +++ b/core/trino-main/src/main/java/io/trino/sql/planner/optimizations/AddExchanges.java @@ -264,7 +264,7 @@ else if (!isNodePartitionedOn(child.getProperties(), partitioningRequirement) || .flatMap(partitioningColumns -> useParentPreferredPartitioning(node, partitioningColumns)) .orElse(node.getGroupingKeys()); child = withDerivedProperties( - partitionedExchange(idAllocator.getNextId(), REMOTE, child.getNode(), partitioningKeys, Optional.empty()), + partitionedExchange(idAllocator.getNextId(), REMOTE, child.getNode(), partitioningKeys), child.getProperties()); } return rebaseAndDeriveProperties(node, child); @@ -358,8 +358,7 @@ public PlanWithProperties visitMarkDistinct(MarkDistinctNode node, PreferredProp idAllocator.getNextId(), REMOTE, child.getNode(), - node.getDistinctSymbols(), - Optional.empty()), + node.getDistinctSymbols()), child.getProperties()); } @@ -389,7 +388,7 @@ public PlanWithProperties visitWindow(WindowNode node, PreferredProperties prefe } else { child = withDerivedProperties( - partitionedExchange(idAllocator.getNextId(), REMOTE, child.getNode(), node.getPartitionBy(), Optional.empty()), + partitionedExchange(idAllocator.getNextId(), REMOTE, child.getNode(), node.getPartitionBy()), child.getProperties()); } } @@ -420,7 +419,7 @@ public PlanWithProperties visitPatternRecognition(PatternRecognitionNode node, P } else { child = withDerivedProperties( - partitionedExchange(idAllocator.getNextId(), REMOTE, child.getNode(), node.getPartitionBy(), Optional.empty()), + partitionedExchange(idAllocator.getNextId(), REMOTE, child.getNode(), node.getPartitionBy()), child.getProperties()); } } @@ -472,7 +471,7 @@ else if (!isNodePartitionedOn(child.getProperties(), partitionBy)) { } else { child = withDerivedProperties( - partitionedExchange(idAllocator.getNextId(), REMOTE, child.getNode(), partitionBy, Optional.empty()), + partitionedExchange(idAllocator.getNextId(), REMOTE, child.getNode(), partitionBy), child.getProperties()); } } @@ -507,8 +506,7 @@ public PlanWithProperties visitRowNumber(RowNumberNode node, PreferredProperties idAllocator.getNextId(), REMOTE, child.getNode(), - node.getPartitionBy(), - Optional.empty()), + node.getPartitionBy()), child.getProperties()); } @@ -531,7 +529,7 @@ public PlanWithProperties visitTopNRanking(TopNRankingNode node, PreferredProper preferredChildProperties = computePreference( partitionedWithLocal(ImmutableSet.copyOf(node.getPartitionBy()), grouped(node.getPartitionBy())), preferredProperties); - addExchange = partial -> partitionedExchange(idAllocator.getNextId(), REMOTE, partial, node.getPartitionBy(), Optional.empty()); + addExchange = partial -> partitionedExchange(idAllocator.getNextId(), REMOTE, partial, node.getPartitionBy()); } PlanWithProperties child = planChild(node, preferredChildProperties); @@ -795,10 +793,10 @@ private PlanWithProperties getWriterPlanWithProperties(Optional leftS } else { left = withDerivedProperties( - partitionedExchange(idAllocator.getNextId(), REMOTE, left.getNode(), leftSymbols, Optional.empty()), + partitionedExchange(idAllocator.getNextId(), REMOTE, left.getNode(), leftSymbols), left.getProperties()); right = withDerivedProperties( - partitionedExchange(idAllocator.getNextId(), REMOTE, right.getNode(), rightSymbols, Optional.empty()), + partitionedExchange(idAllocator.getNextId(), REMOTE, right.getNode(), rightSymbols), right.getProperties()); } } @@ -1091,10 +1089,10 @@ public PlanWithProperties 
visitSpatialJoin(SpatialJoinNode node, PreferredProper } else { left = withDerivedProperties( - partitionedExchange(idAllocator.getNextId(), REMOTE, left.getNode(), ImmutableList.of(node.getLeftPartitionSymbol().get()), Optional.empty()), + partitionedExchange(idAllocator.getNextId(), REMOTE, left.getNode(), ImmutableList.of(node.getLeftPartitionSymbol().get())), left.getProperties()); right = withDerivedProperties( - partitionedExchange(idAllocator.getNextId(), REMOTE, right.getNode(), ImmutableList.of(node.getRightPartitionSymbol().get()), Optional.empty()), + partitionedExchange(idAllocator.getNextId(), REMOTE, right.getNode(), ImmutableList.of(node.getRightPartitionSymbol().get())), right.getProperties()); } @@ -1140,7 +1138,6 @@ public PlanWithProperties visitSemiJoin(SemiJoinNode node, PreferredProperties p partitionedExchange(idAllocator.getNextId(), REMOTE, filteringSource.getNode(), new PartitioningScheme( filteringPartitioning, filteringSource.getNode().getOutputSymbols(), - Optional.empty(), true, Optional.empty(), Optional.empty())), @@ -1158,10 +1155,10 @@ public PlanWithProperties visitSemiJoin(SemiJoinNode node, PreferredProperties p } else { source = withDerivedProperties( - partitionedExchange(idAllocator.getNextId(), REMOTE, source.getNode(), sourceSymbols, Optional.empty()), + partitionedExchange(idAllocator.getNextId(), REMOTE, source.getNode(), sourceSymbols), source.getProperties()); filteringSource = withDerivedProperties( - partitionedExchange(idAllocator.getNextId(), REMOTE, filteringSource.getNode(), filteringSourceSymbols, Optional.empty(), true), + partitionedExchange(idAllocator.getNextId(), REMOTE, filteringSource.getNode(), filteringSourceSymbols, true), filteringSource.getProperties()); } } @@ -1175,7 +1172,6 @@ public PlanWithProperties visitSemiJoin(SemiJoinNode node, PreferredProperties p partitionedExchange(idAllocator.getNextId(), REMOTE, filteringSource.getNode(), new PartitioningScheme( filteringPartitioning, filteringSource.getNode().getOutputSymbols(), - Optional.empty(), true, Optional.empty(), Optional.empty())), @@ -1305,7 +1301,6 @@ public PlanWithProperties visitUnion(UnionNode node, PreferredProperties parentP new PartitioningScheme( childPartitioning, source.getNode().getOutputSymbols(), - Optional.empty(), nullsAndAnyReplicated, Optional.empty(), Optional.empty())), diff --git a/core/trino-main/src/main/java/io/trino/sql/planner/optimizations/AddLocalExchanges.java b/core/trino-main/src/main/java/io/trino/sql/planner/optimizations/AddLocalExchanges.java index c71a16a8e10e..2860f28d5e98 100644 --- a/core/trino-main/src/main/java/io/trino/sql/planner/optimizations/AddLocalExchanges.java +++ b/core/trino-main/src/main/java/io/trino/sql/planner/optimizations/AddLocalExchanges.java @@ -353,8 +353,7 @@ public PlanWithProperties visitAggregation(AggregationNode node, StreamPreferred idAllocator.getNextId(), LOCAL, child.getNode(), - groupingKeys, - Optional.empty()), + groupingKeys), child.getProperties()); return rebaseAndDeriveProperties(node, ImmutableList.of(exchange)); } @@ -872,8 +871,7 @@ public PlanWithProperties visitUnion(UnionNode node, StreamPreferredProperties p LOCAL, new PartitioningScheme( Partitioning.create(FIXED_HASH_DISTRIBUTION, preferredPartitionColumns.get()), - node.getOutputSymbols(), - Optional.empty()), + node.getOutputSymbols()), sources, inputLayouts, Optional.empty()); @@ -1039,8 +1037,7 @@ private PlanWithProperties enforce(PlanWithProperties planWithProperties, Stream idAllocator.getNextId(), LOCAL, 
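// (annotation, not part of the patch) This partitionedExchange(...) call
// inside enforce(...) continues below; its trailing Optional.empty()
// hash-column argument is dropped, matching the four-argument
// partitionedExchange overload that ExchangeNode keeps after this patch.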
planWithProperties.getNode(), - requiredPartitionColumns.get(), - Optional.empty()); + requiredPartitionColumns.get()); return deriveProperties(exchangeNode, planWithProperties.getProperties()); } diff --git a/core/trino-main/src/main/java/io/trino/sql/planner/optimizations/SymbolMapper.java b/core/trino-main/src/main/java/io/trino/sql/planner/optimizations/SymbolMapper.java index 2b276d10a678..238b6ce507d0 100644 --- a/core/trino-main/src/main/java/io/trino/sql/planner/optimizations/SymbolMapper.java +++ b/core/trino-main/src/main/java/io/trino/sql/planner/optimizations/SymbolMapper.java @@ -599,7 +599,6 @@ public PartitioningScheme map(PartitioningScheme scheme, List sourceLayo return new PartitioningScheme( scheme.getPartitioning().translate(this::map), mapAndDistinct(sourceLayout), - scheme.getHashColumn().map(this::map), scheme.isReplicateNullsAndAny(), scheme.getBucketToPartition(), scheme.getPartitionCount()); diff --git a/core/trino-main/src/main/java/io/trino/sql/planner/plan/ExchangeNode.java b/core/trino-main/src/main/java/io/trino/sql/planner/plan/ExchangeNode.java index 47ac2aa7ed43..f23b5e4086b7 100644 --- a/core/trino-main/src/main/java/io/trino/sql/planner/plan/ExchangeNode.java +++ b/core/trino-main/src/main/java/io/trino/sql/planner/plan/ExchangeNode.java @@ -116,12 +116,12 @@ public ExchangeNode( this.orderingScheme = orderingScheme; } - public static ExchangeNode partitionedExchange(PlanNodeId id, Scope scope, PlanNode child, List partitioningColumns, Optional hashColumns) + public static ExchangeNode partitionedExchange(PlanNodeId id, Scope scope, PlanNode child, List partitioningColumns) { - return partitionedExchange(id, scope, child, partitioningColumns, hashColumns, false); + return partitionedExchange(id, scope, child, partitioningColumns, false); } - public static ExchangeNode partitionedExchange(PlanNodeId id, Scope scope, PlanNode child, List partitioningColumns, Optional hashColumns, boolean replicateNullsAndAny) + public static ExchangeNode partitionedExchange(PlanNodeId id, Scope scope, PlanNode child, List partitioningColumns, boolean replicateNullsAndAny) { return partitionedExchange( id, @@ -130,7 +130,6 @@ public static ExchangeNode partitionedExchange(PlanNodeId id, Scope scope, PlanN new PartitioningScheme( Partitioning.create(FIXED_HASH_DISTRIBUTION, partitioningColumns), child.getOutputSymbols(), - hashColumns, replicateNullsAndAny, Optional.empty(), Optional.empty())); diff --git a/core/trino-main/src/main/java/io/trino/sql/planner/planprinter/PlanPrinter.java b/core/trino-main/src/main/java/io/trino/sql/planner/planprinter/PlanPrinter.java index 5a5e4c9426dd..4b0bc719aa12 100644 --- a/core/trino-main/src/main/java/io/trino/sql/planner/planprinter/PlanPrinter.java +++ b/core/trino-main/src/main/java/io/trino/sql/planner/planprinter/PlanPrinter.java @@ -581,18 +581,15 @@ private static String formatFragment( }) .collect(toImmutableList()); builder.append(indentString(1)); - String hashColumn = partitioningScheme.getHashColumn().map(anonymizer::anonymize).map(column -> "[" + column + "]").orElse(""); if (replicateNullsAndAny) { - builder.append(format("Output partitioning: %s (replicate nulls and any) [%s]%s", + builder.append(format("Output partitioning: %s (replicate nulls and any) [%s]", anonymizer.anonymize(partitioningScheme.getPartitioning().getHandle()), - Joiner.on(", ").join(arguments), - hashColumn)); + Joiner.on(", ").join(arguments))); } else { - builder.append(format("Output partitioning: %s [%s]%s\n", + builder.append(format("Output 
partitioning: %s [%s]\n", anonymizer.anonymize(partitioningScheme.getPartitioning().getHandle()), - Joiner.on(", ").join(arguments), - hashColumn)); + Joiner.on(", ").join(arguments))); } partitioningScheme.getPartitionCount().ifPresent(partitionCount -> builder.append(format("%sOutput partition count: %s\n", indentString(1), partitionCount))); fragment.getPartitionCount().ifPresent(partitionCount -> builder.append(format("%sInput partition count: %s\n", indentString(1), partitionCount))); @@ -1673,7 +1670,6 @@ else if (node.getScope() == Scope.LOCAL) { ImmutableMap.of( "partitioning", anonymizer.anonymize(node.getPartitioningScheme().getPartitioning().getHandle()), "isReplicateNullsAndAny", formatBoolean(node.getPartitioningScheme().isReplicateNullsAndAny()), - "hashColumn", formatHash(node.getPartitioningScheme().getHashColumn()), "arguments", formatCollection(node.getPartitioningScheme().getPartitioning().getArguments(), anonymizer::anonymize)), context); } @@ -1684,8 +1680,7 @@ else if (node.getScope() == Scope.LOCAL) { "partitionCount", node.getPartitioningScheme().getPartitionCount().map(String::valueOf).orElse(""), "scaleWriters", formatBoolean(node.getPartitioningScheme().getPartitioning().getHandle().isScaleWriters()), "type", node.getType().name(), - "isReplicateNullsAndAny", formatBoolean(node.getPartitioningScheme().isReplicateNullsAndAny()), - "hashColumn", formatHash(node.getPartitioningScheme().getHashColumn())), + "isReplicateNullsAndAny", formatBoolean(node.getPartitioningScheme().isReplicateNullsAndAny())), context); } return processChildren(node, new Context(context.isInitialPlan())); diff --git a/core/trino-main/src/test/java/io/trino/cost/TestCostCalculator.java b/core/trino-main/src/test/java/io/trino/cost/TestCostCalculator.java index b71550a8a0c9..1526c6491023 100644 --- a/core/trino-main/src/test/java/io/trino/cost/TestCostCalculator.java +++ b/core/trino-main/src/test/java/io/trino/cost/TestCostCalculator.java @@ -412,9 +412,9 @@ public void testRepartitionedJoinWithExchange() { TableScanNode ts1 = tableScan("ts1", new Symbol(BIGINT, "orderkey")); TableScanNode ts2 = tableScan("ts2", new Symbol(BIGINT, "orderkey_0")); - ExchangeNode remoteExchange1 = partitionedExchange(new PlanNodeId("re1"), REMOTE, ts1, ImmutableList.of(new Symbol(BIGINT, "orderkey")), Optional.empty()); - ExchangeNode remoteExchange2 = partitionedExchange(new PlanNodeId("re2"), REMOTE, ts2, ImmutableList.of(new Symbol(BIGINT, "orderkey_0")), Optional.empty()); - ExchangeNode localExchange = partitionedExchange(new PlanNodeId("le"), LOCAL, remoteExchange2, ImmutableList.of(new Symbol(BIGINT, "orderkey_0")), Optional.empty()); + ExchangeNode remoteExchange1 = partitionedExchange(new PlanNodeId("re1"), REMOTE, ts1, ImmutableList.of(new Symbol(BIGINT, "orderkey"))); + ExchangeNode remoteExchange2 = partitionedExchange(new PlanNodeId("re2"), REMOTE, ts2, ImmutableList.of(new Symbol(BIGINT, "orderkey_0"))); + ExchangeNode localExchange = partitionedExchange(new PlanNodeId("le"), LOCAL, remoteExchange2, ImmutableList.of(new Symbol(BIGINT, "orderkey_0"))); JoinNode join = join("join", remoteExchange1, @@ -441,7 +441,7 @@ public void testReplicatedJoinWithExchange() TableScanNode ts1 = tableScan("ts1", new Symbol(BIGINT, "orderkey")); TableScanNode ts2 = tableScan("ts2", new Symbol(BIGINT, "orderkey_0")); ExchangeNode remoteExchange2 = replicatedExchange(new PlanNodeId("re2"), REMOTE, ts2); - ExchangeNode localExchange = partitionedExchange(new PlanNodeId("le"), LOCAL, remoteExchange2, 
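// (annotation, not part of the patch) The removed test call continues below
// with Optional.empty() as its unused hash-column argument; the replacement
// "+" line switches to the shorter overload, as do the other call sites in
// this test class.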
ImmutableList.of(new Symbol(BIGINT, "orderkey_0")), Optional.empty()); + ExchangeNode localExchange = partitionedExchange(new PlanNodeId("le"), LOCAL, remoteExchange2, ImmutableList.of(new Symbol(BIGINT, "orderkey_0"))); JoinNode join = join("join", ts1, diff --git a/core/trino-main/src/test/java/io/trino/operator/exchange/TestLocalExchange.java b/core/trino-main/src/test/java/io/trino/operator/exchange/TestLocalExchange.java index fa35534e9ae4..a1c20c828588 100644 --- a/core/trino-main/src/test/java/io/trino/operator/exchange/TestLocalExchange.java +++ b/core/trino-main/src/test/java/io/trino/operator/exchange/TestLocalExchange.java @@ -121,7 +121,6 @@ public void testGatherSingleWriter() SINGLE_DISTRIBUTION, ImmutableList.of(), ImmutableList.of(), - Optional.empty(), DataSize.ofBytes(retainedSizeOfPages(99)), TYPE_OPERATORS, WRITER_SCALING_MIN_DATA_PROCESSED, @@ -195,7 +194,6 @@ public void testRandom() FIXED_ARBITRARY_DISTRIBUTION, ImmutableList.of(), ImmutableList.of(), - Optional.empty(), LOCAL_EXCHANGE_MAX_BUFFERED_BYTES, TYPE_OPERATORS, WRITER_SCALING_MIN_DATA_PROCESSED, @@ -245,7 +243,6 @@ public void testScaleWriter() SCALED_WRITER_ROUND_ROBIN_DISTRIBUTION, ImmutableList.of(), ImmutableList.of(), - Optional.empty(), DataSize.ofBytes(retainedSizeOfPages(4)), TYPE_OPERATORS, DataSize.ofBytes(sizeOfPages(2)), @@ -305,7 +302,6 @@ public void testNoWriterScalingWhenOnlyBufferSizeLimitIsExceeded() SCALED_WRITER_ROUND_ROBIN_DISTRIBUTION, ImmutableList.of(), ImmutableList.of(), - Optional.empty(), DataSize.ofBytes(retainedSizeOfPages(4)), TYPE_OPERATORS, DataSize.ofBytes(sizeOfPages(10)), @@ -356,7 +352,6 @@ private void testScalingWithTwoDifferentPartitions(PartitioningHandle partitioni partitioningHandle, ImmutableList.of(0), TYPES, - Optional.empty(), DataSize.ofBytes(retainedSizeOfPages(2)), TYPE_OPERATORS, DataSize.of(10, KILOBYTE), @@ -465,7 +460,6 @@ public void testScaledWriterRoundRobinExchangerWhenTotalMemoryUsedIsGreaterThanL SCALED_WRITER_ROUND_ROBIN_DISTRIBUTION, ImmutableList.of(), ImmutableList.of(), - Optional.empty(), DataSize.ofBytes(retainedSizeOfPages(4)), TYPE_OPERATORS, DataSize.ofBytes(sizeOfPages(2)), @@ -509,7 +503,6 @@ public void testNoWriterScalingWhenOnlyWriterScalingMinDataProcessedLimitIsExcee SCALED_WRITER_ROUND_ROBIN_DISTRIBUTION, ImmutableList.of(), ImmutableList.of(), - Optional.empty(), DataSize.ofBytes(retainedSizeOfPages(20)), TYPE_OPERATORS, DataSize.ofBytes(sizeOfPages(2)), @@ -562,7 +555,6 @@ private void testScalingForSkewedWriters(PartitioningHandle partitioningHandle) partitioningHandle, ImmutableList.of(0), TYPES, - Optional.empty(), DataSize.ofBytes(retainedSizeOfPages(2)), TYPE_OPERATORS, DataSize.of(10, KILOBYTE), @@ -658,7 +650,6 @@ private void testNoScalingWhenDataWrittenIsLessThanMinFileSize(PartitioningHandl partitioningHandle, ImmutableList.of(0), TYPES, - Optional.empty(), DataSize.ofBytes(retainedSizeOfPages(2)), TYPE_OPERATORS, DataSize.of(50, MEGABYTE), @@ -728,7 +719,6 @@ private void testNoScalingWhenBufferUtilizationIsLessThanLimit(PartitioningHandl partitioningHandle, ImmutableList.of(0), TYPES, - Optional.empty(), DataSize.of(50, MEGABYTE), TYPE_OPERATORS, DataSize.of(10, KILOBYTE), @@ -800,7 +790,6 @@ private void testNoScalingWhenTotalMemoryUsedIsGreaterThanLimit(PartitioningHand partitioningHandle, ImmutableList.of(0), TYPES, - Optional.empty(), DataSize.ofBytes(retainedSizeOfPages(2)), TYPE_OPERATORS, DataSize.of(10, KILOBYTE), @@ -887,7 +876,6 @@ private void 
testDoNotUpdateScalingStateWhenMemoryIsAboveLimit(PartitioningHandl partitioningHandle, ImmutableList.of(0), TYPES, - Optional.empty(), DataSize.ofBytes(retainedSizeOfPages(2)), TYPE_OPERATORS, DataSize.of(10, KILOBYTE), @@ -980,7 +968,6 @@ public void testNoScalingWhenNoWriterSkewness() SCALED_WRITER_HASH_DISTRIBUTION, ImmutableList.of(0), TYPES, - Optional.empty(), DataSize.ofBytes(retainedSizeOfPages(2)), TYPE_OPERATORS, DataSize.of(50, KILOBYTE), @@ -1028,7 +1015,6 @@ public void testPassthrough() FIXED_PASSTHROUGH_DISTRIBUTION, ImmutableList.of(), ImmutableList.of(), - Optional.empty(), DataSize.ofBytes(retainedSizeOfPages(1)), TYPE_OPERATORS, WRITER_SCALING_MIN_DATA_PROCESSED, @@ -1096,7 +1082,6 @@ public void testPartition() FIXED_HASH_DISTRIBUTION, ImmutableList.of(0), TYPES, - Optional.empty(), LOCAL_EXCHANGE_MAX_BUFFERED_BYTES, TYPE_OPERATORS, WRITER_SCALING_MIN_DATA_PROCESSED, @@ -1193,7 +1178,6 @@ public BucketFunction getBucketFunction(ConnectorTransactionHandle transactionHa partitioningHandle, ImmutableList.of(1), ImmutableList.of(BIGINT), - Optional.empty(), LOCAL_EXCHANGE_MAX_BUFFERED_BYTES, TYPE_OPERATORS, WRITER_SCALING_MIN_DATA_PROCESSED, @@ -1245,7 +1229,6 @@ public void writeUnblockWhenAllReadersFinish() FIXED_ARBITRARY_DISTRIBUTION, ImmutableList.of(), ImmutableList.of(), - Optional.empty(), LOCAL_EXCHANGE_MAX_BUFFERED_BYTES, TYPE_OPERATORS, WRITER_SCALING_MIN_DATA_PROCESSED, @@ -1293,7 +1276,6 @@ public void writeUnblockWhenAllReadersFinishAndPagesConsumed() FIXED_PASSTHROUGH_DISTRIBUTION, ImmutableList.of(), ImmutableList.of(), - Optional.empty(), DataSize.ofBytes(2), TYPE_OPERATORS, WRITER_SCALING_MIN_DATA_PROCESSED, diff --git a/core/trino-main/src/test/java/io/trino/operator/join/JoinTestUtils.java b/core/trino-main/src/test/java/io/trino/operator/join/JoinTestUtils.java index 25eaa6ddfabe..d4c63a3a32e3 100644 --- a/core/trino-main/src/test/java/io/trino/operator/join/JoinTestUtils.java +++ b/core/trino-main/src/test/java/io/trino/operator/join/JoinTestUtils.java @@ -144,7 +144,6 @@ public static BuildSideSetup setupBuildSide( FIXED_HASH_DISTRIBUTION, hashChannels, hashChannelTypes, - Optional.empty(), DataSize.of(32, DataSize.Unit.MEGABYTE), TYPE_OPERATORS, DataSize.of(32, DataSize.Unit.MEGABYTE), diff --git a/core/trino-main/src/test/java/io/trino/operator/join/unspilled/JoinTestUtils.java b/core/trino-main/src/test/java/io/trino/operator/join/unspilled/JoinTestUtils.java index 0738783740cd..04d9d128d914 100644 --- a/core/trino-main/src/test/java/io/trino/operator/join/unspilled/JoinTestUtils.java +++ b/core/trino-main/src/test/java/io/trino/operator/join/unspilled/JoinTestUtils.java @@ -142,7 +142,6 @@ public static BuildSideSetup setupBuildSide( FIXED_HASH_DISTRIBUTION, hashChannels, hashChannelTypes, - buildPages.getHashChannel(), DataSize.of(32, DataSize.Unit.MEGABYTE), TYPE_OPERATORS, DataSize.of(32, DataSize.Unit.MEGABYTE), diff --git a/core/trino-main/src/test/java/io/trino/operator/output/TestPagePartitioner.java b/core/trino-main/src/test/java/io/trino/operator/output/TestPagePartitioner.java index f921f9d02b86..4318256ad669 100644 --- a/core/trino-main/src/test/java/io/trino/operator/output/TestPagePartitioner.java +++ b/core/trino-main/src/test/java/io/trino/operator/output/TestPagePartitioner.java @@ -176,7 +176,7 @@ private void testOutputForPageWithNoBlockPartitionFunction(PartitioningMode part PagePartitioner pagePartitioner = pagePartitioner(outputBuffer, BIGINT) .withPartitionFunction(new BucketPartitionFunction( - 
ROUND_ROBIN.createBucketFunction(null, false, PARTITION_COUNT, null), + ROUND_ROBIN.createBucketFunction(null, PARTITION_COUNT, null), IntStream.range(0, PARTITION_COUNT).toArray())) .withPartitionChannels(ImmutableList.of()) .build(); diff --git a/core/trino-main/src/test/java/io/trino/sql/planner/iterative/rule/TestPruneExchangeColumns.java b/core/trino-main/src/test/java/io/trino/sql/planner/iterative/rule/TestPruneExchangeColumns.java index 1062bb5de288..49f78eeadba6 100644 --- a/core/trino-main/src/test/java/io/trino/sql/planner/iterative/rule/TestPruneExchangeColumns.java +++ b/core/trino-main/src/test/java/io/trino/sql/planner/iterative/rule/TestPruneExchangeColumns.java @@ -73,28 +73,6 @@ public void testDoNotPrunePartitioningSymbol() .doesNotFire(); } - @Test - public void testDoNotPruneHashSymbol() - { - tester().assertThat(new PruneExchangeColumns()) - .on(p -> { - Symbol a = p.symbol("a"); - Symbol h = p.symbol("h"); - Symbol b = p.symbol("b"); - Symbol h1 = p.symbol("h_1"); - return p.project( - Assignments.identity(a), - p.exchange(e -> e - .addSource(p.values(b, h1)) - .addInputsSet(b, h1) - .fixedHashDistributionPartitioningScheme( - ImmutableList.of(a, h), - ImmutableList.of(a), - h))); - }) - .doesNotFire(); - } - @Test public void testDoNotPruneOrderingSymbol() { diff --git a/core/trino-main/src/test/java/io/trino/sql/planner/iterative/rule/TestPruneTableExecuteSourceColumns.java b/core/trino-main/src/test/java/io/trino/sql/planner/iterative/rule/TestPruneTableExecuteSourceColumns.java index febba3e59bac..09b26dfcc68c 100644 --- a/core/trino-main/src/test/java/io/trino/sql/planner/iterative/rule/TestPruneTableExecuteSourceColumns.java +++ b/core/trino-main/src/test/java/io/trino/sql/planner/iterative/rule/TestPruneTableExecuteSourceColumns.java @@ -74,15 +74,13 @@ public void testDoNotPrunePartitioningSchemeSymbols() .on(p -> { Symbol a = p.symbol("a"); Symbol partition = p.symbol("partition"); - Symbol hash = p.symbol("hash"); return p.tableExecute( ImmutableList.of(a), ImmutableList.of("column_a"), Optional.of(p.partitioningScheme( - ImmutableList.of(partition, hash), ImmutableList.of(partition), - hash)), - p.values(a, partition, hash)); + ImmutableList.of(partition))), + p.values(a, partition)); }) .doesNotFire(); } diff --git a/core/trino-main/src/test/java/io/trino/sql/planner/iterative/rule/TestPruneTableWriterSourceColumns.java b/core/trino-main/src/test/java/io/trino/sql/planner/iterative/rule/TestPruneTableWriterSourceColumns.java index 7fb8aa9b868d..392330f0c038 100644 --- a/core/trino-main/src/test/java/io/trino/sql/planner/iterative/rule/TestPruneTableWriterSourceColumns.java +++ b/core/trino-main/src/test/java/io/trino/sql/planner/iterative/rule/TestPruneTableWriterSourceColumns.java @@ -76,17 +76,15 @@ public void testDoNotPrunePartitioningSchemeSymbols() .on(p -> { Symbol a = p.symbol("a"); Symbol partition = p.symbol("partition"); - Symbol hash = p.symbol("hash"); return p.tableWriter( ImmutableList.of(a), ImmutableList.of("column_a"), Optional.of(p.partitioningScheme( - ImmutableList.of(partition, hash), ImmutableList.of(partition), - hash)), + ImmutableList.of(partition))), Optional.empty(), Optional.empty(), - p.values(a, partition, hash)); + p.values(a, partition)); }) .doesNotFire(); } diff --git a/core/trino-main/src/test/java/io/trino/sql/planner/iterative/rule/TestPushProjectionThroughExchange.java b/core/trino-main/src/test/java/io/trino/sql/planner/iterative/rule/TestPushProjectionThroughExchange.java index a6ea06b9a40b..7fe60502facc 100644 --- 
a/core/trino-main/src/test/java/io/trino/sql/planner/iterative/rule/TestPushProjectionThroughExchange.java +++ b/core/trino-main/src/test/java/io/trino/sql/planner/iterative/rule/TestPushProjectionThroughExchange.java @@ -123,21 +123,18 @@ public void testHashMapping() tester().assertThat(new PushProjectionThroughExchange()) .on(p -> { Symbol a = p.symbol("a", INTEGER); - Symbol h1 = p.symbol("h_1"); Symbol c = p.symbol("c", INTEGER); - Symbol h = p.symbol("h"); Symbol cTimes5 = p.symbol("c_times_5", INTEGER); return p.project( Assignments.of( cTimes5, new Call(MULTIPLY_INTEGER, ImmutableList.of(new Reference(INTEGER, "c"), new Constant(INTEGER, 5L)))), p.exchange(e -> e .addSource( - p.values(a, h1)) - .addInputsSet(a, h1) + p.values(a)) + .addInputsSet(a) .fixedHashDistributionPartitioningScheme( - ImmutableList.of(c, h), ImmutableList.of(c), - h))); + ImmutableList.of(c)))); }) .matches( project( @@ -145,9 +142,8 @@ cTimes5, new Call(MULTIPLY_INTEGER, ImmutableList.of(new Reference(INTEGER, "c") strictProject( ImmutableMap.of( "a", expression(new Reference(INTEGER, "a")), - "h_1", expression(new Reference(BIGINT, "h_1")), "a_times_5", expression(new Call(MULTIPLY_INTEGER, ImmutableList.of(new Reference(INTEGER, "a"), new Constant(INTEGER, 5L))))), - values(ImmutableList.of("a", "h_1")))))); + values(ImmutableList.of("a")))))); } @Test @@ -269,39 +265,33 @@ public void testPartitioningColumnAndHashWithoutIdentityMappingInProjection() .on(p -> { Symbol a = p.symbol("a", INTEGER); Symbol b = p.symbol("b", INTEGER); - Symbol h = p.symbol("h", INTEGER); Symbol aTimes5 = p.symbol("a_times_5", INTEGER); Symbol bTimes5 = p.symbol("b_times_5", INTEGER); - Symbol hTimes5 = p.symbol("h_times_5", INTEGER); return p.project( Assignments.builder() .put(aTimes5, new Call(MULTIPLY_INTEGER, ImmutableList.of(new Reference(INTEGER, "a"), new Constant(INTEGER, 5L)))) .put(bTimes5, new Call(MULTIPLY_INTEGER, ImmutableList.of(new Reference(INTEGER, "b"), new Constant(INTEGER, 5L)))) - .put(hTimes5, new Call(MULTIPLY_INTEGER, ImmutableList.of(new Reference(INTEGER, "h"), new Constant(INTEGER, 5L)))) .build(), p.exchange(e -> e .addSource( - p.values(a, b, h)) - .addInputsSet(a, b, h) + p.values(a, b)) + .addInputsSet(a, b) .fixedHashDistributionPartitioningScheme( - ImmutableList.of(a, b, h), - ImmutableList.of(b), - h))); + ImmutableList.of(a, b), + ImmutableList.of(b)))); }) .matches( project( exchange( project( values( - ImmutableList.of("a", "b", "h")) - ).withNumberOfOutputColumns(5) + ImmutableList.of("a", "b")) + ).withNumberOfOutputColumns(3) .withAlias("b", expression(new Reference(INTEGER, "b"))) - .withAlias("h", expression(new Reference(INTEGER, "h"))) .withAlias("a_times_5", expression(new Call(MULTIPLY_INTEGER, ImmutableList.of(new Reference(INTEGER, "a"), new Constant(INTEGER, 5L))))) - .withAlias("b_times_5", expression(new Call(MULTIPLY_INTEGER, ImmutableList.of(new Reference(INTEGER, "b"), new Constant(INTEGER, 5L))))) - .withAlias("h_times_5", expression(new Call(MULTIPLY_INTEGER, ImmutableList.of(new Reference(INTEGER, "h"), new Constant(INTEGER, 5L)))))) - ).withNumberOfOutputColumns(3) - .withExactOutputs("a_times_5", "b_times_5", "h_times_5")); + .withAlias("b_times_5", expression(new Call(MULTIPLY_INTEGER, ImmutableList.of(new Reference(INTEGER, "b"), new Constant(INTEGER, 5L)))))) + ).withNumberOfOutputColumns(2) + .withExactOutputs("a_times_5", "b_times_5")); } @Test diff --git a/core/trino-main/src/test/java/io/trino/sql/planner/iterative/rule/test/PlanBuilder.java 
b/core/trino-main/src/test/java/io/trino/sql/planner/iterative/rule/test/PlanBuilder.java index 774270686b43..e53ec978d6e3 100644 --- a/core/trino-main/src/test/java/io/trino/sql/planner/iterative/rule/test/PlanBuilder.java +++ b/core/trino-main/src/test/java/io/trino/sql/planner/iterative/rule/test/PlanBuilder.java @@ -910,22 +910,12 @@ public ExchangeBuilder fixedHashDistributionPartitioningScheme(List outp ImmutableList.copyOf(outputSymbols))); } - public ExchangeBuilder fixedHashDistributionPartitioningScheme(List outputSymbols, List partitioningSymbols, Symbol hashSymbol) - { - return partitioningScheme(new PartitioningScheme(Partitioning.create( - FIXED_HASH_DISTRIBUTION, - ImmutableList.copyOf(partitioningSymbols)), - ImmutableList.copyOf(outputSymbols), - Optional.of(hashSymbol))); - } - public ExchangeBuilder fixedHashDistributionPartitioningScheme(List outputSymbols, List partitioningSymbols, int partitionCount) { return partitioningScheme(new PartitioningScheme(Partitioning.create( FIXED_HASH_DISTRIBUTION, ImmutableList.copyOf(partitioningSymbols)), ImmutableList.copyOf(outputSymbols), - Optional.empty(), false, Optional.empty(), Optional.of(partitionCount))); @@ -937,7 +927,6 @@ public ExchangeBuilder fixedArbitraryDistributionPartitioningScheme(List FIXED_ARBITRARY_DISTRIBUTION, ImmutableList.of()), ImmutableList.copyOf(outputSymbols), - Optional.empty(), false, Optional.empty(), Optional.of(partitionCount))); @@ -1273,13 +1262,12 @@ public TableFunctionProcessorNode tableFunctionProcessor(Consumer outputSymbols, List partitioningSymbols, Symbol hashSymbol) + public PartitioningScheme partitioningScheme(List outputSymbols, List partitioningSymbols) { return new PartitioningScheme(Partitioning.create( FIXED_HASH_DISTRIBUTION, ImmutableList.copyOf(partitioningSymbols)), - ImmutableList.copyOf(outputSymbols), - Optional.of(hashSymbol)); + ImmutableList.copyOf(outputSymbols)); } public StatisticAggregations statisticAggregations(Map aggregations, List groupingSymbols) diff --git a/testing/trino-tests/src/test/java/io/trino/execution/TestEventListenerBasic.java b/testing/trino-tests/src/test/java/io/trino/execution/TestEventListenerBasic.java index d26655abfb57..b343c1ccab20 100644 --- a/testing/trino-tests/src/test/java/io/trino/execution/TestEventListenerBasic.java +++ b/testing/trino-tests/src/test/java/io/trino/execution/TestEventListenerBasic.java @@ -1471,7 +1471,6 @@ public void testAnonymizedJsonPlan() ImmutableMap.of( "partitioning", "[connectorHandleType = SystemPartitioningHandle, partitioning = SINGLE, function = SINGLE]", "isReplicateNullsAndAny", "", - "hashColumn", "[]", "arguments", "[]"), ImmutableList.of(new Symbol(DOUBLE, "symbol_1")), ImmutableList.of(), From 09ee826c6b3e3b98610f692743b81e58860fa478 Mon Sep 17 00:00:00 2001 From: Raunaq Morarka Date: Mon, 16 Jun 2025 10:56:37 +0530 Subject: [PATCH 6/6] Clean up pre-computed hashes related code in tests --- .../test/java/io/trino/RowPagesBuilder.java | 47 +----- ...kHashAndStreamingAggregationOperators.java | 1 - .../operator/BenchmarkWindowOperator.java | 2 +- .../operator/GroupByHashYieldAssertion.java | 4 +- .../io/trino/operator/OperatorAssertion.java | 74 +-------- .../operator/TestDistinctLimitOperator.java | 34 +---- .../trino/operator/TestGroupIdOperator.java | 2 +- .../operator/TestHashAggregationOperator.java | 44 +++--- .../operator/TestHashSemiJoinOperator.java | 32 ++-- .../operator/TestMarkDistinctOperator.java | 29 ++-- .../trino/operator/TestRowNumberOperator.java | 8 +- 
.../operator/TestTopNRankingOperator.java | 94 ++++++------ .../BenchmarkHashBuildAndJoinOperators.java | 4 +- .../operator/join/TestHashJoinOperator.java | 106 ++++++------- .../BenchmarkHashBuildAndJoinOperators.java | 7 +- .../join/unspilled/TestHashJoinOperator.java | 142 ++++++++---------- 16 files changed, 241 insertions(+), 389 deletions(-) diff --git a/core/trino-main/src/test/java/io/trino/RowPagesBuilder.java b/core/trino-main/src/test/java/io/trino/RowPagesBuilder.java index 7ccb3c2a3165..1a87f0e001a4 100644 --- a/core/trino-main/src/test/java/io/trino/RowPagesBuilder.java +++ b/core/trino-main/src/test/java/io/trino/RowPagesBuilder.java @@ -17,9 +17,7 @@ import com.google.common.collect.Iterables; import io.trino.spi.Page; import io.trino.spi.block.Block; -import io.trino.spi.type.BigintType; import io.trino.spi.type.Type; -import io.trino.type.TypeTestUtils; import java.util.List; import java.util.Optional; @@ -40,31 +38,29 @@ public static RowPagesBuilder rowPagesBuilder(Iterable types) return new RowPagesBuilder(types); } - public static RowPagesBuilder rowPagesBuilder(boolean hashEnabled, List hashChannels, Type... types) + public static RowPagesBuilder rowPagesBuilder(List hashChannels, Type... types) { - return rowPagesBuilder(hashEnabled, hashChannels, ImmutableList.copyOf(types)); + return rowPagesBuilder(hashChannels, ImmutableList.copyOf(types)); } - public static RowPagesBuilder rowPagesBuilder(boolean hashEnabled, List hashChannels, Iterable types) + public static RowPagesBuilder rowPagesBuilder(List hashChannels, Iterable types) { - return new RowPagesBuilder(hashEnabled, Optional.of(hashChannels), types); + return new RowPagesBuilder(Optional.of(hashChannels), types); } private final ImmutableList.Builder pages = ImmutableList.builder(); private final List types; private RowPageBuilder builder; - private final boolean hashEnabled; private final Optional> hashChannels; RowPagesBuilder(Iterable types) { - this(false, Optional.empty(), types); + this(Optional.empty(), types); } - RowPagesBuilder(boolean hashEnabled, Optional> hashChannels, Iterable types) + RowPagesBuilder(Optional> hashChannels, Iterable types) { this.types = ImmutableList.copyOf(requireNonNull(types, "types is null")); - this.hashEnabled = hashEnabled; this.hashChannels = hashChannels.map(ImmutableList::copyOf); builder = rowPageBuilder(types); } @@ -118,31 +114,10 @@ public RowPagesBuilder pageBreak() public List build() { pageBreak(); - List resultPages = pages.build(); - if (hashEnabled) { - return pagesWithHash(resultPages); - } - return resultPages; - } - - private List pagesWithHash(List pages) - { - ImmutableList.Builder resultPages = ImmutableList.builder(); - for (Page page : pages) { - resultPages.add(TypeTestUtils.getHashPage(page, types, hashChannels.get())); - } - return resultPages.build(); + return pages.build(); } public List getTypes() - { - if (hashEnabled) { - return ImmutableList.copyOf(Iterables.concat(types, ImmutableList.of(BigintType.BIGINT))); - } - return types; - } - - public List getTypesWithoutHash() { return types; } @@ -151,12 +126,4 @@ public Optional> getHashChannels() { return hashChannels; } - - public Optional getHashChannel() - { - if (hashEnabled) { - return Optional.of(types.size()); - } - return Optional.empty(); - } } diff --git a/core/trino-main/src/test/java/io/trino/operator/BenchmarkHashAndStreamingAggregationOperators.java b/core/trino-main/src/test/java/io/trino/operator/BenchmarkHashAndStreamingAggregationOperators.java index 
2c4713959589..b320bc537c5c 100644 --- a/core/trino-main/src/test/java/io/trino/operator/BenchmarkHashAndStreamingAggregationOperators.java +++ b/core/trino-main/src/test/java/io/trino/operator/BenchmarkHashAndStreamingAggregationOperators.java @@ -141,7 +141,6 @@ public void setup() } RowPagesBuilder pagesBuilder = RowPagesBuilder.rowPagesBuilder( - hashAggregation, hashChannels, ImmutableList.builder() .addAll(hashTypes) diff --git a/core/trino-main/src/test/java/io/trino/operator/BenchmarkWindowOperator.java b/core/trino-main/src/test/java/io/trino/operator/BenchmarkWindowOperator.java index e918a80e825d..d3b53ca72a94 100644 --- a/core/trino-main/src/test/java/io/trino/operator/BenchmarkWindowOperator.java +++ b/core/trino-main/src/test/java/io/trino/operator/BenchmarkWindowOperator.java @@ -172,7 +172,7 @@ private List generateTestData() private RowPagesBuilder buildPages(int currentPartitionIdentifier, List typesArray) { int groupIdentifier = 100; - RowPagesBuilder rowPagesBuilder = RowPagesBuilder.rowPagesBuilder(false, ImmutableList.of(0), typesArray); + RowPagesBuilder rowPagesBuilder = RowPagesBuilder.rowPagesBuilder(ImmutableList.of(0), typesArray); for (int i = 0; i < TOTAL_PAGES; i++) { BlockBuilder firstColumnBlockBuilder = BIGINT.createFixedSizeBlockBuilder(ROWS_PER_PAGE); diff --git a/core/trino-main/src/test/java/io/trino/operator/GroupByHashYieldAssertion.java b/core/trino-main/src/test/java/io/trino/operator/GroupByHashYieldAssertion.java index 3c119335bceb..13f121e1e1ee 100644 --- a/core/trino-main/src/test/java/io/trino/operator/GroupByHashYieldAssertion.java +++ b/core/trino-main/src/test/java/io/trino/operator/GroupByHashYieldAssertion.java @@ -55,9 +55,9 @@ public final class GroupByHashYieldAssertion private GroupByHashYieldAssertion() {} - public static List createPagesWithDistinctHashKeys(Type type, int pageCount, int positionCountPerPage) + public static List createPages(Type type, int pageCount, int positionCountPerPage) { - RowPagesBuilder rowPagesBuilder = rowPagesBuilder(true, ImmutableList.of(0), type); + RowPagesBuilder rowPagesBuilder = rowPagesBuilder(ImmutableList.of(0), type); for (int i = 0; i < pageCount; i++) { rowPagesBuilder.addSequencePage(positionCountPerPage, positionCountPerPage * i); } diff --git a/core/trino-main/src/test/java/io/trino/operator/OperatorAssertion.java b/core/trino-main/src/test/java/io/trino/operator/OperatorAssertion.java index 426cd4d2c424..398aabe5f029 100644 --- a/core/trino-main/src/test/java/io/trino/operator/OperatorAssertion.java +++ b/core/trino-main/src/test/java/io/trino/operator/OperatorAssertion.java @@ -30,7 +30,6 @@ import java.util.Collection; import java.util.Iterator; import java.util.List; -import java.util.Optional; import java.util.Set; import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; @@ -229,42 +228,12 @@ public static void assertOperatorEquals( List input, MaterializedResult expected, boolean revokeMemoryWhenAddingPages) - { - assertOperatorEquals(operatorFactory, driverContext, input, expected, false, ImmutableList.of(), revokeMemoryWhenAddingPages); - } - - public static void assertOperatorEquals( - OperatorFactory operatorFactory, - DriverContext driverContext, - List input, - MaterializedResult expected, - boolean revokeMemoryWhenAddingPages, - boolean closeOperatorFactory) - { - assertOperatorEquals(operatorFactory, driverContext, input, expected, false, ImmutableList.of(), revokeMemoryWhenAddingPages, closeOperatorFactory); - } - - public static void 
assertOperatorEquals(OperatorFactory operatorFactory, DriverContext driverContext, List input, MaterializedResult expected, boolean hashEnabled, List hashChannels) - { - assertOperatorEquals(operatorFactory, driverContext, input, expected, hashEnabled, hashChannels, true); - } - - public static void assertOperatorEquals( - OperatorFactory operatorFactory, - DriverContext driverContext, - List input, - MaterializedResult expected, - boolean hashEnabled, - List hashChannels, - boolean revokeMemoryWhenAddingPages) { assertOperatorEquals( operatorFactory, driverContext, input, expected, - hashEnabled, - hashChannels, revokeMemoryWhenAddingPages, true); } @@ -274,16 +243,10 @@ public static void assertOperatorEquals( DriverContext driverContext, List input, MaterializedResult expected, - boolean hashEnabled, - List hashChannels, boolean revokeMemoryWhenAddingPages, boolean closeOperatorFactory) { List pages = toPages(operatorFactory, driverContext, input, revokeMemoryWhenAddingPages, closeOperatorFactory); - if (hashEnabled && !hashChannels.isEmpty()) { - // Drop the hashChannel for all pages - pages = dropChannel(pages, hashChannels); - } MaterializedResult actual = toMaterializedResult(driverContext.getSession(), expected.getTypes(), pages); assertThat(actual).containsExactlyElementsOf(expected); } @@ -294,28 +257,7 @@ public static void assertOperatorEqualsIgnoreOrder( List input, MaterializedResult expected) { - assertOperatorEqualsIgnoreOrder(operatorFactory, driverContext, input, expected, false); - } - - public static void assertOperatorEqualsIgnoreOrder( - OperatorFactory operatorFactory, - DriverContext driverContext, - List input, - MaterializedResult expected, - boolean revokeMemoryWhenAddingPages) - { - assertOperatorEqualsIgnoreOrder(operatorFactory, driverContext, input, expected, false, Optional.empty(), revokeMemoryWhenAddingPages); - } - - public static void assertOperatorEqualsIgnoreOrder( - OperatorFactory operatorFactory, - DriverContext driverContext, - List input, - MaterializedResult expected, - boolean hashEnabled, - Optional hashChannel) - { - assertOperatorEqualsIgnoreOrder(operatorFactory, driverContext, input, expected, hashEnabled, hashChannel, true); + assertOperatorEqualsIgnoreOrder(operatorFactory, driverContext, input, expected, true); } public static void assertOperatorEqualsIgnoreOrder( @@ -323,29 +265,19 @@ public static void assertOperatorEqualsIgnoreOrder( DriverContext driverContext, List input, MaterializedResult expected, - boolean hashEnabled, - Optional hashChannel, boolean revokeMemoryWhenAddingPages) { assertPagesEqualIgnoreOrder( driverContext, toPages(operatorFactory, driverContext, input, revokeMemoryWhenAddingPages), - expected, - hashEnabled, - hashChannel); + expected); } public static void assertPagesEqualIgnoreOrder( DriverContext driverContext, List actualPages, - MaterializedResult expected, - boolean hashEnabled, - Optional hashChannel) + MaterializedResult expected) { - if (hashEnabled && hashChannel.isPresent()) { - // Drop the hashChannel for all pages - actualPages = dropChannel(actualPages, ImmutableList.of(hashChannel.get())); - } MaterializedResult actual = toMaterializedResult(driverContext.getSession(), expected.getTypes(), actualPages); assertThat(ImmutableMultiset.copyOf(actual.getMaterializedRows())).isEqualTo(ImmutableMultiset.copyOf(expected.getMaterializedRows())); } diff --git a/core/trino-main/src/test/java/io/trino/operator/TestDistinctLimitOperator.java 
b/core/trino-main/src/test/java/io/trino/operator/TestDistinctLimitOperator.java index 3562c17928dd..1437a5eb0dc7 100644 --- a/core/trino-main/src/test/java/io/trino/operator/TestDistinctLimitOperator.java +++ b/core/trino-main/src/test/java/io/trino/operator/TestDistinctLimitOperator.java @@ -33,7 +33,7 @@ import static io.airlift.concurrent.Threads.daemonThreadsNamed; import static io.trino.RowPagesBuilder.rowPagesBuilder; import static io.trino.SessionTestUtils.TEST_SESSION; -import static io.trino.operator.GroupByHashYieldAssertion.createPagesWithDistinctHashKeys; +import static io.trino.operator.GroupByHashYieldAssertion.createPages; import static io.trino.operator.GroupByHashYieldAssertion.finishOperatorWithYieldingGroupByHash; import static io.trino.operator.OperatorAssertion.assertOperatorEquals; import static io.trino.spi.type.BigintType.BIGINT; @@ -63,15 +63,9 @@ public void tearDown() @Test public void testDistinctLimit() - { - testDistinctLimit(true); - testDistinctLimit(false); - } - - public void testDistinctLimit(boolean hashEnabled) { DriverContext driverContext = newDriverContext(); - RowPagesBuilder rowPagesBuilder = rowPagesBuilder(hashEnabled, Ints.asList(0), BIGINT); + RowPagesBuilder rowPagesBuilder = rowPagesBuilder(Ints.asList(0), BIGINT); List input = rowPagesBuilder .addSequencePage(3, 1) .addSequencePage(5, 2) @@ -93,20 +87,14 @@ public void testDistinctLimit(boolean hashEnabled) .row(5L) .build(); - assertOperatorEquals(operatorFactory, driverContext, input, expected, hashEnabled, ImmutableList.of(1)); + assertOperatorEquals(operatorFactory, driverContext, input, expected); } @Test public void testDistinctLimitWithPageAlignment() - { - testDistinctLimitWithPageAlignment(true); - testDistinctLimitWithPageAlignment(false); - } - - public void testDistinctLimitWithPageAlignment(boolean hashEnabled) { DriverContext driverContext = newDriverContext(); - RowPagesBuilder rowPagesBuilder = rowPagesBuilder(hashEnabled, Ints.asList(0), BIGINT); + RowPagesBuilder rowPagesBuilder = rowPagesBuilder(Ints.asList(0), BIGINT); List input = rowPagesBuilder .addSequencePage(3, 1) .addSequencePage(3, 2) @@ -126,21 +114,15 @@ public void testDistinctLimitWithPageAlignment(boolean hashEnabled) .row(3L) .build(); - assertOperatorEquals(operatorFactory, driverContext, input, expected, hashEnabled, ImmutableList.of(1)); + assertOperatorEquals(operatorFactory, driverContext, input, expected); } @Test public void testDistinctLimitValuesLessThanLimit() - { - testDistinctLimitValuesLessThanLimit(true); - testDistinctLimitValuesLessThanLimit(false); - } - - public void testDistinctLimitValuesLessThanLimit(boolean hashEnabled) { DriverContext driverContext = newDriverContext(); - RowPagesBuilder rowPagesBuilder = rowPagesBuilder(hashEnabled, Ints.asList(0), BIGINT); + RowPagesBuilder rowPagesBuilder = rowPagesBuilder(Ints.asList(0), BIGINT); List input = rowPagesBuilder .addSequencePage(3, 1) .addSequencePage(3, 2) @@ -161,7 +143,7 @@ public void testDistinctLimitValuesLessThanLimit(boolean hashEnabled) .row(4L) .build(); - assertOperatorEquals(operatorFactory, driverContext, input, expected, hashEnabled, ImmutableList.of(1)); + assertOperatorEquals(operatorFactory, driverContext, input, expected); } @Test @@ -173,7 +155,7 @@ public void testMemoryReservationYield() public void testMemoryReservationYield(Type type) { - List input = createPagesWithDistinctHashKeys(type, 6_000, 600); + List input = createPages(type, 6_000, 600); OperatorFactory operatorFactory = new 
DistinctLimitOperator.DistinctLimitOperatorFactory( 0, diff --git a/core/trino-main/src/test/java/io/trino/operator/TestGroupIdOperator.java b/core/trino-main/src/test/java/io/trino/operator/TestGroupIdOperator.java index 3f1f3da03796..e1d27ca64f27 100644 --- a/core/trino-main/src/test/java/io/trino/operator/TestGroupIdOperator.java +++ b/core/trino-main/src/test/java/io/trino/operator/TestGroupIdOperator.java @@ -70,7 +70,7 @@ public void tearDown() @Test public void testGroupId() { - RowPagesBuilder rowPagesBuilder = rowPagesBuilder(false, ImmutableList.of(), BIGINT, VARCHAR, BOOLEAN, BIGINT); + RowPagesBuilder rowPagesBuilder = rowPagesBuilder(ImmutableList.of(), BIGINT, VARCHAR, BOOLEAN, BIGINT); List input = rowPagesBuilder .addSequencePage(3, 100, 400, 0, 1000) .addSequencePage(3, 200, 500, 0, 1100) diff --git a/core/trino-main/src/test/java/io/trino/operator/TestHashAggregationOperator.java b/core/trino-main/src/test/java/io/trino/operator/TestHashAggregationOperator.java index acba5f72b28f..176f97b63027 100644 --- a/core/trino-main/src/test/java/io/trino/operator/TestHashAggregationOperator.java +++ b/core/trino-main/src/test/java/io/trino/operator/TestHashAggregationOperator.java @@ -72,7 +72,7 @@ import static io.trino.block.BlockAssertions.createRepeatedValuesBlock; import static io.trino.operator.AggregationMetrics.INPUT_ROWS_WITH_PARTIAL_AGGREGATION_DISABLED_METRIC_NAME; import static io.trino.operator.GroupByHashYieldAssertion.GroupByHashYieldResult; -import static io.trino.operator.GroupByHashYieldAssertion.createPagesWithDistinctHashKeys; +import static io.trino.operator.GroupByHashYieldAssertion.createPages; import static io.trino.operator.GroupByHashYieldAssertion.finishOperatorWithYieldingGroupByHash; import static io.trino.operator.OperatorAssertion.assertOperatorEqualsIgnoreOrder; import static io.trino.operator.OperatorAssertion.assertPagesEqualIgnoreOrder; @@ -144,7 +144,7 @@ private void testHashAggregation(boolean spillEnabled, boolean revokeMemoryWhenA TestingAggregationFunction countBooleanColumn = FUNCTION_RESOLUTION.getAggregateFunction("count", fromTypes(BOOLEAN)); TestingAggregationFunction maxVarcharColumn = FUNCTION_RESOLUTION.getAggregateFunction("max", fromTypes(VARCHAR)); List hashChannels = Ints.asList(1); - RowPagesBuilder rowPagesBuilder = rowPagesBuilder(false, hashChannels, VARCHAR, VARCHAR, VARCHAR, BIGINT, BOOLEAN); + RowPagesBuilder rowPagesBuilder = rowPagesBuilder(hashChannels, VARCHAR, VARCHAR, VARCHAR, BIGINT, BOOLEAN); List input = rowPagesBuilder .addSequencePage(numberOfRows, 100, 0, 100_000, 0, 500) .addSequencePage(numberOfRows, 100, 0, 200_000, 0, 500) @@ -185,7 +185,7 @@ private void testHashAggregation(boolean spillEnabled, boolean revokeMemoryWhenA List pages = toPages(operatorFactory, driverContext, input, revokeMemoryWhenAddingPages); assertThat(pages).as("Expected more than one output page").hasSizeGreaterThan(1); - assertPagesEqualIgnoreOrder(driverContext, pages, expected, false, Optional.of(hashChannels.size())); + assertPagesEqualIgnoreOrder(driverContext, pages, expected); assertThat(spillEnabled == (spillerFactory.getSpillsCount() > 0)) .describedAs(format("Spill state mismatch. 
Expected spill: %s, spill count: %s", spillEnabled, spillerFactory.getSpillsCount())) @@ -215,7 +215,7 @@ private void testHashAggregationWithGlobals(boolean spillEnabled, boolean revoke Optional groupIdChannel = Optional.of(1); List groupByChannels = Ints.asList(1, 2); List globalAggregationGroupIds = Ints.asList(42, 49); - RowPagesBuilder rowPagesBuilder = rowPagesBuilder(false, groupByChannels, VARCHAR, VARCHAR, VARCHAR, BIGINT, BIGINT, BOOLEAN); + RowPagesBuilder rowPagesBuilder = rowPagesBuilder(groupByChannels, VARCHAR, VARCHAR, VARCHAR, BIGINT, BIGINT, BOOLEAN); List input = rowPagesBuilder.build(); HashAggregationOperatorFactory operatorFactory = new HashAggregationOperatorFactory( @@ -248,7 +248,7 @@ private void testHashAggregationWithGlobals(boolean spillEnabled, boolean revoke .row(null, 49L, 0L, null, null, null, 0L, 0L) .build(); - assertOperatorEqualsIgnoreOrder(operatorFactory, driverContext, input, expected, false, Optional.of(groupByChannels.size()), revokeMemoryWhenAddingPages); + assertOperatorEqualsIgnoreOrder(operatorFactory, driverContext, input, expected, revokeMemoryWhenAddingPages); } @Test @@ -270,7 +270,7 @@ private void testHashAggregationMemoryReservation(boolean spillEnabled, boolean TestingAggregationFunction arrayAggColumn = FUNCTION_RESOLUTION.getAggregateFunction("array_agg", fromTypes(BIGINT)); List hashChannels = Ints.asList(1); - RowPagesBuilder rowPagesBuilder = rowPagesBuilder(false, hashChannels, BIGINT, BIGINT); + RowPagesBuilder rowPagesBuilder = rowPagesBuilder(hashChannels, BIGINT, BIGINT); List input = rowPagesBuilder .addSequencePage(10, 100, 0) .addSequencePage(10, 200, 0) @@ -314,7 +314,7 @@ public void testMemoryLimit() TestingAggregationFunction maxVarcharColumn = FUNCTION_RESOLUTION.getAggregateFunction("max", fromTypes(VARCHAR)); List hashChannels = Ints.asList(1); - RowPagesBuilder rowPagesBuilder = rowPagesBuilder(false, hashChannels, VARCHAR, BIGINT, VARCHAR, BIGINT); + RowPagesBuilder rowPagesBuilder = rowPagesBuilder(hashChannels, VARCHAR, BIGINT, VARCHAR, BIGINT); List input = rowPagesBuilder .addSequencePage(10, 100, 0, 100, 0) .addSequencePage(10, 100, 0, 200, 0) @@ -369,7 +369,7 @@ private void testHashBuilderResize(boolean spillEnabled, boolean revokeMemoryWhe builder.build(); List hashChannels = Ints.asList(0); - RowPagesBuilder rowPagesBuilder = rowPagesBuilder(false, hashChannels, VARCHAR); + RowPagesBuilder rowPagesBuilder = rowPagesBuilder(hashChannels, VARCHAR); List input = rowPagesBuilder .addSequencePage(10, 100) .addBlocksPage(builder.build()) @@ -409,7 +409,7 @@ public void testMemoryReservationYield() public void testMemoryReservationYield(Type type) { - List input = createPagesWithDistinctHashKeys(type, 6_000, 600); + List input = createPages(type, 6_000, 600); OperatorFactory operatorFactory = new HashAggregationOperatorFactory( 0, new PlanNodeId("test"), @@ -451,7 +451,7 @@ public void testHashBuilderResizeLimit() builder.build(); List hashChannels = Ints.asList(0); - RowPagesBuilder rowPagesBuilder = rowPagesBuilder(false, hashChannels, VARCHAR); + RowPagesBuilder rowPagesBuilder = rowPagesBuilder(hashChannels, VARCHAR); List input = rowPagesBuilder .addSequencePage(10, 100) .addBlocksPage(builder.build()) @@ -492,7 +492,7 @@ public void testMultiSliceAggregationOutput() int multiSlicePositionCount = (int) (1.5 * PageBuilderStatus.DEFAULT_MAX_PAGE_SIZE_IN_BYTES / fixedWidthSize); List hashChannels = Ints.asList(1); - RowPagesBuilder rowPagesBuilder = rowPagesBuilder(false, hashChannels, BIGINT, BIGINT); + 
RowPagesBuilder rowPagesBuilder = rowPagesBuilder(hashChannels, BIGINT, BIGINT); List input = rowPagesBuilder .addSequencePage(multiSlicePositionCount, 0, 0) .build(); @@ -520,7 +520,7 @@ public void testMultiplePartialFlushes() throws Exception { List hashChannels = Ints.asList(0); - RowPagesBuilder rowPagesBuilder = rowPagesBuilder(false, hashChannels, BIGINT); + RowPagesBuilder rowPagesBuilder = rowPagesBuilder(hashChannels, BIGINT); List input = rowPagesBuilder .addSequencePage(500, 0) .addSequencePage(500, 500) @@ -700,7 +700,7 @@ public void testSpillerFailure() List hashChannels = Ints.asList(1); List types = ImmutableList.of(VARCHAR, BIGINT, VARCHAR, BIGINT); - RowPagesBuilder rowPagesBuilder = rowPagesBuilder(false, hashChannels, types); + RowPagesBuilder rowPagesBuilder = rowPagesBuilder(hashChannels, types); List input = rowPagesBuilder .addSequencePage(10, 100, 0, 100, 0) // current accumulator allows 1024 values without using revocable memory, so add enough values to cause revocable memory usage @@ -748,7 +748,7 @@ public void testMemoryTracking() throws Exception { List hashChannels = Ints.asList(0); - RowPagesBuilder rowPagesBuilder = rowPagesBuilder(false, hashChannels, BIGINT); + RowPagesBuilder rowPagesBuilder = rowPagesBuilder(hashChannels, BIGINT); Page input = getOnlyElement(rowPagesBuilder.addSequencePage(500, 0).build()); HashAggregationOperatorFactory operatorFactory = new HashAggregationOperatorFactory( @@ -805,7 +805,7 @@ public void testAdaptivePartialAggregation() // at the start partial aggregation is enabled assertThat(partialAggregationController.isPartialAggregationDisabled()).isFalse(); // First operator will trigger adaptive partial aggregation after the first page - List operator1Input = rowPagesBuilder(false, hashChannels, BIGINT) + List operator1Input = rowPagesBuilder(hashChannels, BIGINT) .addBlocksPage(createLongsBlock(0, 1, 2, 3, 4, 5, 6, 7, 8, 8)) // first page will be hashed but the values are almost unique, so it will trigger adaptation .addBlocksPage(createRepeatedValuesBlock(1, 10)) // second page would be hashed to existing value 1. 
but if adaptive PA kicks in, the raw values will be passed on .build(); @@ -818,7 +818,7 @@ public void testAdaptivePartialAggregation() // the first operator flush disables partial aggregation assertThat(partialAggregationController.isPartialAggregationDisabled()).isTrue(); // second operator using the same factory, reuses PartialAggregationControl, so it will only produce raw pages (partial aggregation is disabled at this point) - List operator2Input = rowPagesBuilder(false, hashChannels, BIGINT) + List operator2Input = rowPagesBuilder(hashChannels, BIGINT) .addBlocksPage(createRepeatedValuesBlock(1, 10)) .addBlocksPage(createRepeatedValuesBlock(2, 10)) .build(); @@ -830,7 +830,7 @@ public void testAdaptivePartialAggregation() // partial aggregation should be enabled again after enough data is processed for (int i = 1; i <= 4; ++i) { - List operatorInput = rowPagesBuilder(false, hashChannels, BIGINT) + List operatorInput = rowPagesBuilder(hashChannels, BIGINT) .addBlocksPage(createLongsBlock(0, 1, 2, 3, 4, 5, 6, 7, 8)) .build(); List operatorExpected = rowPagesBuilder(BIGINT, BIGINT) @@ -849,7 +849,7 @@ public void testAdaptivePartialAggregation() partialAggregationController.onFlush(1_000_000, 1_000_000, OptionalLong.empty()); // partial aggregation should keep being enabled after good reduction has been observed - List operator3Input = rowPagesBuilder(false, hashChannels, BIGINT) + List operator3Input = rowPagesBuilder(hashChannels, BIGINT) .addBlocksPage(createRepeatedValuesBlock(1, 100)) .addBlocksPage(createRepeatedValuesBlock(2, 100)) .build(); @@ -883,7 +883,7 @@ public void testAdaptivePartialAggregationTriggeredOnlyOnFlush() Optional.of(partialAggregationController)); DriverContext driverContext = createDriverContext(1024); - List operator1Input = rowPagesBuilder(false, hashChannels, BIGINT) + List operator1Input = rowPagesBuilder(hashChannels, BIGINT) .addSequencePage(10, 0) // first page are unique values, so it would trigger adaptation, but it won't because flush is not called .addBlocksPage(createRepeatedValuesBlock(1, 2)) // second page will be hashed to existing value 1 .build(); @@ -898,7 +898,7 @@ public void testAdaptivePartialAggregationTriggeredOnlyOnFlush() assertInputRowsWithPartialAggregationDisabled(driverContext, 0); // second operator using the same factory, reuses PartialAggregationControl, so it will only produce raw pages (partial aggregation is disabled at this point) - List operator2Input = rowPagesBuilder(false, hashChannels, BIGINT) + List operator2Input = rowPagesBuilder(hashChannels, BIGINT) .addBlocksPage(createRepeatedValuesBlock(1, 10)) .addBlocksPage(createRepeatedValuesBlock(2, 10)) .build(); @@ -931,7 +931,7 @@ public void testAsyncSpillBlocksAndUnblocksDriver() long memoryLimitForMergeWithMemory = 0; // plenty of rows → revocable mem > - RowPagesBuilder pages = rowPagesBuilder(false, Ints.asList(0), BIGINT) + RowPagesBuilder pages = rowPagesBuilder(Ints.asList(0), BIGINT) .addSequencePage(5_000, 0); HashAggregationOperatorFactory factory = @@ -990,7 +990,7 @@ public void testRevocableMemoryConvertedAfterAsyncSpill() long memoryLimitForMergeWithMemory = 0; // make shouldMergeWithMemory() return false // plenty of rows to allocate >64 kB in the hash builder - RowPagesBuilder pagesBuilder = rowPagesBuilder(false, Ints.asList(0), BIGINT) + RowPagesBuilder pagesBuilder = rowPagesBuilder(Ints.asList(0), BIGINT) .addSequencePage(50_000, 0); SlowSpiller slowSpiller = new SlowSpiller(); diff --git 
a/core/trino-main/src/test/java/io/trino/operator/TestHashSemiJoinOperator.java b/core/trino-main/src/test/java/io/trino/operator/TestHashSemiJoinOperator.java index 1ec8b08141ee..e63f716d8207 100644 --- a/core/trino-main/src/test/java/io/trino/operator/TestHashSemiJoinOperator.java +++ b/core/trino-main/src/test/java/io/trino/operator/TestHashSemiJoinOperator.java @@ -82,7 +82,7 @@ public void testSemiJoin() // build OperatorContext operatorContext = driverContext.addOperatorContext(0, new PlanNodeId("test"), ValuesOperator.class.getSimpleName()); - RowPagesBuilder rowPagesBuilder = rowPagesBuilder(false, Ints.asList(0), BIGINT); + RowPagesBuilder rowPagesBuilder = rowPagesBuilder(Ints.asList(0), BIGINT); Operator buildOperator = new ValuesOperator(operatorContext, rowPagesBuilder .row(10L) .row(30L) @@ -109,7 +109,7 @@ public void testSemiJoin() // probe List probeTypes = ImmutableList.of(BIGINT, BIGINT); - RowPagesBuilder rowPagesBuilderProbe = rowPagesBuilder(false, Ints.asList(0), BIGINT, BIGINT); + RowPagesBuilder rowPagesBuilderProbe = rowPagesBuilder(Ints.asList(0), BIGINT, BIGINT); List probeInput = rowPagesBuilderProbe .addSequencePage(10, 30, 0) .build(); @@ -134,7 +134,7 @@ public void testSemiJoin() .row(39L, 9L, false) .build(); - OperatorAssertion.assertOperatorEquals(joinOperatorFactory, driverContext, probeInput, expected, false, ImmutableList.of(probeTypes.size())); + OperatorAssertion.assertOperatorEquals(joinOperatorFactory, driverContext, probeInput, expected); } @Test @@ -144,7 +144,7 @@ public void testSemiJoinOnVarcharType() // build OperatorContext operatorContext = driverContext.addOperatorContext(0, new PlanNodeId("test"), ValuesOperator.class.getSimpleName()); - RowPagesBuilder rowPagesBuilder = rowPagesBuilder(false, Ints.asList(0), VARCHAR); + RowPagesBuilder rowPagesBuilder = rowPagesBuilder(Ints.asList(0), VARCHAR); Operator buildOperator = new ValuesOperator(operatorContext, rowPagesBuilder .row("10") .row("30") @@ -171,7 +171,7 @@ public void testSemiJoinOnVarcharType() // probe List probeTypes = ImmutableList.of(VARCHAR, BIGINT); - RowPagesBuilder rowPagesBuilderProbe = rowPagesBuilder(false, Ints.asList(0), VARCHAR, BIGINT); + RowPagesBuilder rowPagesBuilderProbe = rowPagesBuilder(Ints.asList(0), VARCHAR, BIGINT); List probeInput = rowPagesBuilderProbe .addSequencePage(10, 30, 0) .build(); @@ -196,7 +196,7 @@ public void testSemiJoinOnVarcharType() .row("39", 9L, false) .build(); - OperatorAssertion.assertOperatorEquals(joinOperatorFactory, driverContext, probeInput, expected, false, ImmutableList.of(probeTypes.size())); + OperatorAssertion.assertOperatorEquals(joinOperatorFactory, driverContext, probeInput, expected); } @Test @@ -207,7 +207,7 @@ public void testBuildSideNulls() // build OperatorContext operatorContext = driverContext.addOperatorContext(0, new PlanNodeId("test"), ValuesOperator.class.getSimpleName()); List buildTypes = ImmutableList.of(BIGINT); - RowPagesBuilder rowPagesBuilder = rowPagesBuilder(false, Ints.asList(0), buildTypes); + RowPagesBuilder rowPagesBuilder = rowPagesBuilder(Ints.asList(0), buildTypes); Operator buildOperator = new ValuesOperator(operatorContext, rowPagesBuilder .row(0L) .row(1L) @@ -233,7 +233,7 @@ public void testBuildSideNulls() // probe List probeTypes = ImmutableList.of(BIGINT); - RowPagesBuilder rowPagesBuilderProbe = rowPagesBuilder(false, Ints.asList(0), probeTypes); + RowPagesBuilder rowPagesBuilderProbe = rowPagesBuilder(Ints.asList(0), probeTypes); List probeInput = rowPagesBuilderProbe 
.addSequencePage(4, 1) .build(); @@ -252,7 +252,7 @@ public void testBuildSideNulls() .row(4L, null) .build(); - OperatorAssertion.assertOperatorEquals(joinOperatorFactory, driverContext, probeInput, expected, false, ImmutableList.of(probeTypes.size())); + OperatorAssertion.assertOperatorEquals(joinOperatorFactory, driverContext, probeInput, expected); } @Test @@ -263,7 +263,7 @@ public void testProbeSideNulls() // build OperatorContext operatorContext = driverContext.addOperatorContext(0, new PlanNodeId("test"), ValuesOperator.class.getSimpleName()); List buildTypes = ImmutableList.of(BIGINT); - RowPagesBuilder rowPagesBuilder = rowPagesBuilder(false, Ints.asList(0), buildTypes); + RowPagesBuilder rowPagesBuilder = rowPagesBuilder(Ints.asList(0), buildTypes); Operator buildOperator = new ValuesOperator(operatorContext, rowPagesBuilder .row(0L) .row(1L) @@ -286,7 +286,7 @@ public void testProbeSideNulls() // probe List probeTypes = ImmutableList.of(BIGINT); - RowPagesBuilder rowPagesBuilderProbe = rowPagesBuilder(false, Ints.asList(0), probeTypes); + RowPagesBuilder rowPagesBuilderProbe = rowPagesBuilder(Ints.asList(0), probeTypes); List probeInput = rowPagesBuilderProbe .row(0L) .row((Object) null) @@ -308,7 +308,7 @@ public void testProbeSideNulls() .row(2L, false) .build(); - OperatorAssertion.assertOperatorEquals(joinOperatorFactory, driverContext, probeInput, expected, false, ImmutableList.of(probeTypes.size())); + OperatorAssertion.assertOperatorEquals(joinOperatorFactory, driverContext, probeInput, expected); } @Test @@ -319,7 +319,7 @@ public void testProbeAndBuildNulls() // build OperatorContext operatorContext = driverContext.addOperatorContext(0, new PlanNodeId("test"), ValuesOperator.class.getSimpleName()); List buildTypes = ImmutableList.of(BIGINT); - RowPagesBuilder rowPagesBuilder = rowPagesBuilder(false, Ints.asList(0), buildTypes); + RowPagesBuilder rowPagesBuilder = rowPagesBuilder(Ints.asList(0), buildTypes); Operator buildOperator = new ValuesOperator(operatorContext, rowPagesBuilder .row(0L) .row(1L) @@ -343,7 +343,7 @@ public void testProbeAndBuildNulls() // probe List probeTypes = ImmutableList.of(BIGINT); - RowPagesBuilder rowPagesBuilderProbe = rowPagesBuilder(false, Ints.asList(0), probeTypes); + RowPagesBuilder rowPagesBuilderProbe = rowPagesBuilder(Ints.asList(0), probeTypes); List probeInput = rowPagesBuilderProbe .row(0L) .row((Object) null) @@ -365,7 +365,7 @@ public void testProbeAndBuildNulls() .row(2L, null) .build(); - OperatorAssertion.assertOperatorEquals(joinOperatorFactory, driverContext, probeInput, expected, false, ImmutableList.of(probeTypes.size())); + OperatorAssertion.assertOperatorEquals(joinOperatorFactory, driverContext, probeInput, expected); } @Test @@ -378,7 +378,7 @@ public void testMemoryLimit() OperatorContext operatorContext = driverContext.addOperatorContext(0, new PlanNodeId("test"), ValuesOperator.class.getSimpleName()); List buildTypes = ImmutableList.of(BIGINT); - RowPagesBuilder rowPagesBuilder = rowPagesBuilder(false, Ints.asList(0), buildTypes); + RowPagesBuilder rowPagesBuilder = rowPagesBuilder(Ints.asList(0), buildTypes); Operator buildOperator = new ValuesOperator(operatorContext, rowPagesBuilder .addSequencePage(10000, 20) .build()); diff --git a/core/trino-main/src/test/java/io/trino/operator/TestMarkDistinctOperator.java b/core/trino-main/src/test/java/io/trino/operator/TestMarkDistinctOperator.java index ca753f218c5c..a22abccd09f7 100644 --- a/core/trino-main/src/test/java/io/trino/operator/TestMarkDistinctOperator.java 
+++ b/core/trino-main/src/test/java/io/trino/operator/TestMarkDistinctOperator.java @@ -30,7 +30,6 @@ import org.junit.jupiter.api.parallel.Execution; import java.util.List; -import java.util.Optional; import java.util.concurrent.ExecutorService; import java.util.concurrent.ScheduledExecutorService; @@ -38,7 +37,7 @@ import static io.airlift.concurrent.Threads.daemonThreadsNamed; import static io.trino.RowPagesBuilder.rowPagesBuilder; import static io.trino.SessionTestUtils.TEST_SESSION; -import static io.trino.operator.GroupByHashYieldAssertion.createPagesWithDistinctHashKeys; +import static io.trino.operator.GroupByHashYieldAssertion.createPages; import static io.trino.operator.GroupByHashYieldAssertion.finishOperatorWithYieldingGroupByHash; import static io.trino.spi.type.BigintType.BIGINT; import static io.trino.spi.type.BooleanType.BOOLEAN; @@ -70,13 +69,8 @@ public void tearDown() @Test public void testMarkDistinct() { - testMarkDistinct(true, newDriverContext()); - testMarkDistinct(false, newDriverContext()); - } - - private void testMarkDistinct(boolean hashEnabled, DriverContext driverContext) - { - RowPagesBuilder rowPagesBuilder = rowPagesBuilder(hashEnabled, Ints.asList(0), BIGINT); + DriverContext driverContext = newDriverContext(); + RowPagesBuilder rowPagesBuilder = rowPagesBuilder(Ints.asList(0), BIGINT); List input = rowPagesBuilder .addSequencePage(100, 0) .addSequencePage(100, 0) @@ -95,19 +89,14 @@ private void testMarkDistinct(boolean hashEnabled, DriverContext driverContext) expected.row(i, false); } - OperatorAssertion.assertOperatorEqualsIgnoreOrder(operatorFactory, driverContext, input, expected.build(), hashEnabled, Optional.of(1)); + OperatorAssertion.assertOperatorEqualsIgnoreOrder(operatorFactory, driverContext, input, expected.build()); } @Test public void testRleDistinctMask() { - testRleDistinctMask(true, newDriverContext()); - testRleDistinctMask(false, newDriverContext()); - } - - private void testRleDistinctMask(boolean hashEnabled, DriverContext driverContext) - { - RowPagesBuilder rowPagesBuilder = rowPagesBuilder(hashEnabled, Ints.asList(0), BIGINT); + DriverContext driverContext = newDriverContext(); + RowPagesBuilder rowPagesBuilder = rowPagesBuilder(Ints.asList(0), BIGINT); List inputs = rowPagesBuilder .addSequencePage(100, 0) .addSequencePage(100, 50) @@ -176,7 +165,7 @@ public void testMemoryReservationYield() private void testMemoryReservationYield(Type type) { - List input = createPagesWithDistinctHashKeys(type, 6_000, 600); + List input = createPages(type, 6_000, 600); OperatorFactory operatorFactory = new MarkDistinctOperatorFactory(0, new PlanNodeId("test"), ImmutableList.of(type), ImmutableList.of(0), hashStrategyCompiler); @@ -187,9 +176,9 @@ private void testMemoryReservationYield(Type type) int count = 0; for (Page page : result.getOutput()) { - assertThat(page.getChannelCount()).isEqualTo(3); + assertThat(page.getChannelCount()).isEqualTo(2); for (int i = 0; i < page.getPositionCount(); i++) { - assertThat(BOOLEAN.getBoolean(page.getBlock(2), i)).isTrue(); + assertThat(BOOLEAN.getBoolean(page.getBlock(1), i)).isTrue(); count++; } } diff --git a/core/trino-main/src/test/java/io/trino/operator/TestRowNumberOperator.java b/core/trino-main/src/test/java/io/trino/operator/TestRowNumberOperator.java index fe8776db139a..f833e785296f 100644 --- a/core/trino-main/src/test/java/io/trino/operator/TestRowNumberOperator.java +++ b/core/trino-main/src/test/java/io/trino/operator/TestRowNumberOperator.java @@ -42,7 +42,7 @@ import static 
io.airlift.concurrent.Threads.daemonThreadsNamed; import static io.trino.RowPagesBuilder.rowPagesBuilder; import static io.trino.SessionTestUtils.TEST_SESSION; -import static io.trino.operator.GroupByHashYieldAssertion.createPagesWithDistinctHashKeys; +import static io.trino.operator.GroupByHashYieldAssertion.createPages; import static io.trino.operator.GroupByHashYieldAssertion.finishOperatorWithYieldingGroupByHash; import static io.trino.operator.OperatorAssertion.toMaterializedResult; import static io.trino.operator.OperatorAssertion.toPages; @@ -142,7 +142,7 @@ public void testRowNumberUnpartitioned() public void testMemoryReservationYield() { for (Type type : Arrays.asList(VARCHAR, BIGINT)) { - List input = createPagesWithDistinctHashKeys(type, 6_000, 600); + List input = createPages(type, 6_000, 600); OperatorFactory operatorFactory = new RowNumberOperator.RowNumberOperatorFactory( 0, @@ -176,7 +176,7 @@ public void testMemoryReservationYield() public void testRowNumberPartitioned() { DriverContext driverContext = getDriverContext(); - RowPagesBuilder rowPagesBuilder = rowPagesBuilder(false, Ints.asList(0), BIGINT, DOUBLE); + RowPagesBuilder rowPagesBuilder = rowPagesBuilder(Ints.asList(0), BIGINT, DOUBLE); List input = rowPagesBuilder .row(1L, 0.3) .row(2L, 0.2) @@ -241,7 +241,7 @@ public void testRowNumberPartitioned() public void testRowNumberPartitionedLimit() { DriverContext driverContext = getDriverContext(); - RowPagesBuilder rowPagesBuilder = rowPagesBuilder(false, Ints.asList(0), BIGINT, DOUBLE); + RowPagesBuilder rowPagesBuilder = rowPagesBuilder(Ints.asList(0), BIGINT, DOUBLE); List input = rowPagesBuilder .row(1L, 0.3) .row(2L, 0.2) diff --git a/core/trino-main/src/test/java/io/trino/operator/TestTopNRankingOperator.java b/core/trino-main/src/test/java/io/trino/operator/TestTopNRankingOperator.java index 648b766033c1..90c711980365 100644 --- a/core/trino-main/src/test/java/io/trino/operator/TestTopNRankingOperator.java +++ b/core/trino-main/src/test/java/io/trino/operator/TestTopNRankingOperator.java @@ -40,7 +40,7 @@ import static io.airlift.concurrent.Threads.daemonThreadsNamed; import static io.trino.RowPagesBuilder.rowPagesBuilder; import static io.trino.SessionTestUtils.TEST_SESSION; -import static io.trino.operator.GroupByHashYieldAssertion.createPagesWithDistinctHashKeys; +import static io.trino.operator.GroupByHashYieldAssertion.createPages; import static io.trino.operator.GroupByHashYieldAssertion.finishOperatorWithYieldingGroupByHash; import static io.trino.operator.OperatorAssertion.assertOperatorEquals; import static io.trino.spi.connector.SortOrder.ASC_NULLS_FIRST; @@ -79,56 +79,54 @@ public void tearDown() @Test public void testPartitioned() { - for (boolean hashEnabled : Arrays.asList(true, false)) { - DriverContext driverContext = newDriverContext(); + DriverContext driverContext = newDriverContext(); - RowPagesBuilder rowPagesBuilder = rowPagesBuilder(hashEnabled, Ints.asList(0), VARCHAR, DOUBLE); - List input = rowPagesBuilder - .row("a", 0.3) - .row("b", 0.2) - .row("c", 0.1) - .row("c", 0.91) - .pageBreak() - .row("a", 0.4) - .pageBreak() - .row("a", 0.5) - .row("a", 0.6) - .row("b", 0.7) - .row("b", 0.8) - .pageBreak() - .row("b", 0.9) - .build(); + RowPagesBuilder rowPagesBuilder = rowPagesBuilder(Ints.asList(0), VARCHAR, DOUBLE); + List input = rowPagesBuilder + .row("a", 0.3) + .row("b", 0.2) + .row("c", 0.1) + .row("c", 0.91) + .pageBreak() + .row("a", 0.4) + .pageBreak() + .row("a", 0.5) + .row("a", 0.6) + .row("b", 0.7) + .row("b", 0.8) + 
.pageBreak() + .row("b", 0.9) + .build(); - TopNRankingOperatorFactory operatorFactory = new TopNRankingOperatorFactory( - 0, - new PlanNodeId("test"), - ROW_NUMBER, - ImmutableList.of(VARCHAR, DOUBLE), - Ints.asList(1, 0), - Ints.asList(0), - ImmutableList.of(VARCHAR), - Ints.asList(1), - 3, - false, - 10, - Optional.empty(), - hashStrategyCompiler, - orderingCompiler.compilePageWithPositionComparator(ImmutableList.of(DOUBLE), Ints.asList(1), ImmutableList.of(SortOrder.ASC_NULLS_LAST)), - blockTypeOperators); + TopNRankingOperatorFactory operatorFactory = new TopNRankingOperatorFactory( + 0, + new PlanNodeId("test"), + ROW_NUMBER, + ImmutableList.of(VARCHAR, DOUBLE), + Ints.asList(1, 0), + Ints.asList(0), + ImmutableList.of(VARCHAR), + Ints.asList(1), + 3, + false, + 10, + Optional.empty(), + hashStrategyCompiler, + orderingCompiler.compilePageWithPositionComparator(ImmutableList.of(DOUBLE), Ints.asList(1), ImmutableList.of(SortOrder.ASC_NULLS_LAST)), + blockTypeOperators); - MaterializedResult expected = resultBuilder(driverContext.getSession(), DOUBLE, VARCHAR, BIGINT) - .row(0.3, "a", 1L) - .row(0.4, "a", 2L) - .row(0.5, "a", 3L) - .row(0.2, "b", 1L) - .row(0.7, "b", 2L) - .row(0.8, "b", 3L) - .row(0.1, "c", 1L) - .row(0.91, "c", 2L) - .build(); + MaterializedResult expected = resultBuilder(driverContext.getSession(), DOUBLE, VARCHAR, BIGINT) + .row(0.3, "a", 1L) + .row(0.4, "a", 2L) + .row(0.5, "a", 3L) + .row(0.2, "b", 1L) + .row(0.7, "b", 2L) + .row(0.8, "b", 3L) + .row(0.1, "c", 1L) + .row(0.91, "c", 2L) + .build(); - assertOperatorEquals(operatorFactory, driverContext, input, expected); - } + assertOperatorEquals(operatorFactory, driverContext, input, expected); } @Test @@ -257,7 +255,7 @@ public void testPartialFlush() public void testMemoryReservationYield() { Type type = BIGINT; - List input = createPagesWithDistinctHashKeys(type, 1_000, 500); + List input = createPages(type, 1_000, 500); OperatorFactory operatorFactory = new TopNRankingOperatorFactory( 0, diff --git a/core/trino-main/src/test/java/io/trino/operator/join/BenchmarkHashBuildAndJoinOperators.java b/core/trino-main/src/test/java/io/trino/operator/join/BenchmarkHashBuildAndJoinOperators.java index e14846065a4e..b2e07358939f 100644 --- a/core/trino-main/src/test/java/io/trino/operator/join/BenchmarkHashBuildAndJoinOperators.java +++ b/core/trino-main/src/test/java/io/trino/operator/join/BenchmarkHashBuildAndJoinOperators.java @@ -165,7 +165,7 @@ public List getBuildPages() protected void initializeBuildPages() { - RowPagesBuilder buildPagesBuilder = rowPagesBuilder(false, hashChannels, ImmutableList.of(VARCHAR, BIGINT, BIGINT)); + RowPagesBuilder buildPagesBuilder = rowPagesBuilder(hashChannels, ImmutableList.of(VARCHAR, BIGINT, BIGINT)); int maxValue = buildRowsNumber / buildRowsRepetition + 40; int rows = 0; @@ -249,7 +249,7 @@ public List getProbePages() protected void initializeProbePages() { - RowPagesBuilder probePagesBuilder = rowPagesBuilder(false, hashChannels, ImmutableList.of(VARCHAR, BIGINT, BIGINT)); + RowPagesBuilder probePagesBuilder = rowPagesBuilder(hashChannels, ImmutableList.of(VARCHAR, BIGINT, BIGINT)); Random random = new Random(42); int remainingRows = PROBE_ROWS_NUMBER; diff --git a/core/trino-main/src/test/java/io/trino/operator/join/TestHashJoinOperator.java b/core/trino-main/src/test/java/io/trino/operator/join/TestHashJoinOperator.java index 6b2b442728fa..b871b7eea072 100644 --- a/core/trino-main/src/test/java/io/trino/operator/join/TestHashJoinOperator.java +++ 
b/core/trino-main/src/test/java/io/trino/operator/join/TestHashJoinOperator.java @@ -154,13 +154,13 @@ private void testInnerJoin(boolean parallelBuild) TaskContext taskContext = createTaskContext(); // build factory - RowPagesBuilder buildPages = rowPagesBuilder(false, Ints.asList(0), ImmutableList.of(VARCHAR, BIGINT, BIGINT)) + RowPagesBuilder buildPages = rowPagesBuilder(Ints.asList(0), ImmutableList.of(VARCHAR, BIGINT, BIGINT)) .addSequencePage(10, 20, 30, 40); BuildSideSetup buildSideSetup = setupBuildSide(nodePartitioningManager, parallelBuild, taskContext, buildPages, Optional.empty(), false, SINGLE_STREAM_SPILLER_FACTORY); JoinBridgeManager lookupSourceFactory = buildSideSetup.getLookupSourceFactoryManager(); // probe factory - RowPagesBuilder probePages = rowPagesBuilder(false, Ints.asList(0), ImmutableList.of(VARCHAR, BIGINT, BIGINT)); + RowPagesBuilder probePages = rowPagesBuilder(Ints.asList(0), ImmutableList.of(VARCHAR, BIGINT, BIGINT)); List probeInput = probePages .addSequencePage(1000, 0, 1000, 2000) .build(); @@ -171,7 +171,7 @@ private void testInnerJoin(boolean parallelBuild) buildLookupSource(executor, buildSideSetup); // expected - MaterializedResult expected = MaterializedResult.resultBuilder(taskContext.getSession(), concat(probePages.getTypesWithoutHash(), buildPages.getTypesWithoutHash())) + MaterializedResult expected = MaterializedResult.resultBuilder(taskContext.getSession(), concat(probePages.getTypes(), buildPages.getTypes())) .row("20", 1020L, 2020L, "20", 30L, 40L) .row("21", 1021L, 2021L, "21", 31L, 41L) .row("22", 1022L, 2022L, "22", 32L, 42L) @@ -193,14 +193,14 @@ public void testInnerJoinWithRunLengthEncodedProbe() TaskContext taskContext = createTaskContext(); // build factory - RowPagesBuilder buildPages = rowPagesBuilder(false, Ints.asList(0), ImmutableList.of(VARCHAR)) + RowPagesBuilder buildPages = rowPagesBuilder(Ints.asList(0), ImmutableList.of(VARCHAR)) .addSequencePage(10, 20) .addSequencePage(10, 21); BuildSideSetup buildSideSetup = setupBuildSide(nodePartitioningManager, false, taskContext, buildPages, Optional.empty(), false, SINGLE_STREAM_SPILLER_FACTORY); JoinBridgeManager lookupSourceFactory = buildSideSetup.getLookupSourceFactoryManager(); // probe factory - RowPagesBuilder probePages = rowPagesBuilder(false, Ints.asList(0), ImmutableList.of(VARCHAR)); + RowPagesBuilder probePages = rowPagesBuilder(Ints.asList(0), ImmutableList.of(VARCHAR)); List probeInput = ImmutableList.of( new Page(RunLengthEncodedBlock.create(VARCHAR, Slices.utf8Slice("20"), 2)), new Page(RunLengthEncodedBlock.create(VARCHAR, Slices.utf8Slice("-1"), 2)), @@ -212,7 +212,7 @@ public void testInnerJoinWithRunLengthEncodedProbe() buildLookupSource(executor, buildSideSetup); // expected - MaterializedResult expected = MaterializedResult.resultBuilder(taskContext.getSession(), concat(probePages.getTypesWithoutHash(), buildPages.getTypesWithoutHash())) + MaterializedResult expected = MaterializedResult.resultBuilder(taskContext.getSession(), concat(probePages.getTypes(), buildPages.getTypes())) .row("20", "20") .row("20", "20") .row("21", "21") @@ -244,13 +244,13 @@ public void testYield() // build with 40 entries int entries = 40; - RowPagesBuilder buildPages = rowPagesBuilder(false, Ints.asList(0), ImmutableList.of(BIGINT)) + RowPagesBuilder buildPages = rowPagesBuilder(Ints.asList(0), ImmutableList.of(BIGINT)) .addSequencePage(entries, 42); BuildSideSetup buildSideSetup = setupBuildSide(nodePartitioningManager, true, taskContext, buildPages, Optional.of(filterFunction), 
false, SINGLE_STREAM_SPILLER_FACTORY); JoinBridgeManager lookupSourceFactory = buildSideSetup.getLookupSourceFactoryManager(); // probe matching the above 40 entries - RowPagesBuilder probePages = rowPagesBuilder(false, Ints.asList(0), ImmutableList.of(BIGINT)); + RowPagesBuilder probePages = rowPagesBuilder(Ints.asList(0), ImmutableList.of(BIGINT)); List probeInput = probePages.addSequencePage(100, 0).build(); OperatorFactory joinOperatorFactory = spillingJoin( innerJoin(false, false), @@ -385,7 +385,7 @@ private void innerJoinWithSpill(List whenSpill, SingleStreamSpillerFa }); // build factory - RowPagesBuilder buildPages = rowPagesBuilder(false, Ints.asList(0), ImmutableList.of(VARCHAR, BIGINT)) + RowPagesBuilder buildPages = rowPagesBuilder(Ints.asList(0), ImmutableList.of(VARCHAR, BIGINT)) .addSequencePage(4, 20, 200) .addSequencePage(4, 20, 200) .addSequencePage(4, 30, 300) @@ -395,7 +395,7 @@ private void innerJoinWithSpill(List whenSpill, SingleStreamSpillerFa JoinBridgeManager lookupSourceFactoryManager = buildSideSetup.getLookupSourceFactoryManager(); // probe factory - RowPagesBuilder probePages = rowPagesBuilder(false, Ints.asList(0), ImmutableList.of(VARCHAR, BIGINT)) + RowPagesBuilder probePages = rowPagesBuilder(Ints.asList(0), ImmutableList.of(VARCHAR, BIGINT)) .row("20", 123_000L) .row("20", 123_000L) .pageBreak() @@ -469,7 +469,7 @@ private void innerJoinWithSpill(List whenSpill, SingleStreamSpillerFa List actualPages = getPages(pageBuffer); - MaterializedResult expected = MaterializedResult.resultBuilder(taskContext.getSession(), concat(probePages.getTypesWithoutHash(), buildPages.getTypesWithoutHash())) + MaterializedResult expected = MaterializedResult.resultBuilder(taskContext.getSession(), concat(probePages.getTypes(), buildPages.getTypes())) .row("20", 123_000L, "20", 200L) .row("20", 123_000L, "20", 200L) .row("20", 123_000L, "20", 200L) @@ -557,7 +557,7 @@ public void testBuildGracefulSpill() TaskContext taskContext = TestingTaskContext.createTaskContext(executor, scheduledExecutor, TEST_SESSION, taskStateMachine); // build factory - RowPagesBuilder buildPages = rowPagesBuilder(false, Ints.asList(0), ImmutableList.of(VARCHAR, BIGINT)) + RowPagesBuilder buildPages = rowPagesBuilder(Ints.asList(0), ImmutableList.of(VARCHAR, BIGINT)) .addSequencePage(4, 20, 200); DummySpillerFactory buildSpillerFactory = new DummySpillerFactory(); @@ -607,7 +607,7 @@ private void testInnerJoinWithNullProbe(boolean parallelBuild) // build factory List buildTypes = ImmutableList.of(VARCHAR); - RowPagesBuilder buildPages = rowPagesBuilder(false, Ints.asList(0), buildTypes) + RowPagesBuilder buildPages = rowPagesBuilder(Ints.asList(0), buildTypes) .row("a") .row("b") .row("c"); @@ -616,7 +616,7 @@ private void testInnerJoinWithNullProbe(boolean parallelBuild) // probe factory List probeTypes = ImmutableList.of(VARCHAR); - RowPagesBuilder probePages = rowPagesBuilder(false, Ints.asList(0), probeTypes); + RowPagesBuilder probePages = rowPagesBuilder(Ints.asList(0), probeTypes); List probeInput = probePages .row("a") .row((String) null) @@ -631,7 +631,7 @@ private void testInnerJoinWithNullProbe(boolean parallelBuild) buildLookupSource(executor, buildSideSetup); // expected - MaterializedResult expected = MaterializedResult.resultBuilder(taskContext.getSession(), concat(probeTypes, buildPages.getTypesWithoutHash())) + MaterializedResult expected = MaterializedResult.resultBuilder(taskContext.getSession(), concat(probeTypes, buildPages.getTypes())) .row("a", "a") .row("a", "a") .row("b", 
"b") @@ -652,7 +652,7 @@ private void testInnerJoinWithOutputSingleMatch(boolean parallelBuild) TaskContext taskContext = createTaskContext(); // build factory List buildTypes = ImmutableList.of(VARCHAR); - RowPagesBuilder buildPages = rowPagesBuilder(false, Ints.asList(0), buildTypes) + RowPagesBuilder buildPages = rowPagesBuilder(Ints.asList(0), buildTypes) .row("a") .row("a") .row("b"); @@ -661,7 +661,7 @@ private void testInnerJoinWithOutputSingleMatch(boolean parallelBuild) // probe factory List probeTypes = ImmutableList.of(VARCHAR); - RowPagesBuilder probePages = rowPagesBuilder(false, Ints.asList(0), probeTypes); + RowPagesBuilder probePages = rowPagesBuilder(Ints.asList(0), probeTypes); List probeInput = probePages .row("a") .row("b") @@ -695,7 +695,7 @@ private void testInnerJoinWithNullBuild(boolean parallelBuild) // build factory List buildTypes = ImmutableList.of(VARCHAR); - RowPagesBuilder buildPages = rowPagesBuilder(false, Ints.asList(0), buildTypes) + RowPagesBuilder buildPages = rowPagesBuilder(Ints.asList(0), buildTypes) .row("a") .row((String) null) .row((String) null) @@ -706,7 +706,7 @@ private void testInnerJoinWithNullBuild(boolean parallelBuild) // probe factory List probeTypes = ImmutableList.of(VARCHAR); - RowPagesBuilder probePages = rowPagesBuilder(false, Ints.asList(0), probeTypes); + RowPagesBuilder probePages = rowPagesBuilder(Ints.asList(0), probeTypes); List probeInput = probePages .row("a") .row("b") @@ -741,7 +741,7 @@ private void testInnerJoinWithNullOnBothSides(boolean parallelBuild) // build factory List buildTypes = ImmutableList.of(VARCHAR); - RowPagesBuilder buildPages = rowPagesBuilder(false, Ints.asList(0), buildTypes) + RowPagesBuilder buildPages = rowPagesBuilder(Ints.asList(0), buildTypes) .row("a") .row((String) null) .row((String) null) @@ -752,7 +752,7 @@ private void testInnerJoinWithNullOnBothSides(boolean parallelBuild) // probe factory List probeTypes = ImmutableList.of(VARCHAR); - RowPagesBuilder probePages = rowPagesBuilder(false, Ints.asList(0), probeTypes); + RowPagesBuilder probePages = rowPagesBuilder(Ints.asList(0), probeTypes); List probeInput = probePages .row("a") .row("b") @@ -788,14 +788,14 @@ private void testProbeOuterJoin(boolean parallelBuild) // build factory List buildTypes = ImmutableList.of(VARCHAR, BIGINT, BIGINT); - RowPagesBuilder buildPages = rowPagesBuilder(false, Ints.asList(0), ImmutableList.of(VARCHAR, BIGINT, BIGINT)) + RowPagesBuilder buildPages = rowPagesBuilder(Ints.asList(0), ImmutableList.of(VARCHAR, BIGINT, BIGINT)) .addSequencePage(10, 20, 30, 40); BuildSideSetup buildSideSetup = setupBuildSide(nodePartitioningManager, parallelBuild, taskContext, buildPages, Optional.empty(), false, SINGLE_STREAM_SPILLER_FACTORY); JoinBridgeManager lookupSourceFactory = buildSideSetup.getLookupSourceFactoryManager(); // probe factory List probeTypes = ImmutableList.of(VARCHAR, BIGINT, BIGINT); - RowPagesBuilder probePages = rowPagesBuilder(false, Ints.asList(0), probeTypes); + RowPagesBuilder probePages = rowPagesBuilder(Ints.asList(0), probeTypes); List probeInput = probePages .addSequencePage(15, 20, 1020, 2020) .build(); @@ -843,14 +843,14 @@ private void testProbeOuterJoinWithFilterFunction(boolean parallelBuild) // build factory List buildTypes = ImmutableList.of(VARCHAR, BIGINT, BIGINT); - RowPagesBuilder buildPages = rowPagesBuilder(false, Ints.asList(0), ImmutableList.of(VARCHAR, BIGINT, BIGINT)) + RowPagesBuilder buildPages = rowPagesBuilder(Ints.asList(0), ImmutableList.of(VARCHAR, BIGINT, BIGINT)) 
.addSequencePage(10, 20, 30, 40); BuildSideSetup buildSideSetup = setupBuildSide(nodePartitioningManager, parallelBuild, taskContext, buildPages, Optional.of(filterFunction), false, SINGLE_STREAM_SPILLER_FACTORY); JoinBridgeManager lookupSourceFactory = buildSideSetup.getLookupSourceFactoryManager(); // probe factory List probeTypes = ImmutableList.of(VARCHAR, BIGINT, BIGINT); - RowPagesBuilder probePages = rowPagesBuilder(false, Ints.asList(0), probeTypes); + RowPagesBuilder probePages = rowPagesBuilder(Ints.asList(0), probeTypes); List probeInput = probePages .addSequencePage(15, 20, 1020, 2020) .build(); @@ -895,7 +895,7 @@ private void testOuterJoinWithNullProbe(boolean parallelBuild) // build factory List buildTypes = ImmutableList.of(VARCHAR); - RowPagesBuilder buildPages = rowPagesBuilder(false, Ints.asList(0), buildTypes) + RowPagesBuilder buildPages = rowPagesBuilder(Ints.asList(0), buildTypes) .row("a") .row("b") .row("c"); @@ -904,7 +904,7 @@ private void testOuterJoinWithNullProbe(boolean parallelBuild) // probe factory List probeTypes = ImmutableList.of(VARCHAR); - RowPagesBuilder probePages = rowPagesBuilder(false, Ints.asList(0), probeTypes); + RowPagesBuilder probePages = rowPagesBuilder(Ints.asList(0), probeTypes); List probeInput = probePages .row("a") .row((String) null) @@ -946,7 +946,7 @@ private void testOuterJoinWithNullProbeAndFilterFunction(boolean parallelBuild) // build factory List buildTypes = ImmutableList.of(VARCHAR); - RowPagesBuilder buildPages = rowPagesBuilder(false, Ints.asList(0), buildTypes) + RowPagesBuilder buildPages = rowPagesBuilder(Ints.asList(0), buildTypes) .row("a") .row("b") .row("c"); @@ -955,7 +955,7 @@ private void testOuterJoinWithNullProbeAndFilterFunction(boolean parallelBuild) // probe factory List probeTypes = ImmutableList.of(VARCHAR); - RowPagesBuilder probePages = rowPagesBuilder(false, Ints.asList(0), probeTypes); + RowPagesBuilder probePages = rowPagesBuilder(Ints.asList(0), probeTypes); List probeInput = probePages .row("a") .row((String) null) @@ -994,7 +994,7 @@ private void testOuterJoinWithNullBuild(boolean parallelBuild) // build factory List buildTypes = ImmutableList.of(VARCHAR); - RowPagesBuilder buildPages = rowPagesBuilder(false, Ints.asList(0), ImmutableList.of(VARCHAR)) + RowPagesBuilder buildPages = rowPagesBuilder(Ints.asList(0), ImmutableList.of(VARCHAR)) .row("a") .row((String) null) .row((String) null) @@ -1005,7 +1005,7 @@ private void testOuterJoinWithNullBuild(boolean parallelBuild) // probe factory List probeTypes = ImmutableList.of(VARCHAR); - RowPagesBuilder probePages = rowPagesBuilder(false, Ints.asList(0), probeTypes); + RowPagesBuilder probePages = rowPagesBuilder(Ints.asList(0), probeTypes); List probeInput = probePages .row("a") .row("b") @@ -1045,7 +1045,7 @@ private void testOuterJoinWithNullBuildAndFilterFunction(boolean parallelBuild) // build factory List buildTypes = ImmutableList.of(VARCHAR); - RowPagesBuilder buildPages = rowPagesBuilder(false, Ints.asList(0), ImmutableList.of(VARCHAR)) + RowPagesBuilder buildPages = rowPagesBuilder(Ints.asList(0), ImmutableList.of(VARCHAR)) .row("a") .row((String) null) .row((String) null) @@ -1056,7 +1056,7 @@ private void testOuterJoinWithNullBuildAndFilterFunction(boolean parallelBuild) // probe factory List probeTypes = ImmutableList.of(VARCHAR); - RowPagesBuilder probePages = rowPagesBuilder(false, Ints.asList(0), probeTypes); + RowPagesBuilder probePages = rowPagesBuilder(Ints.asList(0), probeTypes); List probeInput = probePages .row("a") .row("b") @@ 
-1091,7 +1091,7 @@ private void testOuterJoinWithNullOnBothSides(boolean parallelBuild) TaskContext taskContext = createTaskContext(); // build factory - RowPagesBuilder buildPages = rowPagesBuilder(false, Ints.asList(0), ImmutableList.of(VARCHAR)) + RowPagesBuilder buildPages = rowPagesBuilder(Ints.asList(0), ImmutableList.of(VARCHAR)) .row("a") .row((String) null) .row((String) null) @@ -1102,7 +1102,7 @@ private void testOuterJoinWithNullOnBothSides(boolean parallelBuild) // probe factory List probeTypes = ImmutableList.of(VARCHAR); - RowPagesBuilder probePages = rowPagesBuilder(false, Ints.asList(0), probeTypes); + RowPagesBuilder probePages = rowPagesBuilder(Ints.asList(0), probeTypes); List probeInput = probePages .row("a") .row("b") @@ -1116,7 +1116,7 @@ private void testOuterJoinWithNullOnBothSides(boolean parallelBuild) buildLookupSource(executor, buildSideSetup); // expected - MaterializedResult expected = MaterializedResult.resultBuilder(taskContext.getSession(), concat(probeTypes, buildPages.getTypesWithoutHash())) + MaterializedResult expected = MaterializedResult.resultBuilder(taskContext.getSession(), concat(probeTypes, buildPages.getTypes())) .row("a", "a") .row("a", "a") .row("b", "b") @@ -1143,7 +1143,7 @@ private void testOuterJoinWithNullOnBothSidesAndFilterFunction(boolean parallelB ImmutableSet.of("a", "c").contains(VARCHAR.getSlice(rightPage.getBlock(0), rightPosition).toStringAscii())); // build factory - RowPagesBuilder buildPages = rowPagesBuilder(false, Ints.asList(0), ImmutableList.of(VARCHAR)) + RowPagesBuilder buildPages = rowPagesBuilder(Ints.asList(0), ImmutableList.of(VARCHAR)) .row("a") .row((String) null) .row((String) null) @@ -1154,7 +1154,7 @@ private void testOuterJoinWithNullOnBothSidesAndFilterFunction(boolean parallelB // probe factory List probeTypes = ImmutableList.of(VARCHAR); - RowPagesBuilder probePages = rowPagesBuilder(false, Ints.asList(0), probeTypes); + RowPagesBuilder probePages = rowPagesBuilder(Ints.asList(0), probeTypes); List probeInput = probePages .row("a") .row("b") @@ -1168,7 +1168,7 @@ private void testOuterJoinWithNullOnBothSidesAndFilterFunction(boolean parallelB buildLookupSource(executor, buildSideSetup); // expected - MaterializedResult expected = MaterializedResult.resultBuilder(taskContext.getSession(), concat(probeTypes, buildPages.getTypesWithoutHash())) + MaterializedResult expected = MaterializedResult.resultBuilder(taskContext.getSession(), concat(probeTypes, buildPages.getTypes())) .row("a", "a") .row("a", "a") .row("b", null) @@ -1190,7 +1190,7 @@ private void testMemoryLimit(boolean parallelBuild) { TaskContext taskContext = TestingTaskContext.createTaskContext(executor, scheduledExecutor, TEST_SESSION, DataSize.ofBytes(100)); - RowPagesBuilder buildPages = rowPagesBuilder(false, Ints.asList(0), ImmutableList.of(VARCHAR, BIGINT, BIGINT)) + RowPagesBuilder buildPages = rowPagesBuilder(Ints.asList(0), ImmutableList.of(VARCHAR, BIGINT, BIGINT)) .addSequencePage(10, 20, 30, 40); BuildSideSetup buildSideSetup = setupBuildSide(nodePartitioningManager, parallelBuild, taskContext, buildPages, Optional.empty(), false, SINGLE_STREAM_SPILLER_FACTORY); instantiateBuildDrivers(buildSideSetup, taskContext); @@ -1409,13 +1409,13 @@ private void testInnerJoinWithEmptyLookupSource(boolean parallelBuild) // build factory List buildTypes = ImmutableList.of(VARCHAR); - RowPagesBuilder buildPages = rowPagesBuilder(false, Ints.asList(0), buildTypes); + RowPagesBuilder buildPages = rowPagesBuilder(Ints.asList(0), buildTypes); 
BuildSideSetup buildSideSetup = setupBuildSide(nodePartitioningManager, parallelBuild, taskContext, buildPages, Optional.empty(), false, SINGLE_STREAM_SPILLER_FACTORY); JoinBridgeManager lookupSourceFactoryManager = buildSideSetup.getLookupSourceFactoryManager(); // probe factory List probeTypes = ImmutableList.of(VARCHAR); - RowPagesBuilder probePages = rowPagesBuilder(false, Ints.asList(0), probeTypes); + RowPagesBuilder probePages = rowPagesBuilder(Ints.asList(0), probeTypes); OperatorFactory joinOperatorFactory = spillingJoin( innerJoin(false, false), 0, @@ -1452,13 +1452,13 @@ private void testLookupOuterJoinWithEmptyLookupSource(boolean parallelBuild) // build factory List buildTypes = ImmutableList.of(VARCHAR); - RowPagesBuilder buildPages = rowPagesBuilder(false, Ints.asList(0), buildTypes); + RowPagesBuilder buildPages = rowPagesBuilder(Ints.asList(0), buildTypes); BuildSideSetup buildSideSetup = setupBuildSide(nodePartitioningManager, parallelBuild, taskContext, buildPages, Optional.empty(), false, SINGLE_STREAM_SPILLER_FACTORY); JoinBridgeManager lookupSourceFactoryManager = buildSideSetup.getLookupSourceFactoryManager(); // probe factory List probeTypes = ImmutableList.of(VARCHAR); - RowPagesBuilder probePages = rowPagesBuilder(false, Ints.asList(0), probeTypes); + RowPagesBuilder probePages = rowPagesBuilder(Ints.asList(0), probeTypes); OperatorFactory joinOperatorFactory = spillingJoin( lookupOuterJoin(false), 0, @@ -1495,13 +1495,13 @@ private void testProbeOuterJoinWithEmptyLookupSource(boolean parallelBuild) // build factory List buildTypes = ImmutableList.of(VARCHAR); - RowPagesBuilder buildPages = rowPagesBuilder(false, Ints.asList(0), buildTypes); + RowPagesBuilder buildPages = rowPagesBuilder(Ints.asList(0), buildTypes); BuildSideSetup buildSideSetup = setupBuildSide(nodePartitioningManager, parallelBuild, taskContext, buildPages, Optional.empty(), false, SINGLE_STREAM_SPILLER_FACTORY); JoinBridgeManager lookupSourceFactoryManager = buildSideSetup.getLookupSourceFactoryManager(); // probe factory List probeTypes = ImmutableList.of(VARCHAR); - RowPagesBuilder probePages = rowPagesBuilder(false, Ints.asList(0), probeTypes); + RowPagesBuilder probePages = rowPagesBuilder(Ints.asList(0), probeTypes); List probeInput = probePages .row("a") .row("b") @@ -1547,13 +1547,13 @@ private void testFullOuterJoinWithEmptyLookupSource(boolean parallelBuild) // build factory List buildTypes = ImmutableList.of(VARCHAR); - RowPagesBuilder buildPages = rowPagesBuilder(false, Ints.asList(0), buildTypes); + RowPagesBuilder buildPages = rowPagesBuilder(Ints.asList(0), buildTypes); BuildSideSetup buildSideSetup = setupBuildSide(nodePartitioningManager, parallelBuild, taskContext, buildPages, Optional.empty(), false, SINGLE_STREAM_SPILLER_FACTORY); JoinBridgeManager lookupSourceFactoryManager = buildSideSetup.getLookupSourceFactoryManager(); // probe factory List probeTypes = ImmutableList.of(VARCHAR); - RowPagesBuilder probePages = rowPagesBuilder(false, Ints.asList(0), probeTypes); + RowPagesBuilder probePages = rowPagesBuilder(Ints.asList(0), probeTypes); List probeInput = probePages .row("a") .row("b") @@ -1599,7 +1599,7 @@ private void testInnerJoinWithNonEmptyLookupSourceAndEmptyProbe(boolean parallel // build factory List buildTypes = ImmutableList.of(VARCHAR); - RowPagesBuilder buildPages = rowPagesBuilder(false, Ints.asList(0), buildTypes) + RowPagesBuilder buildPages = rowPagesBuilder(Ints.asList(0), buildTypes) .row("a") .row("b") .row((String) null) @@ -1609,7 +1609,7 @@ private 
void testInnerJoinWithNonEmptyLookupSourceAndEmptyProbe(boolean parallel // probe factory List probeTypes = ImmutableList.of(VARCHAR); - RowPagesBuilder probePages = rowPagesBuilder(false, Ints.asList(0), probeTypes); + RowPagesBuilder probePages = rowPagesBuilder(Ints.asList(0), probeTypes); List probeInput = probePages.build(); OperatorFactory joinOperatorFactory = spillingJoin( innerJoin(false, false), @@ -1686,7 +1686,7 @@ public void testInnerJoinWithBlockingLookupSource() private void testInnerJoinWithBlockingLookupSource(boolean parallelBuild) throws Exception { - RowPagesBuilder probePages = rowPagesBuilder(false, Ints.asList(0), ImmutableList.of(VARCHAR)); + RowPagesBuilder probePages = rowPagesBuilder(Ints.asList(0), ImmutableList.of(VARCHAR)); Page probePage = getOnlyElement(probePages.addSequencePage(1, 0).build()); // join that waits for build side to be collected @@ -1728,13 +1728,13 @@ private OperatorFactory createJoinOperatorFactoryWithBlockingLookupSource(TaskCo { // build factory List buildTypes = ImmutableList.of(VARCHAR); - RowPagesBuilder buildPages = rowPagesBuilder(false, Ints.asList(0), buildTypes); + RowPagesBuilder buildPages = rowPagesBuilder(Ints.asList(0), buildTypes); BuildSideSetup buildSideSetup = setupBuildSide(nodePartitioningManager, parallelBuild, taskContext, buildPages, Optional.empty(), false, SINGLE_STREAM_SPILLER_FACTORY); JoinBridgeManager lookupSourceFactoryManager = buildSideSetup.getLookupSourceFactoryManager(); // probe factory List probeTypes = ImmutableList.of(VARCHAR); - RowPagesBuilder probePages = rowPagesBuilder(false, Ints.asList(0), probeTypes); + RowPagesBuilder probePages = rowPagesBuilder(Ints.asList(0), probeTypes); OperatorFactory joinOperatorFactory = spillingJoin( innerJoin(false, waitForBuild), 0, diff --git a/core/trino-main/src/test/java/io/trino/operator/join/unspilled/BenchmarkHashBuildAndJoinOperators.java b/core/trino-main/src/test/java/io/trino/operator/join/unspilled/BenchmarkHashBuildAndJoinOperators.java index 2a8470071b9f..5f3be9dfb0b1 100644 --- a/core/trino-main/src/test/java/io/trino/operator/join/unspilled/BenchmarkHashBuildAndJoinOperators.java +++ b/core/trino-main/src/test/java/io/trino/operator/join/unspilled/BenchmarkHashBuildAndJoinOperators.java @@ -102,9 +102,6 @@ public static class BuildContext @Param({"varchar", "bigint", "all"}) protected String hashColumns = "bigint"; - @Param({"false", "true"}) - protected boolean buildHashEnabled; - @Param({"1", "5"}) protected int buildRowsRepetition = 1; @@ -166,7 +163,7 @@ public List getBuildPages() protected void initializeBuildPages() { - RowPagesBuilder buildPagesBuilder = rowPagesBuilder(buildHashEnabled, hashChannels, ImmutableList.of(VARCHAR, BIGINT, BIGINT)); + RowPagesBuilder buildPagesBuilder = rowPagesBuilder(hashChannels, ImmutableList.of(VARCHAR, BIGINT, BIGINT)); int maxValue = buildRowsNumber / buildRowsRepetition + 40; int rows = 0; @@ -248,7 +245,7 @@ public List getProbePages() protected void initializeProbePages() { - RowPagesBuilder probePagesBuilder = rowPagesBuilder(buildHashEnabled, hashChannels, ImmutableList.of(VARCHAR, BIGINT, BIGINT)); + RowPagesBuilder probePagesBuilder = rowPagesBuilder(hashChannels, ImmutableList.of(VARCHAR, BIGINT, BIGINT)); Random random = new Random(42); int remainingRows = PROBE_ROWS_NUMBER; diff --git a/core/trino-main/src/test/java/io/trino/operator/join/unspilled/TestHashJoinOperator.java b/core/trino-main/src/test/java/io/trino/operator/join/unspilled/TestHashJoinOperator.java index 
72897d6fa01e..69a5a6aec020 100644 --- a/core/trino-main/src/test/java/io/trino/operator/join/unspilled/TestHashJoinOperator.java +++ b/core/trino-main/src/test/java/io/trino/operator/join/unspilled/TestHashJoinOperator.java @@ -120,13 +120,13 @@ private void testInnerJoin(boolean parallelBuild) TaskContext taskContext = createTaskContext(); // build factory - RowPagesBuilder buildPages = rowPagesBuilder(false, Ints.asList(0), ImmutableList.of(VARCHAR, BIGINT, BIGINT)) + RowPagesBuilder buildPages = rowPagesBuilder(Ints.asList(0), ImmutableList.of(VARCHAR, BIGINT, BIGINT)) .addSequencePage(10, 20, 30, 40); BuildSideSetup buildSideSetup = setupBuildSide(nodePartitioningManager, parallelBuild, taskContext, buildPages, Optional.empty()); JoinBridgeManager lookupSourceFactory = buildSideSetup.getLookupSourceFactoryManager(); // probe factory - RowPagesBuilder probePages = rowPagesBuilder(false, Ints.asList(0), ImmutableList.of(VARCHAR, BIGINT, BIGINT)); + RowPagesBuilder probePages = rowPagesBuilder(Ints.asList(0), ImmutableList.of(VARCHAR, BIGINT, BIGINT)); List probeInput = probePages .addSequencePage(1000, 0, 1000, 2000) .build(); @@ -137,7 +137,7 @@ private void testInnerJoin(boolean parallelBuild) buildLookupSource(executor, buildSideSetup); // expected - MaterializedResult expected = MaterializedResult.resultBuilder(taskContext.getSession(), concat(probePages.getTypesWithoutHash(), buildPages.getTypesWithoutHash())) + MaterializedResult expected = MaterializedResult.resultBuilder(taskContext.getSession(), concat(probePages.getTypes(), buildPages.getTypes())) .row("20", 1020L, 2020L, "20", 30L, 40L) .row("21", 1021L, 2021L, "21", 31L, 41L) .row("22", 1022L, 2022L, "22", 32L, 42L) @@ -150,7 +150,7 @@ private void testInnerJoin(boolean parallelBuild) .row("29", 1029L, 2029L, "29", 39L, 49L) .build(); - assertOperatorEquals(joinOperatorFactory, taskContext.addPipelineContext(0, true, true, false).addDriverContext(), probeInput, expected, true, getHashChannels(probePages, buildPages)); + assertOperatorEquals(joinOperatorFactory, taskContext.addPipelineContext(0, true, true, false).addDriverContext(), probeInput, expected, true); } @Test @@ -167,7 +167,7 @@ private void testInnerJoinWithRunLengthEncodedProbe(boolean withFilter, boolean TaskContext taskContext = createTaskContext(); // build factory - RowPagesBuilder buildPages = rowPagesBuilder(false, Ints.asList(0), ImmutableList.of(VARCHAR, BIGINT)) + RowPagesBuilder buildPages = rowPagesBuilder(Ints.asList(0), ImmutableList.of(VARCHAR, BIGINT)) .row("20", 1L) .row("21", 2L) .row("21", 3L); @@ -175,7 +175,7 @@ private void testInnerJoinWithRunLengthEncodedProbe(boolean withFilter, boolean JoinBridgeManager lookupSourceFactory = buildSideSetup.getLookupSourceFactoryManager(); // probe factory - RowPagesBuilder probePagesBuilder = rowPagesBuilder(false, Ints.asList(0), ImmutableList.of(VARCHAR, BIGINT)) + RowPagesBuilder probePagesBuilder = rowPagesBuilder(Ints.asList(0), ImmutableList.of(VARCHAR, BIGINT)) .addBlocksPage( RunLengthEncodedBlock.create(VARCHAR, Slices.utf8Slice("20"), 2), createLongsBlock(42, 43)) @@ -209,7 +209,7 @@ private void testInnerJoinWithRunLengthEncodedProbe(boolean withFilter, boolean assertThat(getJoinOperatorInfo(driverContext).getTotalProbes()).isEqualTo(3); // expected - MaterializedResult expected = MaterializedResult.resultBuilder(taskContext.getSession(), concat(probePagesBuilder.getTypesWithoutHash(), buildPages.getTypesWithoutHash())) + MaterializedResult expected = 
MaterializedResult.resultBuilder(taskContext.getSession(), concat(probePagesBuilder.getTypes(), buildPages.getTypes())) .row("20", 42L, "20", 1L) .row("20", 43L, "20", 1L) .row("21", 62L, "21", 3L) @@ -252,13 +252,13 @@ private void testYield(boolean singleBigintLookupSource) // build with 40 entries int entries = 40; - RowPagesBuilder buildPages = rowPagesBuilder(false, Ints.asList(0), ImmutableList.of(BIGINT)) + RowPagesBuilder buildPages = rowPagesBuilder(Ints.asList(0), ImmutableList.of(BIGINT)) .addSequencePage(entries, 42); BuildSideSetup buildSideSetup = setupBuildSide(nodePartitioningManager, true, taskContext, buildPages, Optional.of(filterFunction), singleBigintLookupSource); JoinBridgeManager lookupSourceFactory = buildSideSetup.getLookupSourceFactoryManager(); // probe matching the above 40 entries - RowPagesBuilder probePages = rowPagesBuilder(false, Ints.asList(0), ImmutableList.of(BIGINT)); + RowPagesBuilder probePages = rowPagesBuilder(Ints.asList(0), ImmutableList.of(BIGINT)); List probeInput = probePages.addSequencePage(100, 0).build(); OperatorFactory joinOperatorFactory = join( innerJoin(false, false), @@ -316,7 +316,7 @@ private void testInnerJoinWithNullProbe(boolean parallelBuild, boolean singleBig // build factory List buildTypes = ImmutableList.of(BIGINT); - RowPagesBuilder buildPages = rowPagesBuilder(false, Ints.asList(0), buildTypes) + RowPagesBuilder buildPages = rowPagesBuilder(Ints.asList(0), buildTypes) .row(1L) .row(2L) .row(3L); @@ -325,7 +325,7 @@ private void testInnerJoinWithNullProbe(boolean parallelBuild, boolean singleBig // probe factory List probeTypes = ImmutableList.of(BIGINT); - RowPagesBuilder probePages = rowPagesBuilder(false, Ints.asList(0), probeTypes); + RowPagesBuilder probePages = rowPagesBuilder(Ints.asList(0), probeTypes); List probeInput = probePages .row(1L) .row((String) null) @@ -340,13 +340,13 @@ private void testInnerJoinWithNullProbe(boolean parallelBuild, boolean singleBig buildLookupSource(executor, buildSideSetup); // expected - MaterializedResult expected = MaterializedResult.resultBuilder(taskContext.getSession(), concat(probeTypes, buildPages.getTypesWithoutHash())) + MaterializedResult expected = MaterializedResult.resultBuilder(taskContext.getSession(), concat(probeTypes, buildPages.getTypes())) .row(1L, 1L) .row(1L, 1L) .row(2L, 2L) .build(); - assertOperatorEquals(joinOperatorFactory, taskContext.addPipelineContext(0, true, true, false).addDriverContext(), probeInput, expected, true, getHashChannels(probePages, buildPages)); + assertOperatorEquals(joinOperatorFactory, taskContext.addPipelineContext(0, true, true, false).addDriverContext(), probeInput, expected, true); } @Test @@ -363,7 +363,7 @@ private void testInnerJoinWithOutputSingleMatch(boolean parallelBuild, boolean s TaskContext taskContext = createTaskContext(); // build factory List buildTypes = ImmutableList.of(BIGINT); - RowPagesBuilder buildPages = rowPagesBuilder(false, Ints.asList(0), buildTypes) + RowPagesBuilder buildPages = rowPagesBuilder(Ints.asList(0), buildTypes) .row(1L) .row(1L) .row(2L); @@ -372,7 +372,7 @@ private void testInnerJoinWithOutputSingleMatch(boolean parallelBuild, boolean s // probe factory List probeTypes = ImmutableList.of(BIGINT); - RowPagesBuilder probePages = rowPagesBuilder(false, Ints.asList(0), probeTypes); + RowPagesBuilder probePages = rowPagesBuilder(Ints.asList(0), probeTypes); List probeInput = probePages .row(1L) .row(2L) @@ -390,7 +390,7 @@ private void testInnerJoinWithOutputSingleMatch(boolean parallelBuild, 
boolean s .row(2L, 2L) .build(); - assertOperatorEquals(joinOperatorFactory, taskContext.addPipelineContext(0, true, true, false).addDriverContext(), probeInput, expected, true, getHashChannels(probePages, buildPages)); + assertOperatorEquals(joinOperatorFactory, taskContext.addPipelineContext(0, true, true, false).addDriverContext(), probeInput, expected, true); } @Test @@ -406,7 +406,7 @@ private void testInnerJoinWithNullBuild(boolean parallelBuild) // build factory List buildTypes = ImmutableList.of(BIGINT); - RowPagesBuilder buildPages = rowPagesBuilder(false, Ints.asList(0), buildTypes) + RowPagesBuilder buildPages = rowPagesBuilder(Ints.asList(0), buildTypes) .row(1L) .row((String) null) .row((String) null) @@ -417,7 +417,7 @@ private void testInnerJoinWithNullBuild(boolean parallelBuild) // probe factory List probeTypes = ImmutableList.of(BIGINT); - RowPagesBuilder probePages = rowPagesBuilder(false, Ints.asList(0), probeTypes); + RowPagesBuilder probePages = rowPagesBuilder(Ints.asList(0), probeTypes); List probeInput = probePages .row(1L) .row(2L) @@ -436,7 +436,7 @@ private void testInnerJoinWithNullBuild(boolean parallelBuild) .row(2L, 2L) .build(); - assertOperatorEquals(joinOperatorFactory, taskContext.addPipelineContext(0, true, true, false).addDriverContext(), probeInput, expected, true, getHashChannels(probePages, buildPages)); + assertOperatorEquals(joinOperatorFactory, taskContext.addPipelineContext(0, true, true, false).addDriverContext(), probeInput, expected, true); } @Test @@ -452,7 +452,7 @@ private void testInnerJoinWithNullOnBothSides(boolean parallelBuild) // build factory List buildTypes = ImmutableList.of(BIGINT); - RowPagesBuilder buildPages = rowPagesBuilder(false, Ints.asList(0), buildTypes) + RowPagesBuilder buildPages = rowPagesBuilder(Ints.asList(0), buildTypes) .row(1L) .row((String) null) .row((String) null) @@ -463,7 +463,7 @@ private void testInnerJoinWithNullOnBothSides(boolean parallelBuild) // probe factory List probeTypes = ImmutableList.of(BIGINT); - RowPagesBuilder probePages = rowPagesBuilder(false, Ints.asList(0), probeTypes); + RowPagesBuilder probePages = rowPagesBuilder(Ints.asList(0), probeTypes); List probeInput = probePages .row(1L) .row(2L) @@ -483,7 +483,7 @@ private void testInnerJoinWithNullOnBothSides(boolean parallelBuild) .row(2L, 2L) .build(); - assertOperatorEquals(joinOperatorFactory, taskContext.addPipelineContext(0, true, true, false).addDriverContext(), probeInput, expected, true, getHashChannels(probePages, buildPages)); + assertOperatorEquals(joinOperatorFactory, taskContext.addPipelineContext(0, true, true, false).addDriverContext(), probeInput, expected, true); } @Test @@ -499,14 +499,14 @@ private void testProbeOuterJoin(boolean parallelBuild) // build factory List buildTypes = ImmutableList.of(VARCHAR, BIGINT, BIGINT); - RowPagesBuilder buildPages = rowPagesBuilder(false, Ints.asList(0), ImmutableList.of(VARCHAR, BIGINT, BIGINT)) + RowPagesBuilder buildPages = rowPagesBuilder(Ints.asList(0), ImmutableList.of(VARCHAR, BIGINT, BIGINT)) .addSequencePage(10, 20, 30, 40); BuildSideSetup buildSideSetup = setupBuildSide(nodePartitioningManager, parallelBuild, taskContext, buildPages, Optional.empty()); JoinBridgeManager lookupSourceFactory = buildSideSetup.getLookupSourceFactoryManager(); // probe factory List probeTypes = ImmutableList.of(VARCHAR, BIGINT, BIGINT); - RowPagesBuilder probePages = rowPagesBuilder(false, Ints.asList(0), probeTypes); + RowPagesBuilder probePages = rowPagesBuilder(Ints.asList(0), probeTypes); List 
probeInput = probePages .addSequencePage(15, 20, 1020, 2020) .build(); @@ -535,7 +535,7 @@ private void testProbeOuterJoin(boolean parallelBuild) .row("34", 1034L, 2034L, null, null, null) .build(); - assertOperatorEquals(joinOperatorFactory, taskContext.addPipelineContext(0, true, true, false).addDriverContext(), probeInput, expected, true, getHashChannels(probePages, buildPages)); + assertOperatorEquals(joinOperatorFactory, taskContext.addPipelineContext(0, true, true, false).addDriverContext(), probeInput, expected, true); } @Test @@ -554,14 +554,14 @@ private void testProbeOuterJoinWithFilterFunction(boolean parallelBuild) // build factory List buildTypes = ImmutableList.of(VARCHAR, BIGINT, BIGINT); - RowPagesBuilder buildPages = rowPagesBuilder(false, Ints.asList(0), ImmutableList.of(VARCHAR, BIGINT, BIGINT)) + RowPagesBuilder buildPages = rowPagesBuilder(Ints.asList(0), ImmutableList.of(VARCHAR, BIGINT, BIGINT)) .addSequencePage(10, 20, 30, 40); BuildSideSetup buildSideSetup = setupBuildSide(nodePartitioningManager, parallelBuild, taskContext, buildPages, Optional.of(filterFunction)); JoinBridgeManager lookupSourceFactory = buildSideSetup.getLookupSourceFactoryManager(); // probe factory List probeTypes = ImmutableList.of(VARCHAR, BIGINT, BIGINT); - RowPagesBuilder probePages = rowPagesBuilder(false, Ints.asList(0), probeTypes); + RowPagesBuilder probePages = rowPagesBuilder(Ints.asList(0), probeTypes); List probeInput = probePages .addSequencePage(15, 20, 1020, 2020) .build(); @@ -590,7 +590,7 @@ private void testProbeOuterJoinWithFilterFunction(boolean parallelBuild) .row("34", 1034L, 2034L, null, null, null) .build(); - assertOperatorEquals(joinOperatorFactory, taskContext.addPipelineContext(0, true, true, false).addDriverContext(), probeInput, expected, true, getHashChannels(probePages, buildPages)); + assertOperatorEquals(joinOperatorFactory, taskContext.addPipelineContext(0, true, true, false).addDriverContext(), probeInput, expected, true); } @Test @@ -608,7 +608,7 @@ private void testOuterJoinWithNullProbe(boolean parallelBuild, boolean singleBig // build factory List buildTypes = ImmutableList.of(BIGINT); - RowPagesBuilder buildPages = rowPagesBuilder(false, Ints.asList(0), buildTypes) + RowPagesBuilder buildPages = rowPagesBuilder(Ints.asList(0), buildTypes) .row(1L) .row(2L) .row(3L); @@ -617,7 +617,7 @@ private void testOuterJoinWithNullProbe(boolean parallelBuild, boolean singleBig // probe factory List probeTypes = ImmutableList.of(BIGINT); - RowPagesBuilder probePages = rowPagesBuilder(false, Ints.asList(0), probeTypes); + RowPagesBuilder probePages = rowPagesBuilder(Ints.asList(0), probeTypes); List probeInput = probePages .row(1L) .row((String) null) @@ -640,7 +640,7 @@ private void testOuterJoinWithNullProbe(boolean parallelBuild, boolean singleBig .row(2L, 2L) .build(); - assertOperatorEquals(joinOperatorFactory, taskContext.addPipelineContext(0, true, true, false).addDriverContext(), probeInput, expected, true, getHashChannels(probePages, buildPages)); + assertOperatorEquals(joinOperatorFactory, taskContext.addPipelineContext(0, true, true, false).addDriverContext(), probeInput, expected, true); } @Test @@ -661,7 +661,7 @@ private void testOuterJoinWithNullProbeAndFilterFunction(boolean parallelBuild, // build factory List buildTypes = ImmutableList.of(BIGINT); - RowPagesBuilder buildPages = rowPagesBuilder(false, Ints.asList(0), buildTypes) + RowPagesBuilder buildPages = rowPagesBuilder(Ints.asList(0), buildTypes) .row(1L) .row(2L) .row(3L); @@ -670,7 +670,7 @@ 
private void testOuterJoinWithNullProbeAndFilterFunction(boolean parallelBuild, // probe factory List probeTypes = ImmutableList.of(BIGINT); - RowPagesBuilder probePages = rowPagesBuilder(false, Ints.asList(0), probeTypes); + RowPagesBuilder probePages = rowPagesBuilder(Ints.asList(0), probeTypes); List probeInput = probePages .row(1L) .row((String) null) @@ -693,7 +693,7 @@ private void testOuterJoinWithNullProbeAndFilterFunction(boolean parallelBuild, .row(2L, null) .build(); - assertOperatorEquals(joinOperatorFactory, taskContext.addPipelineContext(0, true, true, false).addDriverContext(), probeInput, expected, true, getHashChannels(probePages, buildPages)); + assertOperatorEquals(joinOperatorFactory, taskContext.addPipelineContext(0, true, true, false).addDriverContext(), probeInput, expected, true); } @Test @@ -711,7 +711,7 @@ private void testOuterJoinWithNullBuild(boolean parallelBuild, boolean singleBig // build factory List buildTypes = ImmutableList.of(BIGINT); - RowPagesBuilder buildPages = rowPagesBuilder(false, Ints.asList(0), ImmutableList.of(BIGINT)) + RowPagesBuilder buildPages = rowPagesBuilder(Ints.asList(0), ImmutableList.of(BIGINT)) .row(1L) .row((String) null) .row((String) null) @@ -722,7 +722,7 @@ private void testOuterJoinWithNullBuild(boolean parallelBuild, boolean singleBig // probe factory List probeTypes = ImmutableList.of(BIGINT); - RowPagesBuilder probePages = rowPagesBuilder(false, Ints.asList(0), probeTypes); + RowPagesBuilder probePages = rowPagesBuilder(Ints.asList(0), probeTypes); List probeInput = probePages .row(1L) .row(2L) @@ -742,7 +742,7 @@ private void testOuterJoinWithNullBuild(boolean parallelBuild, boolean singleBig .row(3L, null) .build(); - assertOperatorEquals(joinOperatorFactory, taskContext.addPipelineContext(0, true, true, false).addDriverContext(), probeInput, expected, true, getHashChannels(probePages, buildPages)); + assertOperatorEquals(joinOperatorFactory, taskContext.addPipelineContext(0, true, true, false).addDriverContext(), probeInput, expected, true); } @Test @@ -764,7 +764,7 @@ private void testOuterJoinWithNullBuildAndFilterFunction(boolean parallelBuild, // build factory List buildTypes = ImmutableList.of(BIGINT); - RowPagesBuilder buildPages = rowPagesBuilder(false, Ints.asList(0), ImmutableList.of(BIGINT)) + RowPagesBuilder buildPages = rowPagesBuilder(Ints.asList(0), ImmutableList.of(BIGINT)) .row(1L) .row((String) null) .row((String) null) @@ -775,7 +775,7 @@ private void testOuterJoinWithNullBuildAndFilterFunction(boolean parallelBuild, // probe factory List probeTypes = ImmutableList.of(BIGINT); - RowPagesBuilder probePages = rowPagesBuilder(false, Ints.asList(0), probeTypes); + RowPagesBuilder probePages = rowPagesBuilder(Ints.asList(0), probeTypes); List probeInput = probePages .row(1L) .row(2L) @@ -795,7 +795,7 @@ private void testOuterJoinWithNullBuildAndFilterFunction(boolean parallelBuild, .row(3L, null) .build(); - assertOperatorEquals(joinOperatorFactory, taskContext.addPipelineContext(0, true, true, false).addDriverContext(), probeInput, expected, true, getHashChannels(probePages, buildPages)); + assertOperatorEquals(joinOperatorFactory, taskContext.addPipelineContext(0, true, true, false).addDriverContext(), probeInput, expected, true); } @Test @@ -812,7 +812,7 @@ private void testOuterJoinWithNullOnBothSides(boolean parallelBuild, boolean sin TaskContext taskContext = createTaskContext(); // build factory - RowPagesBuilder buildPages = rowPagesBuilder(false, Ints.asList(0), ImmutableList.of(BIGINT)) + 
RowPagesBuilder buildPages = rowPagesBuilder(Ints.asList(0), ImmutableList.of(BIGINT)) .row(1L) .row((String) null) .row((String) null) @@ -823,7 +823,7 @@ private void testOuterJoinWithNullOnBothSides(boolean parallelBuild, boolean sin // probe factory List probeTypes = ImmutableList.of(BIGINT); - RowPagesBuilder probePages = rowPagesBuilder(false, Ints.asList(0), probeTypes); + RowPagesBuilder probePages = rowPagesBuilder(Ints.asList(0), probeTypes); List probeInput = probePages .row(1L) .row(2L) @@ -837,7 +837,7 @@ private void testOuterJoinWithNullOnBothSides(boolean parallelBuild, boolean sin buildLookupSource(executor, buildSideSetup); // expected - MaterializedResult expected = MaterializedResult.resultBuilder(taskContext.getSession(), concat(probeTypes, buildPages.getTypesWithoutHash())) + MaterializedResult expected = MaterializedResult.resultBuilder(taskContext.getSession(), concat(probeTypes, buildPages.getTypes())) .row(1L, 1L) .row(1L, 1L) .row(2L, 2L) @@ -845,7 +845,7 @@ private void testOuterJoinWithNullOnBothSides(boolean parallelBuild, boolean sin .row(3L, null) .build(); - assertOperatorEquals(joinOperatorFactory, taskContext.addPipelineContext(0, true, true, false).addDriverContext(), probeInput, expected, true, getHashChannels(probePages, buildPages)); + assertOperatorEquals(joinOperatorFactory, taskContext.addPipelineContext(0, true, true, false).addDriverContext(), probeInput, expected, true); } @Test @@ -866,7 +866,7 @@ private void testOuterJoinWithNullOnBothSidesAndFilterFunction(boolean parallelB ImmutableSet.of(1L, 3L).contains(BIGINT.getLong(rightPage.getBlock(0), rightPosition))); // build factory - RowPagesBuilder buildPages = rowPagesBuilder(false, Ints.asList(0), ImmutableList.of(BIGINT)) + RowPagesBuilder buildPages = rowPagesBuilder(Ints.asList(0), ImmutableList.of(BIGINT)) .row(1L) .row((String) null) .row((String) null) @@ -877,7 +877,7 @@ private void testOuterJoinWithNullOnBothSidesAndFilterFunction(boolean parallelB // probe factory List probeTypes = ImmutableList.of(BIGINT); - RowPagesBuilder probePages = rowPagesBuilder(false, Ints.asList(0), probeTypes); + RowPagesBuilder probePages = rowPagesBuilder(Ints.asList(0), probeTypes); List probeInput = probePages .row(1L) .row(2L) @@ -891,7 +891,7 @@ private void testOuterJoinWithNullOnBothSidesAndFilterFunction(boolean parallelB buildLookupSource(executor, buildSideSetup); // expected - MaterializedResult expected = MaterializedResult.resultBuilder(taskContext.getSession(), concat(probeTypes, buildPages.getTypesWithoutHash())) + MaterializedResult expected = MaterializedResult.resultBuilder(taskContext.getSession(), concat(probeTypes, buildPages.getTypes())) .row(1L, 1L) .row(1L, 1L) .row(2L, null) @@ -899,7 +899,7 @@ private void testOuterJoinWithNullOnBothSidesAndFilterFunction(boolean parallelB .row(3L, null) .build(); - assertOperatorEquals(joinOperatorFactory, taskContext.addPipelineContext(0, true, true, false).addDriverContext(), probeInput, expected, true, getHashChannels(probePages, buildPages)); + assertOperatorEquals(joinOperatorFactory, taskContext.addPipelineContext(0, true, true, false).addDriverContext(), probeInput, expected, true); } @Test @@ -913,7 +913,7 @@ private void testMemoryLimit(boolean parallelBuild) { TaskContext taskContext = TestingTaskContext.createTaskContext(executor, scheduledExecutor, TEST_SESSION, DataSize.ofBytes(100)); - RowPagesBuilder buildPages = rowPagesBuilder(false, Ints.asList(0), ImmutableList.of(VARCHAR, BIGINT, BIGINT)) + RowPagesBuilder buildPages = 
rowPagesBuilder(Ints.asList(0), ImmutableList.of(VARCHAR, BIGINT, BIGINT)) .addSequencePage(10, 20, 30, 40); BuildSideSetup buildSideSetup = setupBuildSide(nodePartitioningManager, parallelBuild, taskContext, buildPages, Optional.empty()); instantiateBuildDrivers(buildSideSetup, taskContext); @@ -938,13 +938,13 @@ private void testInnerJoinWithEmptyLookupSource(boolean parallelBuild, boolean s // build factory List buildTypes = ImmutableList.of(BIGINT); - RowPagesBuilder buildPages = rowPagesBuilder(false, Ints.asList(0), buildTypes); + RowPagesBuilder buildPages = rowPagesBuilder(Ints.asList(0), buildTypes); BuildSideSetup buildSideSetup = setupBuildSide(nodePartitioningManager, parallelBuild, taskContext, buildPages, Optional.empty(), singleBigintLookupSource); JoinBridgeManager lookupSourceFactoryManager = buildSideSetup.getLookupSourceFactoryManager(); // probe factory List probeTypes = ImmutableList.of(BIGINT); - RowPagesBuilder probePages = rowPagesBuilder(false, Ints.asList(0), probeTypes); + RowPagesBuilder probePages = rowPagesBuilder(Ints.asList(0), probeTypes); OperatorFactory joinOperatorFactory = join( innerJoin(false, false), 0, @@ -981,13 +981,13 @@ private void testLookupOuterJoinWithEmptyLookupSource(boolean parallelBuild, boo // build factory List buildTypes = ImmutableList.of(BIGINT); - RowPagesBuilder buildPages = rowPagesBuilder(false, Ints.asList(0), buildTypes); + RowPagesBuilder buildPages = rowPagesBuilder(Ints.asList(0), buildTypes); BuildSideSetup buildSideSetup = setupBuildSide(nodePartitioningManager, parallelBuild, taskContext, buildPages, Optional.empty(), singleBigintLookupSource); JoinBridgeManager lookupSourceFactoryManager = buildSideSetup.getLookupSourceFactoryManager(); // probe factory List probeTypes = ImmutableList.of(BIGINT); - RowPagesBuilder probePages = rowPagesBuilder(false, Ints.asList(0), probeTypes); + RowPagesBuilder probePages = rowPagesBuilder(Ints.asList(0), probeTypes); OperatorFactory joinOperatorFactory = join( JoinOperatorType.lookupOuterJoin(false), 0, @@ -1024,13 +1024,13 @@ private void testProbeOuterJoinWithEmptyLookupSource(boolean parallelBuild, bool // build factory List buildTypes = ImmutableList.of(BIGINT); - RowPagesBuilder buildPages = rowPagesBuilder(false, Ints.asList(0), buildTypes); + RowPagesBuilder buildPages = rowPagesBuilder(Ints.asList(0), buildTypes); BuildSideSetup buildSideSetup = setupBuildSide(nodePartitioningManager, parallelBuild, taskContext, buildPages, Optional.empty(), singleBigintLookupSource); JoinBridgeManager lookupSourceFactoryManager = buildSideSetup.getLookupSourceFactoryManager(); // probe factory List probeTypes = ImmutableList.of(BIGINT); - RowPagesBuilder probePages = rowPagesBuilder(false, Ints.asList(0), probeTypes); + RowPagesBuilder probePages = rowPagesBuilder(Ints.asList(0), probeTypes); List probeInput = probePages .row(1L) .row(2L) @@ -1058,7 +1058,7 @@ private void testProbeOuterJoinWithEmptyLookupSource(boolean parallelBuild, bool .row(null, null) .row(3L, null) .build(); - assertOperatorEquals(joinOperatorFactory, taskContext.addPipelineContext(0, true, true, false).addDriverContext(), probeInput, expected, true, getHashChannels(probePages, buildPages)); + assertOperatorEquals(joinOperatorFactory, taskContext.addPipelineContext(0, true, true, false).addDriverContext(), probeInput, expected, true); } @Test @@ -1076,13 +1076,13 @@ private void testFullOuterJoinWithEmptyLookupSource(boolean parallelBuild, boole // build factory List buildTypes = ImmutableList.of(BIGINT); - 
RowPagesBuilder buildPages = rowPagesBuilder(false, Ints.asList(0), buildTypes); + RowPagesBuilder buildPages = rowPagesBuilder(Ints.asList(0), buildTypes); BuildSideSetup buildSideSetup = setupBuildSide(nodePartitioningManager, parallelBuild, taskContext, buildPages, Optional.empty(), singleBigintLookupSource); JoinBridgeManager lookupSourceFactoryManager = buildSideSetup.getLookupSourceFactoryManager(); // probe factory List probeTypes = ImmutableList.of(BIGINT); - RowPagesBuilder probePages = rowPagesBuilder(false, Ints.asList(0), probeTypes); + RowPagesBuilder probePages = rowPagesBuilder(Ints.asList(0), probeTypes); List probeInput = probePages .row(1L) .row(2L) @@ -1110,7 +1110,7 @@ private void testFullOuterJoinWithEmptyLookupSource(boolean parallelBuild, boole .row(null, null) .row(3L, null) .build(); - assertOperatorEquals(joinOperatorFactory, taskContext.addPipelineContext(0, true, true, false).addDriverContext(), probeInput, expected, true, getHashChannels(probePages, buildPages)); + assertOperatorEquals(joinOperatorFactory, taskContext.addPipelineContext(0, true, true, false).addDriverContext(), probeInput, expected, true); } @Test @@ -1128,7 +1128,7 @@ private void testInnerJoinWithNonEmptyLookupSourceAndEmptyProbe(boolean parallel // build factory List buildTypes = ImmutableList.of(BIGINT); - RowPagesBuilder buildPages = rowPagesBuilder(false, Ints.asList(0), buildTypes) + RowPagesBuilder buildPages = rowPagesBuilder(Ints.asList(0), buildTypes) .row(1L) .row(2L) .row((String) null) @@ -1138,7 +1138,7 @@ private void testInnerJoinWithNonEmptyLookupSourceAndEmptyProbe(boolean parallel // probe factory List probeTypes = ImmutableList.of(BIGINT); - RowPagesBuilder probePages = rowPagesBuilder(false, Ints.asList(0), probeTypes); + RowPagesBuilder probePages = rowPagesBuilder(Ints.asList(0), probeTypes); List probeInput = probePages.build(); OperatorFactory joinOperatorFactory = join( innerJoin(false, false), @@ -1156,7 +1156,7 @@ private void testInnerJoinWithNonEmptyLookupSourceAndEmptyProbe(boolean parallel // expected MaterializedResult expected = MaterializedResult.resultBuilder(taskContext.getSession(), concat(probeTypes, buildTypes)).build(); - assertOperatorEquals(joinOperatorFactory, taskContext.addPipelineContext(0, true, true, false).addDriverContext(), probeInput, expected, true, getHashChannels(probePages, buildPages)); + assertOperatorEquals(joinOperatorFactory, taskContext.addPipelineContext(0, true, true, false).addDriverContext(), probeInput, expected, true); } @Test @@ -1213,7 +1213,7 @@ public void testInnerJoinWithBlockingLookupSource() private void testInnerJoinWithBlockingLookupSource(boolean parallelBuild) throws Exception { - RowPagesBuilder probePages = rowPagesBuilder(false, Ints.asList(0), ImmutableList.of(VARCHAR)); + RowPagesBuilder probePages = rowPagesBuilder(Ints.asList(0), ImmutableList.of(VARCHAR)); Page probePage = getOnlyElement(probePages.addSequencePage(1, 0).build()); // join that waits for build side to be collected @@ -1255,13 +1255,13 @@ private OperatorFactory createJoinOperatorFactoryWithBlockingLookupSource(TaskCo { // build factory List buildTypes = ImmutableList.of(VARCHAR); - RowPagesBuilder buildPages = rowPagesBuilder(false, Ints.asList(0), buildTypes); + RowPagesBuilder buildPages = rowPagesBuilder(Ints.asList(0), buildTypes); BuildSideSetup buildSideSetup = setupBuildSide(nodePartitioningManager, parallelBuild, taskContext, buildPages, Optional.empty()); JoinBridgeManager lookupSourceFactoryManager = 
buildSideSetup.getLookupSourceFactoryManager();
 
         // probe factory
         List<Type> probeTypes = ImmutableList.of(VARCHAR);
-        RowPagesBuilder probePages = rowPagesBuilder(false, Ints.asList(0), probeTypes);
+        RowPagesBuilder probePages = rowPagesBuilder(Ints.asList(0), probeTypes);
         OperatorFactory joinOperatorFactory = join(
                 innerJoin(false, waitForBuild),
                 0,
@@ -1283,18 +1283,6 @@ private TaskContext createTaskContext()
         return TestingTaskContext.createTaskContext(executor, scheduledExecutor, TEST_SESSION);
     }
 
-    private static List<Integer> getHashChannels(RowPagesBuilder probe, RowPagesBuilder build)
-    {
-        ImmutableList.Builder<Integer> hashChannels = ImmutableList.builder();
-        if (probe.getHashChannel().isPresent()) {
-            hashChannels.add(probe.getHashChannel().get());
-        }
-        if (build.getHashChannel().isPresent()) {
-            hashChannels.add(probe.getTypes().size() + build.getHashChannel().get());
-        }
-        return hashChannels.build();
-    }
-
     private OperatorFactory probeOuterJoinOperatorFactory(
             JoinBridgeManager lookupSourceFactoryManager,
             RowPagesBuilder probePages,
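
Note on the removed test plumbing: getHashChannels existed only because rowPagesBuilder(hashEnabled, ...) could append a synthetic bigint hash column after each side's declared channels, and the join assertions had to be told which output channels to ignore when comparing against expected results. The build side's hash channel had to be offset by the probe's total channel count, since join output lays out probe channels before build channels. A minimal, self-contained sketch of that arithmetic, using a hypothetical PagesSpec record in place of RowPagesBuilder:

    import java.util.ArrayList;
    import java.util.List;
    import java.util.Optional;

    // Hypothetical stand-in for RowPagesBuilder, exposing only the two
    // accessors the removed helper relied on.
    record PagesSpec(List<String> types, Optional<Integer> hashChannel) {}

    public class HashChannelsSketch
    {
        // Mirrors the deleted getHashChannels(probe, build): probe channels
        // come first in the join output, so the build side's hash channel is
        // offset by the probe channel count (which included the probe's own
        // hash channel when one was present).
        static List<Integer> getHashChannels(PagesSpec probe, PagesSpec build)
        {
            List<Integer> hashChannels = new ArrayList<>();
            probe.hashChannel().ifPresent(hashChannels::add);
            build.hashChannel().ifPresent(channel -> hashChannels.add(probe.types().size() + channel));
            return hashChannels;
        }

        public static void main(String[] args)
        {
            // With hashEnabled=true, each side carried one extra hash channel
            // after its declared types ("HASH" here is just a label).
            PagesSpec probe = new PagesSpec(List.of("VARCHAR", "BIGINT", "BIGINT", "HASH"), Optional.of(3));
            PagesSpec build = new PagesSpec(List.of("VARCHAR", "BIGINT", "HASH"), Optional.of(2));
            System.out.println(getHashChannels(probe, build)); // prints [3, 6]
        }
    }

With the pre-computed hash channel gone, pages carry only the declared types, getTypes() subsumes getTypesWithoutHash(), and assertOperatorEquals no longer needs an ignore-channel list, which is exactly the shape of the mechanical edits above.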