-
Notifications
You must be signed in to change notification settings - Fork 3.6k
Measure split listing time per table per query. #14713
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Changes from all commits
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -42,6 +42,7 @@ | |
| import java.util.Optional; | ||
| import java.util.OptionalDouble; | ||
| import java.util.Set; | ||
| import java.util.concurrent.ConcurrentHashMap; | ||
| import java.util.concurrent.Executor; | ||
| import java.util.concurrent.atomic.AtomicBoolean; | ||
| import java.util.concurrent.atomic.AtomicLong; | ||
|
|
@@ -51,6 +52,7 @@ | |
| import static com.google.common.base.MoreObjects.toStringHelper; | ||
| import static com.google.common.base.Preconditions.checkArgument; | ||
| import static com.google.common.base.Preconditions.checkState; | ||
| import static com.google.common.collect.ImmutableList.toImmutableList; | ||
| import static io.airlift.units.DataSize.succinctBytes; | ||
| import static io.airlift.units.Duration.succinctDuration; | ||
| import static io.trino.execution.StageState.ABORTED; | ||
|
|
@@ -84,6 +86,7 @@ public class StageStateMachine | |
|
|
||
| private final AtomicReference<DateTime> schedulingComplete = new AtomicReference<>(); | ||
| private final Distribution getSplitDistribution = new Distribution(); | ||
| private final Map<PlanNodeId, Distribution> tableGetSplitDistribution = new ConcurrentHashMap<>(); | ||
|
||
|
|
||
| private final AtomicLong peakUserMemory = new AtomicLong(); | ||
| private final AtomicLong peakRevocableMemory = new AtomicLong(); | ||
|
|
@@ -546,10 +549,15 @@ public StageInfo getStageInfo(Supplier<Iterable<TaskInfo>> taskInfosSupplier) | |
| } | ||
| } | ||
|
|
||
| List<TableGetSplitDistribution> tableGetSplitDistributions = tableGetSplitDistribution.entrySet().stream() | ||
| .map(entry -> new TableGetSplitDistribution(entry.getKey(), entry.getValue().snapshot())) | ||
| .collect(toImmutableList()); | ||
|
|
||
| StageStats stageStats = new StageStats( | ||
| schedulingComplete.get(), | ||
| getSplitDistribution.snapshot(), | ||
|
|
||
| tableGetSplitDistributions, | ||
| totalTasks, | ||
| runningTasks, | ||
| completedTasks, | ||
|
|
@@ -638,10 +646,13 @@ public StageInfo getStageInfo(Supplier<Iterable<TaskInfo>> taskInfosSupplier) | |
| failureInfo); | ||
| } | ||
|
|
||
| public void recordGetSplitTime(long startNanos) | ||
| public void recordGetSplitTime(PlanNodeId planNodeId, long startNanos) | ||
| { | ||
| requireNonNull(planNodeId, "planNodeId is null"); | ||
| long elapsedNanos = System.nanoTime() - startNanos; | ||
| getSplitDistribution.add(elapsedNanos); | ||
| tableGetSplitDistribution.computeIfAbsent(planNodeId, (key) -> new Distribution()) | ||
| .add(elapsedNanos); | ||
| scheduledStats.getGetSplitTime().add(elapsedNanos, NANOSECONDS); | ||
| } | ||
|
|
||
|
|
||
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,53 @@ | ||
/*
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package io.trino.execution;

import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonProperty;
import io.airlift.stats.Distribution.DistributionSnapshot;
import io.trino.sql.planner.plan.PlanNodeId;

import javax.annotation.concurrent.Immutable;

import static java.util.Objects.requireNonNull;

/**
 * Immutable, JSON-serializable pairing of a plan node with a snapshot of the
 * split-listing time distribution observed for that node.
 */
@Immutable
public class TableGetSplitDistribution
{
    private final PlanNodeId planNodeId;
    private final DistributionSnapshot splitDistribution;

    @JsonCreator
    public TableGetSplitDistribution(
            @JsonProperty("planNodeId") PlanNodeId planNodeId,
            @JsonProperty("splitDistribution") DistributionSnapshot splitDistribution)
    {
        // fail fast on nulls; instances are handed straight to JSON serialization
        this.planNodeId = requireNonNull(planNodeId, "planNodeId is null");
        this.splitDistribution = requireNonNull(splitDistribution, "splitDistribution is null");
    }

    @JsonProperty
    public PlanNodeId getPlanNodeId()
    {
        return planNodeId;
    }

    @JsonProperty
    public DistributionSnapshot getSplitDistribution()
    {
        return splitDistribution;
    }
}
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -244,7 +244,7 @@ else if (pendingSplits.isEmpty()) { | |
| nextSplitBatchFuture = splitSource.getNextBatch(splitBatchSize); | ||
|
|
||
| long start = System.nanoTime(); | ||
| addSuccessCallback(nextSplitBatchFuture, () -> stageExecution.recordGetSplitTime(start)); | ||
| addSuccessCallback(nextSplitBatchFuture, () -> stageExecution.recordGetSplitTime(partitionedNode, start)); | ||
|
Member
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. In the Hive connector, splits are loaded by the connector in a background thread (see BackgroundHiveSplitLoader), so recording the time taken here will probably miss the actual work done listing files by the Hive connector. |
||
| } | ||
|
|
||
| if (nextSplitBatchFuture.isDone()) { | ||
|
|
||
Uh oh!
There was an error while loading. Please reload this page.