@@ -0,0 +1,28 @@
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.trino;

import io.airlift.units.DataSize;
import io.trino.spi.TrinoException;

import static io.trino.spi.StandardErrorCode.EXCEEDED_WRITE_LIMIT;

public class ExceededWriteLimitException
extends TrinoException
{
public ExceededWriteLimitException(DataSize limit)
{
super(EXCEEDED_WRITE_LIMIT, "Exceeded write limit of " + limit);
}
}
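
A minimal usage sketch for the new exception (illustrative only, not part of this change; assumes trino-spi and airlift units on the classpath):

import io.airlift.units.DataSize;
import io.trino.ExceededWriteLimitException;
import io.trino.spi.TrinoException;

import static io.airlift.units.DataSize.Unit.GIGABYTE;

final class ExceededWriteLimitSketch
{
    public static void main(String[] args)
    {
        TrinoException e = new ExceededWriteLimitException(DataSize.of(1, GIGABYTE));
        System.out.println(e.getMessage()); // prints: Exceeded write limit of 1GB
        // The error code is EXCEEDED_WRITE_LIMIT, an INSUFFICIENT_RESOURCES error;
        // the new tests below assert against exactly this code.
        System.out.println(e.getErrorCode());
    }
}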
@@ -80,6 +80,7 @@ public final class SystemSessionProperties
public static final String RESOURCE_OVERCOMMIT = "resource_overcommit";
public static final String QUERY_MAX_CPU_TIME = "query_max_cpu_time";
public static final String QUERY_MAX_SCAN_PHYSICAL_BYTES = "query_max_scan_physical_bytes";
public static final String QUERY_MAX_WRITE_PHYSICAL_SIZE = "query_max_write_physical_size";
public static final String QUERY_MAX_STAGE_COUNT = "query_max_stage_count";
public static final String REDISTRIBUTE_WRITES = "redistribute_writes";
public static final String USE_PREFERRED_WRITE_PARTITIONING = "use_preferred_write_partitioning";
@@ -403,6 +404,11 @@ public SystemSessionProperties(
"Maximum scan physical bytes of a query",
queryManagerConfig.getQueryMaxScanPhysicalBytes().orElse(null),
false),
dataSizeProperty(
QUERY_MAX_WRITE_PHYSICAL_SIZE,
"Maximum write physical size of a query",
queryManagerConfig.getQueryMaxWritePhysicalSize().orElse(null),
false),
booleanProperty(
RESOURCE_OVERCOMMIT,
"Use resources which are not guaranteed to be available to the query",
@@ -1354,6 +1360,11 @@ public static Optional<DataSize> getQueryMaxScanPhysicalBytes(Session session)
return Optional.ofNullable(session.getSystemProperty(QUERY_MAX_SCAN_PHYSICAL_BYTES, DataSize.class));
}

public static Optional<DataSize> getQueryMaxWritePhysicalSize(Session session)
{
return Optional.ofNullable(session.getSystemProperty(QUERY_MAX_WRITE_PHYSICAL_SIZE, DataSize.class));
}

public static boolean isSpillEnabled(Session session)
{
return session.getSystemProperty(SPILL_ENABLED, Boolean.class);
@@ -96,6 +96,7 @@ public class QueryManagerConfig
private Duration queryMaxPlanningTime = new Duration(10, TimeUnit.MINUTES);
private Duration queryMaxCpuTime = new Duration(1_000_000_000, TimeUnit.DAYS);
private Optional<DataSize> queryMaxScanPhysicalBytes = Optional.empty();
private Optional<DataSize> queryMaxWritePhysicalSize = Optional.empty();
private int queryReportedRuleStatsLimit = 10;
private int dispatcherQueryPoolSize = DISPATCHER_THREADPOOL_MAX_SIZE;

@@ -506,6 +507,19 @@ public QueryManagerConfig setQueryMaxScanPhysicalBytes(DataSize queryMaxScanPhys
return this;
}

@NotNull
public Optional<DataSize> getQueryMaxWritePhysicalSize()
{
return queryMaxWritePhysicalSize;
}

@Config("query.max-write-physical-size")
public QueryManagerConfig setQueryMaxWritePhysicalSize(DataSize queryMaxWritePhysicalSize)
{
this.queryMaxWritePhysicalSize = Optional.ofNullable(queryMaxWritePhysicalSize);
return this;
}

@Min(1)
public int getQueryReportedRuleStatsLimit()
{
@@ -26,6 +26,7 @@
import io.opentelemetry.context.Context;
import io.trino.ExceededCpuLimitException;
import io.trino.ExceededScanLimitException;
import io.trino.ExceededWriteLimitException;
import io.trino.Session;
import io.trino.execution.QueryExecution.QueryOutputInfo;
import io.trino.execution.StateMachine.StateChangeListener;
@@ -56,6 +57,8 @@
import static io.airlift.concurrent.Threads.threadsNamed;
import static io.trino.SystemSessionProperties.getQueryMaxCpuTime;
import static io.trino.SystemSessionProperties.getQueryMaxScanPhysicalBytes;
import static io.trino.SystemSessionProperties.getQueryMaxWritePhysicalSize;
import static io.trino.execution.QueryState.FINISHING;
import static io.trino.execution.QueryState.RUNNING;
import static io.trino.spi.StandardErrorCode.GENERIC_INTERNAL_ERROR;
import static io.trino.tracing.ScopedSpan.scopedSpan;
@@ -76,6 +79,7 @@ public class SqlQueryManager

private final Duration maxQueryCpuTime;
private final Optional<DataSize> maxQueryScanPhysicalBytes;
private final Optional<DataSize> maxQueryWritePhysicalSize;

private final ExecutorService queryExecutor;
private final ThreadPoolExecutorMBean queryExecutorMBean;
@@ -91,6 +95,7 @@ public SqlQueryManager(ClusterMemoryManager memoryManager, Tracer tracer, QueryM

this.maxQueryCpuTime = queryManagerConfig.getQueryMaxCpuTime();
this.maxQueryScanPhysicalBytes = queryManagerConfig.getQueryMaxScanPhysicalBytes();
this.maxQueryWritePhysicalSize = queryManagerConfig.getQueryMaxWritePhysicalSize();

this.queryExecutor = newCachedThreadPool(threadsNamed("query-scheduler-%s"));
this.queryExecutorMBean = new ThreadPoolExecutorMBean((ThreadPoolExecutor) queryExecutor);
@@ -126,6 +131,13 @@ public void start()
catch (Throwable e) {
log.error(e, "Error enforcing query scan bytes limits");
}

try {
enforceWriteLimits();
}
catch (Throwable e) {
log.error(e, "Error enforcing query write bytes limits");
}
}, 1, 1, TimeUnit.SECONDS);
}

@@ -371,4 +383,29 @@ private void enforceScanLimits()
});
}
}

/**
* Enforce query write physical size limits
*/
private void enforceWriteLimits()
{
for (QueryExecution query : queryTracker.getAllQueries()) {
if (query.isDone() || query.getState() == FINISHING) {
continue;
}
Optional<DataSize> limitOpt = getQueryMaxWritePhysicalSize(query.getSession());
if (maxQueryWritePhysicalSize.isPresent()) {
limitOpt = limitOpt
.flatMap(sessionLimit -> maxQueryWritePhysicalSize.map(serverLimit -> Ordering.natural().min(serverLimit, sessionLimit)))
.or(() -> maxQueryWritePhysicalSize);
}

limitOpt.ifPresent(writeLimit -> {
DataSize queryWriteBytes = query.getQueryInfo().getQueryStats().getPhysicalWrittenDataSize();
if (queryWriteBytes.compareTo(writeLimit) > 0) {
query.fail(new ExceededWriteLimitException(writeLimit));
}
});
}
}
}
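
The guard on maxQueryWritePhysicalSize.isPresent() above is load-bearing: without it, a session-only limit would collapse to empty in the flatMap (and the .or() would then also yield empty), so the session limit would never be enforced. A self-contained sketch of the same resolution semantics, assuming Guava and airlift units on the classpath (the effectiveLimit helper is illustrative, not part of this change):

import com.google.common.collect.Ordering;
import io.airlift.units.DataSize;

import java.util.Optional;

final class WriteLimitResolutionSketch
{
    // Mirrors enforceWriteLimits(): min(session, server) when both limits are set,
    // whichever one is set when only one is, empty when neither is.
    static Optional<DataSize> effectiveLimit(Optional<DataSize> sessionLimit, Optional<DataSize> serverLimit)
    {
        if (serverLimit.isEmpty()) {
            return sessionLimit;
        }
        return sessionLimit
                .flatMap(session -> serverLimit.map(server -> Ordering.natural().min(server, session)))
                .or(() -> serverLimit);
    }

    public static void main(String[] args)
    {
        Optional<DataSize> session = Optional.of(DataSize.valueOf("1GB"));
        Optional<DataSize> server = Optional.of(DataSize.valueOf("500MB"));
        System.out.println(effectiveLimit(session, server));           // Optional[500MB]: stricter server limit wins
        System.out.println(effectiveLimit(session, Optional.empty())); // Optional[1GB]: session limit alone
        System.out.println(effectiveLimit(Optional.empty(), server));  // Optional[500MB]: server default applies
    }
}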
@@ -108,7 +108,7 @@ public T set(T newState)

/**
* Tries to change the state. State will not change if the new state {@code .equals()} the current state,
* of if the current state is a terminal state. If the state changed, listeners and waiters will be notified.
* or if the current state is a terminal state. If the state changed, listeners and waiters will be notified.
*
* @return the state before the possible state change
*/
@@ -27,6 +27,7 @@
import static io.airlift.units.DataSize.Unit.GIGABYTE;
import static io.airlift.units.DataSize.Unit.KILOBYTE;
import static io.airlift.units.DataSize.Unit.MEGABYTE;
import static io.airlift.units.DataSize.Unit.TERABYTE;
import static io.trino.execution.QueryManagerConfig.AVAILABLE_HEAP_MEMORY;
import static io.trino.execution.QueryManagerConfig.DEFAULT_TASK_DESCRIPTOR_STORAGE_MAX_MEMORY;
import static io.trino.execution.QueryManagerConfig.FAULT_TOLERANT_EXECUTION_MAX_PARTITION_COUNT_LIMIT;
@@ -71,6 +72,7 @@ public void testDefaults()
.setQueryReportedRuleStatsLimit(10)
.setDispatcherQueryPoolSize(Integer.toString(max(50, Runtime.getRuntime().availableProcessors() * 10)))
.setQueryMaxScanPhysicalBytes(null)
.setQueryMaxWritePhysicalSize(null)
.setRequiredWorkers(1)
.setRequiredWorkersMaxWait(new Duration(5, MINUTES))
.setRetryPolicy(RetryPolicy.NONE)
@@ -154,6 +156,7 @@ public void testExplicitPropertyMappings()
.put("query.reported-rule-stats-limit", "50")
.put("query.dispatcher-query-pool-size", "151")
.put("query.max-scan-physical-bytes", "1kB")
.put("query.max-write-physical-size", "1TB")
.put("query-manager.required-workers", "333")
.put("query-manager.required-workers-max-wait", "33m")
.put("retry-policy", "QUERY")
@@ -234,6 +237,7 @@ public void testExplicitPropertyMappings()
.setQueryReportedRuleStatsLimit(50)
.setDispatcherQueryPoolSize("151")
.setQueryMaxScanPhysicalBytes(DataSize.of(1, KILOBYTE))
.setQueryMaxWritePhysicalSize(DataSize.of(1, TERABYTE))
.setRequiredWorkers(333)
.setRequiredWorkersMaxWait(new Duration(33, MINUTES))
.setRetryPolicy(RetryPolicy.QUERY)
@@ -199,6 +199,7 @@ public enum StandardErrorCode
ADMINISTRATIVELY_PREEMPTED(131080, INSUFFICIENT_RESOURCES),
EXCEEDED_SCAN_LIMIT(131081, INSUFFICIENT_RESOURCES),
EXCEEDED_TASK_DESCRIPTOR_STORAGE_CAPACITY(131082, INSUFFICIENT_RESOURCES),
EXCEEDED_WRITE_LIMIT(131083, INSUFFICIENT_RESOURCES),

UNSUPPORTED_TABLE_TYPE(133001, EXTERNAL),
/**/;
9 changes: 9 additions & 0 deletions docs/src/main/sphinx/admin/properties-query-management.md
@@ -159,6 +159,15 @@ The maximum number of bytes that can be scanned by a query during its execution.
When this limit is reached, query processing is terminated to prevent excessive
resource usage.

## `query.max-write-physical-size`

- **Type:** {ref}`prop-type-data-size`
- **Session property:** `query_max_write_physical_size`

The maximum physical size of data that can be written by a query during its execution.
When this limit is reached, query processing is terminated to prevent excessive
resource usage.
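
For example, setting `query.max-write-physical-size=1TB` in the coordinator's `config.properties` caps every query at 1 TB of physical writes, while `SET SESSION query_max_write_physical_size = '500GB'` tightens the limit for a single session; when both are set, the smaller value is enforced. The limit is checked by a periodic task on the coordinator (once per second in this change), so a query may slightly overshoot the limit before it is failed.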

## `query.max-stage-count`

- **Type:** {ref}`prop-type-integer`
@@ -13,31 +13,39 @@
*/
package io.trino.tests;

import com.google.common.collect.ImmutableMap;
import io.opentelemetry.api.trace.Span;
import io.trino.Session;
import io.trino.dispatcher.DispatchManager;
import io.trino.execution.QueryInfo;
import io.trino.execution.QueryManager;
import io.trino.execution.QueryState;
import io.trino.plugin.hive.TestingHivePlugin;
import io.trino.server.BasicQueryInfo;
import io.trino.server.SessionContext;
import io.trino.server.protocol.Slug;
import io.trino.spi.QueryId;
import io.trino.spi.TrinoException;
import io.trino.testing.DistributedQueryRunner;
import io.trino.testing.QueryRunner;
import io.trino.tests.tpch.TpchQueryRunner;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.Timeout;
import org.junit.jupiter.api.parallel.Execution;

import java.nio.file.Path;

import static io.trino.SessionTestUtils.TEST_SESSION;
import static io.trino.SystemSessionProperties.QUERY_MAX_WRITE_PHYSICAL_SIZE;
import static io.trino.execution.QueryRunnerUtil.createQuery;
import static io.trino.execution.QueryRunnerUtil.waitForQueryState;
import static io.trino.execution.QueryState.FAILED;
import static io.trino.execution.QueryState.RUNNING;
import static io.trino.spi.StandardErrorCode.EXCEEDED_CPU_LIMIT;
import static io.trino.spi.StandardErrorCode.EXCEEDED_SCAN_LIMIT;
import static io.trino.spi.StandardErrorCode.EXCEEDED_WRITE_LIMIT;
import static io.trino.spi.StandardErrorCode.GENERIC_INTERNAL_ERROR;
import static io.trino.testing.TestingSession.testSessionBuilder;
import static org.assertj.core.api.Assertions.assertThat;
import static org.assertj.core.api.Fail.fail;
import static org.junit.jupiter.api.parallel.ExecutionMode.SAME_THREAD;
@@ -132,4 +140,65 @@ public void testQueryScanExceededSession()
assertThat(queryInfo.getErrorCode()).isEqualTo(EXCEEDED_SCAN_LIMIT.toErrorCode());
}
}

@Test
@Timeout(60)
public void testQueryWriteExceeded()
throws Exception
{
try (DistributedQueryRunner queryRunner = TpchQueryRunner.builder()
.addExtraProperty("query.max-write-physical-size", "0B")
.build()) {
Path hiveDataDir = queryRunner.getCoordinator().getBaseDataDir().resolve("hive_data");
queryRunner.installPlugin(new TestingHivePlugin(hiveDataDir));
queryRunner.createCatalog("hive", "hive", ImmutableMap.of(
"hive.metastore", "file",
"hive.metastore.catalog.dir", hiveDataDir.toFile().getAbsolutePath(),
"fs.hadoop.enabled", "true"));

Session session = testSessionBuilder()
.setCatalog("hive")
.setSchema("test")
.build();

queryRunner.execute(session, "CREATE SCHEMA IF NOT EXISTS test");
QueryId queryId = createQuery(queryRunner, session, "CREATE TABLE test_table AS SELECT * FROM tpch.tiny.orders");

waitForQueryState(queryRunner, queryId, FAILED);
QueryManager queryManager = queryRunner.getCoordinator().getQueryManager();
BasicQueryInfo queryInfo = queryManager.getQueryInfo(queryId);
assertThat(queryInfo.getState()).isEqualTo(FAILED);
assertThat(queryInfo.getErrorCode()).isEqualTo(EXCEEDED_WRITE_LIMIT.toErrorCode());
}
}

@Test
@Timeout(60)
public void testQueryWriteExceededSession()
throws Exception
{
try (DistributedQueryRunner queryRunner = TpchQueryRunner.builder().build()) {
Path hiveDataDir = queryRunner.getCoordinator().getBaseDataDir().resolve("hive_data");
queryRunner.installPlugin(new TestingHivePlugin(hiveDataDir));
queryRunner.createCatalog("hive", "hive", ImmutableMap.of(
"hive.metastore", "file",
"hive.metastore.catalog.dir", queryRunner.getCoordinator().getBaseDataDir().resolve("hive_data").toFile().getAbsolutePath(),
"fs.hadoop.enabled", "true"));

Session session = testSessionBuilder()
.setCatalog("hive")
.setSchema("test")
.setSystemProperty(QUERY_MAX_WRITE_PHYSICAL_SIZE, "0B")
.build();

queryRunner.execute(session, "CREATE SCHEMA IF NOT EXISTS test");
QueryId queryId = createQuery(queryRunner, session, "CREATE TABLE test_table AS SELECT * FROM tpch.tiny.orders");

waitForQueryState(queryRunner, queryId, FAILED);
QueryManager queryManager = queryRunner.getCoordinator().getQueryManager();
BasicQueryInfo queryInfo = queryManager.getQueryInfo(queryId);
assertThat(queryInfo.getState()).isEqualTo(FAILED);
assertThat(queryInfo.getErrorCode()).isEqualTo(EXCEEDED_WRITE_LIMIT.toErrorCode());
}
}
}