From 68981311ab16d490fa1b0ecba2496face0f97981 Mon Sep 17 00:00:00 2001 From: ulysses-you Date: Thu, 4 May 2023 19:49:33 +0800 Subject: [PATCH] Enable spark.sql.thriftServer.interruptOnCancel by default --- docs/sql-migration-guide.md | 1 + .../src/main/scala/org/apache/spark/sql/internal/SQLConf.scala | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/docs/sql-migration-guide.md b/docs/sql-migration-guide.md index 11ed5402ee1f0..80df50273a123 100644 --- a/docs/sql-migration-guide.md +++ b/docs/sql-migration-guide.md @@ -25,6 +25,7 @@ license: | ## Upgrading from Spark SQL 3.4 to 3.5 - Since Spark 3.5, the JDBC options related to DS V2 pushdown are `true` by default. These options include: `pushDownAggregate`, `pushDownLimit`, `pushDownOffset` and `pushDownTableSample`. To restore the legacy behavior, please set them to `false`. e.g. set `spark.sql.catalog.your_catalog_name.pushDownAggregate` to `false`. +- Since Spark 3.5, Spark thrift server will interrupt tasks when canceling a running statement. To restore the previous behavior, set `spark.sql.thriftServer.interruptOnCancel` to `false`. ## Upgrading from Spark SQL 3.3 to 3.4 diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/internal/SQLConf.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/internal/SQLConf.scala index 874f95af1cbd2..bd40295d81a47 100644 --- a/sql/catalyst/src/main/scala/org/apache/spark/sql/internal/SQLConf.scala +++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/internal/SQLConf.scala @@ -1383,7 +1383,7 @@ object SQLConf { "When false, all running tasks will remain until finished.") .version("3.2.0") .booleanConf - .createWithDefault(false) + .createWithDefault(true) val THRIFTSERVER_QUERY_TIMEOUT = buildConf("spark.sql.thriftServer.queryTimeout")