[SPARK-29930][SQL] Remove SQL configs declared to be removed in Spark 3.0 #26559
Changes from all commits
```diff
@@ -720,14 +720,6 @@ object SQLConf {
     .stringConf
     .createWithDefault("_corrupt_record")
 
-  val FROM_JSON_FORCE_NULLABLE_SCHEMA = buildConf("spark.sql.fromJsonForceNullableSchema")
-    .internal()
-    .doc("When true, force the output schema of the from_json() function to be nullable " +
-      "(including all the fields). Otherwise, the schema might not be compatible with" +
-      "actual data, which leads to corruptions. This config will be removed in Spark 3.0.")
-    .booleanConf
-    .createWithDefault(true)
-
   val BROADCAST_TIMEOUT = buildConf("spark.sql.broadcastTimeout")
     .doc("Timeout in seconds for the broadcast wait time in broadcast joins.")
     .timeConf(TimeUnit.SECONDS)
```
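For context, a minimal sketch of the behavior that is now unconditional (the session setup, object name, and sample data are illustrative, not from this PR): `from_json` marks all parsed fields as nullable, because an input record may simply lack a field that the user-supplied schema declares non-nullable.

```scala
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.functions.from_json
import org.apache.spark.sql.types.{IntegerType, StructType}

object FromJsonNullabilityDemo extends App {
  val spark = SparkSession.builder().master("local[*]").appName("demo").getOrCreate()
  import spark.implicits._

  // Declare field "a" non-nullable, then parse a record that lacks it.
  val schema = new StructType().add("a", IntegerType, nullable = false)
  val parsed = Seq("""{"b": 1}""").toDF("json")
    .select(from_json($"json", schema).as("parsed"))

  // The parsed value of "a" is null; keeping the schema non-nullable would
  // misdescribe the actual data, which is why the nullable schema is forced.
  parsed.printSchema()
  parsed.show()
}
```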
```diff
@@ -1687,14 +1679,6 @@ object SQLConf {
       "the SQL parser.")
     .fallbackConf(ANSI_ENABLED)
 
-  val ALLOW_CREATING_MANAGED_TABLE_USING_NONEMPTY_LOCATION =
-    buildConf("spark.sql.legacy.allowCreatingManagedTableUsingNonemptyLocation")
-      .internal()
-      .doc("When this option is set to true, creating managed tables with nonempty location " +
-        "is allowed. Otherwise, an analysis exception is thrown. ")
-      .booleanConf
-      .createWithDefault(false)
-
   val VALIDATE_PARTITION_COLUMNS =
     buildConf("spark.sql.sources.validatePartitionColumns")
      .internal()
```

Member: Was this explicitly to be removed in 3.0? It doesn't say so in the doc, but it may have been otherwise documented or well understood.

Member (Author): Yes, it is mentioned in the SQL migration guide: spark/docs/sql-migration-guide.md (line 315 in 80fbc38).
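As a hedged illustration of what the removed flag used to relax (the warehouse layout, object name, and file contents here are assumptions made up for the example): with the flag gone, creating a managed table whose derived warehouse directory already exists and is non-empty always fails analysis.

```scala
import java.nio.file.{Files, Paths}
import org.apache.spark.sql.SparkSession

object NonEmptyLocationDemo extends App {
  val warehouse = Files.createTempDirectory("warehouse").toString
  val spark = SparkSession.builder()
    .master("local[*]")
    .config("spark.sql.warehouse.dir", warehouse)
    .getOrCreate()

  // Pre-populate the directory where the managed table `t` would be placed.
  val tableDir = Paths.get(warehouse, "t")
  Files.createDirectories(tableDir)
  Files.write(tableDir.resolve("stale.txt"), "leftover".getBytes)

  // Previously, spark.sql.legacy.allowCreatingManagedTableUsingNonemptyLocation=true
  // permitted this; after this PR it always throws an AnalysisException.
  spark.sql("CREATE TABLE t (id INT) USING parquet")
}
```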
```diff
@@ -1913,16 +1897,6 @@ object SQLConf {
     .checkValues((1 to 9).toSet + Deflater.DEFAULT_COMPRESSION)
     .createWithDefault(Deflater.DEFAULT_COMPRESSION)
 
-  val COMPARE_DATE_TIMESTAMP_IN_TIMESTAMP =
-    buildConf("spark.sql.legacy.compareDateTimestampInTimestamp")
-      .internal()
-      .doc("When true (default), compare Date with Timestamp after converting both sides to " +
-        "Timestamp. This behavior is compatible with Hive 2.2 or later. See HIVE-15236. " +
-        "When false, restore the behavior prior to Spark 2.4. Compare Date with Timestamp after " +
-        "converting both sides to string. This config will be removed in Spark 3.0.")
-      .booleanConf
-      .createWithDefault(true)
-
   val LEGACY_SIZE_OF_NULL = buildConf("spark.sql.legacy.sizeOfNull")
     .doc("If it is set to true, size of null returns -1. This behavior was inherited from Hive. " +
       "The size function returns null for null input if the flag is disabled.")
```
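To make the removed semantics concrete, a hedged worked example of the two comparison modes (assumes a running SparkSession named `spark`; the query is illustrative, not from this PR):

```scala
// Retained (Hive 2.2+) semantics: the DATE is promoted to a TIMESTAMP at
// midnight, so both sides are equal and the query returns true.
spark.sql("SELECT DATE '2019-01-01' = TIMESTAMP '2019-01-01 00:00:00'").show()

// Under the removed pre-2.4 fallback
// (spark.sql.legacy.compareDateTimestampInTimestamp=false), both sides were
// compared as strings, and since '2019-01-01' differs from
// '2019-01-01 00:00:00', the same query returned false.
```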
```diff
@@ -2236,8 +2210,6 @@ class SQLConf extends Serializable with Logging {
   def caseSensitiveInferenceMode: HiveCaseSensitiveInferenceMode.Value =
     HiveCaseSensitiveInferenceMode.withName(getConf(HIVE_CASE_SENSITIVE_INFERENCE))
 
-  def compareDateTimestampInTimestamp : Boolean = getConf(COMPARE_DATE_TIMESTAMP_IN_TIMESTAMP)
-
   def gatherFastStats: Boolean = getConf(GATHER_FASTSTAT)
 
   def optimizerMetadataOnly: Boolean = getConf(OPTIMIZER_METADATA_ONLY)
```
```diff
@@ -2516,9 +2488,6 @@ class SQLConf extends Serializable with Logging {
   def eltOutputAsString: Boolean = getConf(ELT_OUTPUT_AS_STRING)
 
-  def allowCreatingManagedTableUsingNonemptyLocation: Boolean =
-    getConf(ALLOW_CREATING_MANAGED_TABLE_USING_NONEMPTY_LOCATION)
-
   def validatePartitionColumns: Boolean = getConf(VALIDATE_PARTITION_COLUMNS)
 
   def partitionOverwriteMode: PartitionOverwriteMode.Value =
```
Member: Can we throw an exception if users try to set a removed conf to a value that is different from its default?

Member (Author): Sure, we could throw an exception for the 3 configs. I am just wondering why we silently ignore non-existent SQL configs. How about throwing an AnalysisException for non-existent SQL configs that have the spark.sql prefix but are not present in spark/sql/catalyst/src/main/scala/org/apache/spark/sql/internal/SQLConf.scala (line 50 in 6d64fc2)? Or are there SQL configs that we have to bypass for some reason?

Member (Author): Here is the PR: #27057
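For reference, a minimal self-contained sketch of the approach discussed above: keep a registry of removed configs together with the default they last had, and reject any attempt to set them to a different value. The names, message text, and exception type below are illustrative assumptions, not necessarily what #27057 uses.

```scala
// Illustrative registry of removed SQL configs and their last defaults.
final case class RemovedConfig(key: String, version: String, defaultValue: String)

object RemovedSQLConfigs {
  private val removed: Map[String, RemovedConfig] = Seq(
    RemovedConfig("spark.sql.fromJsonForceNullableSchema", "3.0.0", "true"),
    RemovedConfig("spark.sql.legacy.allowCreatingManagedTableUsingNonemptyLocation",
      "3.0.0", "false"),
    RemovedConfig("spark.sql.legacy.compareDateTimestampInTimestamp", "3.0.0", "true")
  ).map(c => c.key -> c).toMap

  // Called from the config-setting path: setting a removed config to its old
  // default stays a silent no-op; any other value fails fast.
  def checkRemoved(key: String, value: String): Unit =
    removed.get(key).foreach { c =>
      if (value != c.defaultValue) {
        throw new IllegalArgumentException(
          s"The SQL config '$key' was removed in Spark ${c.version} and " +
          s"can only be set to its former default '${c.defaultValue}'.")
      }
    }
}

// Usage:
//   RemovedSQLConfigs.checkRemoved("spark.sql.fromJsonForceNullableSchema", "false")
//   => IllegalArgumentException
```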