From b40c6b2ee168c99b8ed742b41979e48fa925defa Mon Sep 17 00:00:00 2001
From: Jia Fan
Date: Sat, 29 Jul 2023 09:45:29 +0800
Subject: [PATCH 1/6] [SPARK-44577][SQL] Fix INSERT BY NAME returns
 nonsensical error message

---
 .../sql/catalyst/analysis/TableOutputResolver.scala | 10 +++++++---
 .../org/apache/spark/sql/SQLInsertTestSuite.scala   |  5 +++--
 2 files changed, 10 insertions(+), 5 deletions(-)

diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/TableOutputResolver.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/TableOutputResolver.scala
index 894cd0b39911..ef1f8bcb3b71 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/TableOutputResolver.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/TableOutputResolver.scala
@@ -241,7 +241,7 @@ object TableOutputResolver {
         val extraCols = inputCols.filterNot(col => matchedCols.contains(col.name))
           .map(col => s"${toSQLId(col.name)}").mkString(", ")
         throw QueryCompilationErrors.incompatibleDataToTableExtraStructFieldsError(
-          tableName, colPath.quoted, extraCols
+          tableName, pathQuotedSafety(colPath), extraCols
         )
       } else {
         reordered
@@ -251,6 +251,10 @@ object TableOutputResolver {
     }
   }
 
+  private def pathQuotedSafety(colPath: Seq[String]): String = {
+    if (colPath.isEmpty) "table" else colPath.quoted
+  }
+
   private def resolveColumnsByPosition(
       tableName: String,
       inputCols: Seq[NamedExpression],
@@ -264,14 +268,14 @@ object TableOutputResolver {
         .map(col => toSQLId(col.name))
         .mkString(", ")
       throw QueryCompilationErrors.incompatibleDataToTableExtraStructFieldsError(
-        tableName, colPath.quoted, extraColsStr
+        tableName, pathQuotedSafety(colPath), extraColsStr
       )
     } else if (inputCols.size < expectedCols.size) {
       val missingColsStr = expectedCols.takeRight(expectedCols.size - inputCols.size)
         .map(col => toSQLId(col.name))
         .mkString(", ")
       throw QueryCompilationErrors.incompatibleDataToTableStructMissingFieldsError(
-        tableName, colPath.quoted, missingColsStr
+        tableName, pathQuotedSafety(colPath), missingColsStr
       )
     }
 
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/SQLInsertTestSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/SQLInsertTestSuite.scala
index 0bbed51d0a90..a402c3d591e2 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/SQLInsertTestSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/SQLInsertTestSuite.scala
@@ -213,9 +213,10 @@ trait SQLInsertTestSuite extends QueryTest with SQLTestUtils {
       exception = intercept[AnalysisException] {
         processInsert("t1", df, overwrite = false, byName = true)
       },
-      v1ErrorClass = "_LEGACY_ERROR_TEMP_1186",
+      v1ErrorClass = "INCOMPATIBLE_DATA_FOR_TABLE.EXTRA_STRUCT_FIELDS",
       v2ErrorClass = "INCOMPATIBLE_DATA_FOR_TABLE.CANNOT_FIND_DATA",
-      v1Parameters = Map.empty[String, String],
+      v1Parameters = Map("tableName" -> "`spark_catalog`.`default`.`t1`", "colName" -> "`table`",
+        "extraFields" -> "`x1`"),
       v2Parameters = Map("tableName" -> "`testcat`.`t1`", "colName" -> "`c1`")
     )
     val df2 = Seq((3, 2, 1, 0)).toDF(Seq("c3", "c2", "c1", "c0"): _*)
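A note on patch 1: `colPath` tracks the path to a nested field, so when the
mismatch is at the top level of the table it is empty, and `colPath.quoted`
renders as an empty identifier; that is how the nonsensical v1 message in
SPARK-44577 arose. The stopgap substitutes the literal word "table" for an
empty path, which is why the updated test now expects "colName" -> "`table`".
A self-contained sketch of the idea (plain Scala; this `quoted` is a
simplified stand-in for Spark's multipart-identifier helper, not the real
implementation):

    // Render a column path the way Spark quotes identifiers: `a`.`b`.`c`.
    def quoted(parts: Seq[String]): String =
      parts.map(p => "`" + p.replace("`", "``") + "`").mkString(".")

    // Patch 1's fallback: an empty path means the problem is at the top
    // level of the table, so report "table" instead of an empty string.
    def pathQuotedSafety(colPath: Seq[String]): String =
      if (colPath.isEmpty) "table" else quoted(colPath)

    quoted(Seq("s", "x1"))  // returns "`s`.`x1`"
    pathQuotedSafety(Nil)   // returns "table"

Wedging the word "table" into a slot meant for a column path is still
awkward, which is what the follow-up commits address.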
From ed32de3e57f9d61c284479ac7c3a8459d15534e3 Mon Sep 17 00:00:00 2001
From: Jia Fan
Date: Mon, 31 Jul 2023 14:50:42 +0800
Subject: [PATCH 2/6] update

---
 .../analysis/ResolveInsertionBase.scala       |  4 +-
 .../analysis/TableOutputResolver.scala        | 41 ++++++++++++-------
 .../sql/errors/QueryCompilationErrors.scala   |  8 ++--
 .../apache/spark/sql/SQLInsertTestSuite.scala |  6 +--
 4 files changed, 35 insertions(+), 24 deletions(-)

diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/ResolveInsertionBase.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/ResolveInsertionBase.scala
index 8b120095bc60..ad89005a093e 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/ResolveInsertionBase.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/ResolveInsertionBase.scala
@@ -36,10 +36,10 @@ abstract class ResolveInsertionBase extends Rule[LogicalPlan] {
     if (i.userSpecifiedCols.size != i.query.output.size) {
       if (i.userSpecifiedCols.size > i.query.output.size) {
         throw QueryCompilationErrors.cannotWriteNotEnoughColumnsToTableError(
-          tblName, i.userSpecifiedCols, i.query)
+          tblName, i.userSpecifiedCols, i.query.output)
       } else {
         throw QueryCompilationErrors.cannotWriteTooManyColumnsToTableError(
-          tblName, i.userSpecifiedCols, i.query)
+          tblName, i.userSpecifiedCols, i.query.output)
       }
     }
     val projectByName = i.userSpecifiedCols.zip(i.query.output)
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/TableOutputResolver.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/TableOutputResolver.scala
index ef1f8bcb3b71..76515e31c149 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/TableOutputResolver.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/TableOutputResolver.scala
@@ -49,7 +49,7 @@ object TableOutputResolver {
 
     if (actualExpectedCols.size < query.output.size) {
       throw QueryCompilationErrors.cannotWriteTooManyColumnsToTableError(
-        tableName, actualExpectedCols.map(_.name), query)
+        tableName, actualExpectedCols.map(_.name), query.output)
     }
 
     val errors = new mutable.ArrayBuffer[String]()
@@ -77,7 +77,7 @@ object TableOutputResolver {
     }
     if (actualExpectedCols.size > queryOutputCols.size) {
       throw QueryCompilationErrors.cannotWriteNotEnoughColumnsToTableError(
-        tableName, actualExpectedCols.map(_.name), query)
+        tableName, actualExpectedCols.map(_.name), query.output)
     }
     resolveColumnsByPosition(tableName, queryOutputCols, actualExpectedCols, conf, errors += _)
 
@@ -240,9 +240,14 @@ object TableOutputResolver {
       if (matchedCols.size < inputCols.length) {
         val extraCols = inputCols.filterNot(col => matchedCols.contains(col.name))
           .map(col => s"${toSQLId(col.name)}").mkString(", ")
-        throw QueryCompilationErrors.incompatibleDataToTableExtraStructFieldsError(
-          tableName, pathQuotedSafety(colPath), extraCols
-        )
+        if (colPath.isEmpty) {
+          throw QueryCompilationErrors.cannotWriteNotEnoughColumnsToTableError(tableName,
+            expectedCols.map(_.name), inputCols.map(_.toAttribute))
+        } else {
+          throw QueryCompilationErrors.incompatibleDataToTableExtraStructFieldsError(
+            tableName, colPath.quoted, extraCols
+          )
+        }
       } else {
         reordered
       }
@@ -251,10 +256,6 @@ object TableOutputResolver {
     }
   }
 
-  private def pathQuotedSafety(colPath: Seq[String]): String = {
-    if (colPath.isEmpty) "table" else colPath.quoted
-  }
-
   private def resolveColumnsByPosition(
       tableName: String,
       inputCols: Seq[NamedExpression],
@@ -267,16 +268,26 @@ object TableOutputResolver {
       val extraColsStr = inputCols.takeRight(inputCols.size - expectedCols.size)
         .map(col => toSQLId(col.name))
         .mkString(", ")
-      throw QueryCompilationErrors.incompatibleDataToTableExtraStructFieldsError(
-        tableName, pathQuotedSafety(colPath), extraColsStr
-      )
+      if (colPath.isEmpty) {
+        throw QueryCompilationErrors.cannotWriteTooManyColumnsToTableError(tableName,
+          expectedCols.map(_.name), inputCols.map(_.toAttribute))
+      } else {
+        throw QueryCompilationErrors.incompatibleDataToTableExtraStructFieldsError(
+          tableName, colPath.quoted, extraColsStr
+        )
+      }
     } else if (inputCols.size < expectedCols.size) {
       val missingColsStr = expectedCols.takeRight(expectedCols.size - inputCols.size)
         .map(col => toSQLId(col.name))
         .mkString(", ")
-      throw QueryCompilationErrors.incompatibleDataToTableStructMissingFieldsError(
-        tableName, pathQuotedSafety(colPath), missingColsStr
-      )
+      if (colPath.isEmpty) {
+        throw QueryCompilationErrors.cannotWriteNotEnoughColumnsToTableError(tableName,
+          expectedCols.map(_.name), inputCols.map(_.toAttribute))
+      } else {
+        throw QueryCompilationErrors.incompatibleDataToTableStructMissingFieldsError(
+          tableName, colPath.quoted, missingColsStr
+        )
+      }
     }
 
     inputCols.zip(expectedCols).flatMap { case (inputCol, expectedCol) =>
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/errors/QueryCompilationErrors.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/errors/QueryCompilationErrors.scala
index 8c653c89eec7..a2891cbddceb 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/errors/QueryCompilationErrors.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/errors/QueryCompilationErrors.scala
@@ -2130,25 +2130,25 @@ private[sql] object QueryCompilationErrors extends QueryErrorsBase {
   def cannotWriteTooManyColumnsToTableError(
       tableName: String,
       expected: Seq[String],
-      query: LogicalPlan): Throwable = {
+      queryOutput: Seq[Attribute]): Throwable = {
     new AnalysisException(
       errorClass = "INSERT_COLUMN_ARITY_MISMATCH.TOO_MANY_DATA_COLUMNS",
       messageParameters = Map(
         "tableName" -> toSQLId(tableName),
         "tableColumns" -> expected.map(c => toSQLId(c)).mkString(", "),
-        "dataColumns" -> query.output.map(c => toSQLId(c.name)).mkString(", ")))
+        "dataColumns" -> queryOutput.map(c => toSQLId(c.name)).mkString(", ")))
   }
 
   def cannotWriteNotEnoughColumnsToTableError(
       tableName: String,
       expected: Seq[String],
-      query: LogicalPlan): Throwable = {
+      queryOutput: Seq[Attribute]): Throwable = {
     new AnalysisException(
       errorClass = "INSERT_COLUMN_ARITY_MISMATCH.NOT_ENOUGH_DATA_COLUMNS",
       messageParameters = Map(
         "tableName" -> toSQLId(tableName),
         "tableColumns" -> expected.map(c => toSQLId(c)).mkString(", "),
-        "dataColumns" -> query.output.map(c => toSQLId(c.name)).mkString(", ")))
+        "dataColumns" -> queryOutput.map(c => toSQLId(c.name)).mkString(", ")))
   }
 
   def incompatibleDataToTableCannotFindDataError(
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/SQLInsertTestSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/SQLInsertTestSuite.scala
index a402c3d591e2..38d4ba829291 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/SQLInsertTestSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/SQLInsertTestSuite.scala
@@ -213,10 +213,10 @@ trait SQLInsertTestSuite extends QueryTest with SQLTestUtils {
       exception = intercept[AnalysisException] {
         processInsert("t1", df, overwrite = false, byName = true)
       },
-      v1ErrorClass = "INCOMPATIBLE_DATA_FOR_TABLE.EXTRA_STRUCT_FIELDS",
+      v1ErrorClass = "INSERT_COLUMN_ARITY_MISMATCH.NOT_ENOUGH_DATA_COLUMNS",
       v2ErrorClass = "INCOMPATIBLE_DATA_FOR_TABLE.CANNOT_FIND_DATA",
-      v1Parameters = Map("tableName" -> "`spark_catalog`.`default`.`t1`", "colName" -> "`table`",
-        "extraFields" -> "`x1`"),
+      v1Parameters = Map("tableName" -> "`spark_catalog`.`default`.`t1`",
+        "tableColumns" -> "`c1`, `c2`, `c3`", "dataColumns" -> "`c3`, `c2`, `x1`"),
       v2Parameters = Map("tableName" -> "`testcat`.`t1`", "colName" -> "`c1`")
     )
     val df2 = Seq((3, 2, 1, 0)).toDF(Seq("c3", "c2", "c1", "c0"): _*)
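Patch 2 drops the "table" placeholder: when `colPath` is empty, the mismatch
is at the top level of the table, so it is reported through the existing
INSERT_COLUMN_ARITY_MISMATCH classes instead of a struct-field error. To make
that possible, `cannotWriteTooManyColumnsToTableError` and
`cannotWriteNotEnoughColumnsToTableError` now take the query output as
`Seq[Attribute]` rather than a `LogicalPlan`, because `TableOutputResolver`
only has the matched column list at that point, not the original plan. A
hedged sketch of the scenario the updated test pins down (assumes a
SparkSession named `spark`; the table layout and column names mirror the
suite, and `INSERT INTO ... BY NAME` requires Spark 3.5 or later):

    // t1 has columns c1, c2, c3; the query supplies x1, which matches nothing.
    spark.sql("CREATE TABLE t1 (c1 INT, c2 INT, c3 INT) USING parquet")
    spark.sql("INSERT INTO t1 BY NAME SELECT 3 AS c3, 2 AS c2, 1 AS x1")
    // With this patch the v1 path fails with
    // INSERT_COLUMN_ARITY_MISMATCH.NOT_ENOUGH_DATA_COLUMNS:
    //   tableColumns = `c1`, `c2`, `c3`
    //   dataColumns  = `c3`, `c2`, `x1`
    // The v2 path (the `testcat` table) keeps reporting
    // INCOMPATIBLE_DATA_FOR_TABLE.CANNOT_FIND_DATA for `c1`.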
From 5058a895a54bcb76b7d76582a7d24eb61d4bfa74 Mon Sep 17 00:00:00 2001
From: Jia Fan
Date: Tue, 1 Aug 2023 10:17:08 +0800
Subject: [PATCH 3/6] update

---
 .../sql/catalyst/analysis/TableOutputResolver.scala     | 9 +++++----
 .../scala/org/apache/spark/sql/SQLInsertTestSuite.scala | 5 ++---
 2 files changed, 7 insertions(+), 7 deletions(-)

diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/TableOutputResolver.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/TableOutputResolver.scala
index 76515e31c149..e0d6257ea1eb 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/TableOutputResolver.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/TableOutputResolver.scala
@@ -238,12 +238,13 @@ object TableOutputResolver {
 
     if (reordered.length == expectedCols.length) {
       if (matchedCols.size < inputCols.length) {
-        val extraCols = inputCols.filterNot(col => matchedCols.contains(col.name))
-          .map(col => s"${toSQLId(col.name)}").mkString(", ")
         if (colPath.isEmpty) {
-          throw QueryCompilationErrors.cannotWriteNotEnoughColumnsToTableError(tableName,
-            expectedCols.map(_.name), inputCols.map(_.toAttribute))
+          val cannotFindCol = expectedCols.filter(col => !matchedCols.contains(col.name)).head.name
+          throw QueryCompilationErrors.incompatibleDataToTableCannotFindDataError(tableName,
+            cannotFindCol)
         } else {
+          val extraCols = inputCols.filterNot(col => matchedCols.contains(col.name))
+          .map(col => s"${toSQLId(col.name)}").mkString(", ")
           throw QueryCompilationErrors.incompatibleDataToTableExtraStructFieldsError(
             tableName, colPath.quoted, extraCols
           )
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/SQLInsertTestSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/SQLInsertTestSuite.scala
index 38d4ba829291..08a0a00df055 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/SQLInsertTestSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/SQLInsertTestSuite.scala
@@ -213,10 +213,9 @@ trait SQLInsertTestSuite extends QueryTest with SQLTestUtils {
       exception = intercept[AnalysisException] {
         processInsert("t1", df, overwrite = false, byName = true)
       },
-      v1ErrorClass = "INSERT_COLUMN_ARITY_MISMATCH.NOT_ENOUGH_DATA_COLUMNS",
+      v1ErrorClass = "INCOMPATIBLE_DATA_FOR_TABLE.CANNOT_FIND_DATA",
       v2ErrorClass = "INCOMPATIBLE_DATA_FOR_TABLE.CANNOT_FIND_DATA",
-      v1Parameters = Map("tableName" -> "`spark_catalog`.`default`.`t1`",
-        "tableColumns" -> "`c1`, `c2`, `c3`", "dataColumns" -> "`c3`, `c2`, `x1`"),
+      v1Parameters = Map("tableName" -> "`spark_catalog`.`default`.`t1`", "colName" -> "`c1`"),
       v2Parameters = Map("tableName" -> "`testcat`.`t1`", "colName" -> "`c1`")
     )
     val df2 = Seq((3, 2, 1, 0)).toDF(Seq("c3", "c2", "c1", "c0"): _*)

From 4936877b40bbd46a53450c0199ebdad38cd121c2 Mon Sep 17 00:00:00 2001
From: Jia Fan
Date: Mon, 7 Aug 2023 16:09:18 +0800
Subject: [PATCH 4/6] fix format

---
 .../spark/sql/catalyst/analysis/TableOutputResolver.scala | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/TableOutputResolver.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/TableOutputResolver.scala
index e0d6257ea1eb..f60b8e113b0f 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/TableOutputResolver.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/TableOutputResolver.scala
@@ -244,7 +244,7 @@ object TableOutputResolver {
             cannotFindCol)
         } else {
           val extraCols = inputCols.filterNot(col => matchedCols.contains(col.name))
-          .map(col => s"${toSQLId(col.name)}").mkString(", ")
+            .map(col => s"${toSQLId(col.name)}").mkString(", ")
           throw QueryCompilationErrors.incompatibleDataToTableExtraStructFieldsError(
             tableName, colPath.quoted, extraCols
           )
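Patch 3 makes the two catalog paths agree when by-name matching fails at the
top level: instead of an arity mismatch, v1 now reports the first expected
column that no input column matched, reusing CANNOT_FIND_DATA exactly as the
v2 path already does. The `extraCols` string also moves into the `else`
branch, so it is only built for the nested-struct case. The selection logic
in isolation (plain Scala, using the suite's column names):

    val expectedNames = Seq("c1", "c2", "c3") // table columns
    val matchedNames  = Set("c2", "c3")       // input columns matched by name
    // Mirrors expectedCols.filter(col => !matchedCols.contains(col.name)).head.name
    val cannotFindCol = expectedNames.filter(c => !matchedNames.contains(c)).head
    // cannotFindCol == "c1", so v1 and v2 now both report `c1`

Patch 4 ("fix format") is purely cosmetic: it restores the continuation
indent on the `.map` line that patch 3 left misaligned.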
From 01dd81bb03a0c82e88d0566bec27bb7efbe7c700 Mon Sep 17 00:00:00 2001
From: Jia Fan
Date: Fri, 18 Aug 2023 13:56:00 +0800
Subject: [PATCH 5/6] update

---
 .../src/main/resources/error/error-classes.json      |  5 +++++
 ...itions-incompatible-data-for-table-error-class.md |  4 ++++
 .../sql/catalyst/analysis/TableOutputResolver.scala  | 12 +++++-------
 .../spark/sql/errors/QueryCompilationErrors.scala    | 11 +++++++++++
 .../org/apache/spark/sql/SQLInsertTestSuite.scala    |  4 ++--
 5 files changed, 27 insertions(+), 9 deletions(-)

diff --git a/common/utils/src/main/resources/error/error-classes.json b/common/utils/src/main/resources/error/error-classes.json
index 753004041943..8aa0de886c6b 100644
--- a/common/utils/src/main/resources/error/error-classes.json
+++ b/common/utils/src/main/resources/error/error-classes.json
@@ -1001,6 +1001,11 @@
         "Cannot safely cast <colName> <srcType> to <targetType>."
       ]
     },
+    "EXTRA_FIELDS" : {
+      "message" : [
+        "Cannot write extra fields <colName>."
+      ]
+    },
     "EXTRA_STRUCT_FIELDS" : {
       "message" : [
         "Cannot write extra fields <extraFields> to the struct <colName>."
diff --git a/docs/sql-error-conditions-incompatible-data-for-table-error-class.md b/docs/sql-error-conditions-incompatible-data-for-table-error-class.md
index f70b69ba6c5b..c555c712d4b1 100644
--- a/docs/sql-error-conditions-incompatible-data-for-table-error-class.md
+++ b/docs/sql-error-conditions-incompatible-data-for-table-error-class.md
@@ -37,6 +37,10 @@ Cannot find data for the output column `<colName>`.
 
 Cannot safely cast `<colName>` `<srcType>` to `<targetType>`.
 
+## EXTRA_FIELDS
+
+Cannot write extra fields `<colName>`.
+
 ## EXTRA_STRUCT_FIELDS
 
 Cannot write extra fields `<extraFields>` to the struct `<colName>`.
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/TableOutputResolver.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/TableOutputResolver.scala
index f60b8e113b0f..de2e2d9f0216 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/TableOutputResolver.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/TableOutputResolver.scala
@@ -238,16 +238,14 @@ object TableOutputResolver {
 
     if (reordered.length == expectedCols.length) {
       if (matchedCols.size < inputCols.length) {
+        val extraCols = inputCols.filterNot(col => matchedCols.contains(col.name))
+          .map(col => s"${toSQLId(col.name)}").mkString(", ")
         if (colPath.isEmpty) {
-          val cannotFindCol = expectedCols.filter(col => !matchedCols.contains(col.name)).head.name
-          throw QueryCompilationErrors.incompatibleDataToTableCannotFindDataError(tableName,
-            cannotFindCol)
+          throw QueryCompilationErrors.incompatibleDataToTableExtraFieldsError(tableName,
+            extraCols)
         } else {
-          val extraCols = inputCols.filterNot(col => matchedCols.contains(col.name))
-            .map(col => s"${toSQLId(col.name)}").mkString(", ")
           throw QueryCompilationErrors.incompatibleDataToTableExtraStructFieldsError(
-            tableName, colPath.quoted, extraCols
-          )
+            tableName, colPath.quoted, extraCols)
         }
       } else {
         reordered
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/errors/QueryCompilationErrors.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/errors/QueryCompilationErrors.scala
index a2891cbddceb..6607ff663718 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/errors/QueryCompilationErrors.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/errors/QueryCompilationErrors.scala
@@ -2173,6 +2173,17 @@ private[sql] object QueryCompilationErrors extends QueryErrorsBase {
     )
   }
 
+  def incompatibleDataToTableExtraFieldsError(
+      tableName: String, colName: String): Throwable = {
+    new AnalysisException(
+      errorClass = "INCOMPATIBLE_DATA_FOR_TABLE.EXTRA_FIELDS",
+      messageParameters = Map(
+        "tableName" -> toSQLId(tableName),
+        "colName" -> toSQLId(colName)
+      )
+    )
+  }
+
   def incompatibleDataToTableExtraStructFieldsError(
       tableName: String, colName: String, extraFields: String): Throwable = {
     new AnalysisException(
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/SQLInsertTestSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/SQLInsertTestSuite.scala
index 08a0a00df055..8a6337cf8536 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/SQLInsertTestSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/SQLInsertTestSuite.scala
@@ -213,9 +213,9 @@ trait SQLInsertTestSuite extends QueryTest with SQLTestUtils {
       exception = intercept[AnalysisException] {
         processInsert("t1", df, overwrite = false, byName = true)
       },
-      v1ErrorClass = "INCOMPATIBLE_DATA_FOR_TABLE.CANNOT_FIND_DATA",
+      v1ErrorClass = "INCOMPATIBLE_DATA_FOR_TABLE.EXTRA_FIELDS",
       v2ErrorClass = "INCOMPATIBLE_DATA_FOR_TABLE.CANNOT_FIND_DATA",
-      v1Parameters = Map("tableName" -> "`spark_catalog`.`default`.`t1`", "colName" -> "`c1`"),
+      v1Parameters = Map("tableName" -> "`spark_catalog`.`default`.`t1`", "colName" -> "`x1`"),
       v2Parameters = Map("tableName" -> "`testcat`.`t1`", "colName" -> "`c1`")
     )
     val df2 = Seq((3, 2, 1, 0)).toDF(Seq("c3", "c2", "c1", "c0"): _*)
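Patch 5 changes direction once more: instead of naming one unfindable output
column, it introduces a dedicated EXTRA_FIELDS error that lists every input
column that matched nothing. Note that the new helper still funnels the
pre-quoted `extraCols` string through a parameter named `colName` and quotes
it again; patch 6 cleans both of those up. How the new v1 message would
render for the suite's scenario (a sketch of the template substitution, not
Spark's actual formatter):

    // Subclass template from the json above, with the test's parameter value:
    val rendered = "Cannot write extra fields <colName>."
      .replace("<colName>", "`x1`")
    // rendered == "Cannot write extra fields `x1`."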
From 827a25c2b640747de84e8e97a37e213558c6a22f Mon Sep 17 00:00:00 2001
From: Jia Fan
Date: Thu, 31 Aug 2023 15:45:29 +0800
Subject: [PATCH 6/6] update

---
 common/utils/src/main/resources/error/error-classes.json  | 4 ++--
 ...-conditions-incompatible-data-for-table-error-class.md | 4 ++--
 .../spark/sql/catalyst/analysis/TableOutputResolver.scala | 2 +-
 .../apache/spark/sql/errors/QueryCompilationErrors.scala  | 8 ++++----
 .../scala/org/apache/spark/sql/SQLInsertTestSuite.scala   | 5 +++--
 5 files changed, 12 insertions(+), 11 deletions(-)

diff --git a/common/utils/src/main/resources/error/error-classes.json b/common/utils/src/main/resources/error/error-classes.json
index 7907602d17a0..c6c9d421d208 100644
--- a/common/utils/src/main/resources/error/error-classes.json
+++ b/common/utils/src/main/resources/error/error-classes.json
@@ -1025,9 +1025,9 @@
         "Cannot safely cast <colName> <srcType> to <targetType>."
       ]
     },
-    "EXTRA_FIELDS" : {
+    "EXTRA_COLUMNS" : {
       "message" : [
-        "Cannot write extra fields <colName>."
+        "Cannot write extra columns <extraColumns>."
       ]
     },
     "EXTRA_STRUCT_FIELDS" : {
diff --git a/docs/sql-error-conditions-incompatible-data-for-table-error-class.md b/docs/sql-error-conditions-incompatible-data-for-table-error-class.md
index c555c712d4b1..0dd28e9d55c5 100644
--- a/docs/sql-error-conditions-incompatible-data-for-table-error-class.md
+++ b/docs/sql-error-conditions-incompatible-data-for-table-error-class.md
@@ -37,9 +37,9 @@ Cannot find data for the output column `<colName>`.
 
 Cannot safely cast `<colName>` `<srcType>` to `<targetType>`.
 
-## EXTRA_FIELDS
+## EXTRA_COLUMNS
 
-Cannot write extra fields `<colName>`.
+Cannot write extra columns `<extraColumns>`.
 
 ## EXTRA_STRUCT_FIELDS
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/TableOutputResolver.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/TableOutputResolver.scala
index 4e68e1bfeb16..ddb17d6c43e0 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/TableOutputResolver.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/TableOutputResolver.scala
@@ -279,7 +279,7 @@ object TableOutputResolver {
         val extraCols = inputCols.filterNot(col => matchedCols.contains(col.name))
           .map(col => s"${toSQLId(col.name)}").mkString(", ")
         if (colPath.isEmpty) {
-          throw QueryCompilationErrors.incompatibleDataToTableExtraFieldsError(tableName,
+          throw QueryCompilationErrors.incompatibleDataToTableExtraColumnsError(tableName,
             extraCols)
         } else {
           throw QueryCompilationErrors.incompatibleDataToTableExtraStructFieldsError(
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/errors/QueryCompilationErrors.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/errors/QueryCompilationErrors.scala
index c121385ee455..32688c8641e0 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/errors/QueryCompilationErrors.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/errors/QueryCompilationErrors.scala
@@ -2191,13 +2191,13 @@ private[sql] object QueryCompilationErrors extends QueryErrorsBase with Compilat
     )
   }
 
-  def incompatibleDataToTableExtraFieldsError(
-      tableName: String, colName: String): Throwable = {
+  def incompatibleDataToTableExtraColumnsError(
+      tableName: String, extraColumns: String): Throwable = {
     new AnalysisException(
-      errorClass = "INCOMPATIBLE_DATA_FOR_TABLE.EXTRA_FIELDS",
+      errorClass = "INCOMPATIBLE_DATA_FOR_TABLE.EXTRA_COLUMNS",
       messageParameters = Map(
         "tableName" -> toSQLId(tableName),
-        "colName" -> toSQLId(colName)
+        "extraColumns" -> extraColumns
       )
     )
   }
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/SQLInsertTestSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/SQLInsertTestSuite.scala
index 8a6337cf8536..34e4ded09b5f 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/SQLInsertTestSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/SQLInsertTestSuite.scala
@@ -213,9 +213,10 @@ trait SQLInsertTestSuite extends QueryTest with SQLTestUtils {
       exception = intercept[AnalysisException] {
         processInsert("t1", df, overwrite = false, byName = true)
       },
-      v1ErrorClass = "INCOMPATIBLE_DATA_FOR_TABLE.EXTRA_FIELDS",
+      v1ErrorClass = "INCOMPATIBLE_DATA_FOR_TABLE.EXTRA_COLUMNS",
       v2ErrorClass = "INCOMPATIBLE_DATA_FOR_TABLE.CANNOT_FIND_DATA",
-      v1Parameters = Map("tableName" -> "`spark_catalog`.`default`.`t1`", "colName" -> "`x1`"),
+      v1Parameters = Map("tableName" -> "`spark_catalog`.`default`.`t1`",
+        "extraColumns" -> "`x1`"),
       v2Parameters = Map("tableName" -> "`testcat`.`t1`", "colName" -> "`c1`")
     )
     val df2 = Seq((3, 2, 1, 0)).toDF(Seq("c3", "c2", "c1", "c0"): _*)
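Patch 6 settles the naming: at the table level the offenders are columns, not
struct fields, so the class becomes EXTRA_COLUMNS with an `extraColumns`
parameter, mirroring the `extraFields` parameter that EXTRA_STRUCT_FIELDS
already uses. It also fixes a quoting subtlety from patch 5: `extraCols`
arrives already backtick-quoted, so running it through `toSQLId` again would
mangle the list. A minimal illustration (plain Scala; this `toSQLId` is a
simplified stand-in for Spark's helper):

    // Simplified stand-in: backtick-quote a single identifier part.
    def toSQLId(part: String): String = "`" + part.replace("`", "``") + "`"

    val extraCols = Seq("x1", "x2").map(toSQLId).mkString(", ")
    // extraCols          == "`x1`, `x2`"         (what EXTRA_COLUMNS reports)
    // toSQLId(extraCols) == "```x1``, ``x2```"   (double-quoted and mangled)

With that, the v1 expectation lands on INCOMPATIBLE_DATA_FOR_TABLE.EXTRA_COLUMNS
with extraColumns = `x1`, while the v2 path keeps CANNOT_FIND_DATA; that split
is the behavior the series converges on.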