diff --git a/core/src/main/resources/error/error-classes.json b/core/src/main/resources/error/error-classes.json
index 4da9d2f9fbca..8c0f1659ea50 100644
--- a/core/src/main/resources/error/error-classes.json
+++ b/core/src/main/resources/error/error-classes.json
@@ -484,6 +484,11 @@
       "Failed to execute user defined function (<functionName>: (<signature>) => <result>)"
     ]
   },
+  "FAILED_FUNCTION_CALL" : {
+    "message" : [
+      "Failed preparing of the function <funcName> for call. Please, double check function's arguments."
+    ]
+  },
   "FAILED_RENAME_PATH" : {
     "message" : [
       "Failed to rename <sourcePath> to <targetPath> as destination already exists"
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/FunctionRegistry.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/FunctionRegistry.scala
index f5e494e90967..a1cecc4b6e09 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/FunctionRegistry.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/FunctionRegistry.scala
@@ -23,7 +23,6 @@ import javax.annotation.concurrent.GuardedBy
 import scala.collection.mutable
 import scala.reflect.ClassTag

-import org.apache.spark.SparkThrowable
 import org.apache.spark.internal.Logging
 import org.apache.spark.sql.AnalysisException
 import org.apache.spark.sql.catalyst.FunctionIdentifier
@@ -132,11 +131,7 @@ object FunctionRegistryBase {
       } catch {
         // the exception is an invocation exception. To get a meaningful message, we need the
         // cause.
-        case e: Exception =>
-          throw e.getCause match {
-            case ae: SparkThrowable => ae
-            case _ => new AnalysisException(e.getCause.getMessage)
-          }
+        case e: Exception => throw QueryCompilationErrors.funcBuildError(name, e)
       }
     } else {
       // Otherwise, find a constructor method that matches the number of arguments, and use that.
@@ -153,7 +148,7 @@ object FunctionRegistryBase {
       } catch {
         // the exception is an invocation exception. To get a meaningful message, we need the
         // cause.
-        case e: Exception => throw new AnalysisException(e.getCause.getMessage)
+        case e: Exception => throw QueryCompilationErrors.funcBuildError(name, e)
       }
     }
   }
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/errors/QueryCompilationErrors.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/errors/QueryCompilationErrors.scala
index cbdbb6adc11b..6e353b2499db 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/errors/QueryCompilationErrors.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/errors/QueryCompilationErrors.scala
@@ -21,7 +21,7 @@ import scala.collection.mutable

 import org.apache.hadoop.fs.Path

-import org.apache.spark.SparkThrowableHelper
+import org.apache.spark.{SparkThrowable, SparkThrowableHelper}
 import org.apache.spark.sql.AnalysisException
 import org.apache.spark.sql.catalyst.{FunctionIdentifier, QualifiedTableName, TableIdentifier}
 import org.apache.spark.sql.catalyst.analysis.{CannotReplaceMissingTableException, FunctionAlreadyExistsException, NamespaceAlreadyExistsException, NoSuchFunctionException, NoSuchNamespaceException, NoSuchPartitionException, NoSuchTableException, ResolvedTable, Star, TableAlreadyExistsException, UnresolvedRegex}
@@ -3393,4 +3393,15 @@ private[sql] object QueryCompilationErrors extends QueryErrorsBase {
         "unsupported" -> unsupported.toString,
         "class" -> unsupported.getClass.toString))
   }
+
+  def funcBuildError(funcName: String, cause: Exception): Throwable = {
+    cause.getCause match {
+      case st: SparkThrowable with Throwable => st
+      case other =>
+        new AnalysisException(
+          errorClass = "FAILED_FUNCTION_CALL",
+          messageParameters = Map("funcName" -> toSQLId(funcName)),
+          cause = Option(other))
+    }
+  }
 }
diff --git a/sql/core/src/test/resources/sql-tests/results/ansi/string-functions.sql.out b/sql/core/src/test/resources/sql-tests/results/ansi/string-functions.sql.out
index 41f1922f8bd5..3ab49c14bef1 100644
--- a/sql/core/src/test/resources/sql-tests/results/ansi/string-functions.sql.out
+++ b/sql/core/src/test/resources/sql-tests/results/ansi/string-functions.sql.out
@@ -1609,7 +1609,21 @@ select to_binary('abc', 1)
 struct<>
 -- !query output
 org.apache.spark.sql.AnalysisException
-The 'format' parameter of function 'to_binary' needs to be a string literal.; line 1 pos 7
+{
+  "errorClass" : "_LEGACY_ERROR_TEMP_1100",
+  "messageParameters" : {
+    "argName" : "format",
+    "funcName" : "to_binary",
+    "requiredType" : "string"
+  },
+  "queryContext" : [ {
+    "objectType" : "",
+    "objectName" : "",
+    "startIndex" : 8,
+    "stopIndex" : 26,
+    "fragment" : "to_binary('abc', 1)"
+  } ]
+}


 -- !query
diff --git a/sql/core/src/test/resources/sql-tests/results/csv-functions.sql.out b/sql/core/src/test/resources/sql-tests/results/csv-functions.sql.out
index d76f20ee835a..c0df2751933f 100644
--- a/sql/core/src/test/resources/sql-tests/results/csv-functions.sql.out
+++ b/sql/core/src/test/resources/sql-tests/results/csv-functions.sql.out
@@ -21,7 +21,19 @@ select from_csv('1', 1)
 struct<>
 -- !query output
 org.apache.spark.sql.AnalysisException
-The expression '1' is not a valid schema string.; line 1 pos 7
+{
+  "errorClass" : "_LEGACY_ERROR_TEMP_1092",
+  "messageParameters" : {
+    "expr" : "1"
+  },
+  "queryContext" : [ {
+    "objectType" : "",
+    "objectName" : "",
+    "startIndex" : 8,
+    "stopIndex" : 23,
+    "fragment" : "from_csv('1', 1)"
+  } ]
+}


 -- !query
@@ -30,20 +42,21 @@ select from_csv('1', 'a InvalidType')
 struct<>
 -- !query output
 org.apache.spark.sql.AnalysisException
-Cannot parse the data type: 
-[PARSE_SYNTAX_ERROR] Syntax error at or near 'InvalidType': extra input 'InvalidType'(line 1, pos 2)
-
-== SQL ==
-a InvalidType
---^^^
-
-Failed fallback parsing: 
-DataType invalidtype is not supported.(line 1, pos 2)
-
-== SQL ==
-a InvalidType
---^^^
-; line 1 pos 7
+{
+  "errorClass" : "_LEGACY_ERROR_TEMP_1227",
+  "messageParameters" : {
+    "e1" : "\n[PARSE_SYNTAX_ERROR] Syntax error at or near 'InvalidType': extra input 'InvalidType'(line 1, pos 2)\n\n== SQL ==\na InvalidType\n--^^^\n",
+    "e2" : "\nDataType invalidtype is not supported.(line 1, pos 2)\n\n== SQL ==\na InvalidType\n--^^^\n",
+    "msg" : "Cannot parse the data type: "
+  },
+  "queryContext" : [ {
+    "objectType" : "",
+    "objectName" : "",
+    "startIndex" : 8,
+    "stopIndex" : 37,
+    "fragment" : "from_csv('1', 'a InvalidType')"
+  } ]
+}


 -- !query
@@ -52,7 +65,16 @@ select from_csv('1', 'a INT', named_struct('mode', 'PERMISSIVE'))
 struct<>
 -- !query output
 org.apache.spark.sql.AnalysisException
-Must use a map() function for options.; line 1 pos 7
+{
+  "errorClass" : "_LEGACY_ERROR_TEMP_1096",
+  "queryContext" : [ {
+    "objectType" : "",
+    "objectName" : "",
+    "startIndex" : 8,
+    "stopIndex" : 65,
+    "fragment" : "from_csv('1', 'a INT', named_struct('mode', 'PERMISSIVE'))"
+  } ]
+}


 -- !query
@@ -61,7 +83,19 @@ select from_csv('1', 'a INT', map('mode', 1))
 struct<>
 -- !query output
 org.apache.spark.sql.AnalysisException
-A type of keys and values in map() must be string, but got map<string,int>.; line 1 pos 7
+{
+  "errorClass" : "_LEGACY_ERROR_TEMP_1095",
+  "messageParameters" : {
+    "map" : "map<string,int>"
+  },
+  "queryContext" : [ {
+    "objectType" : "",
+    "objectName" : "",
+    "startIndex" : 8,
+    "stopIndex" : 45,
+    "fragment" : "from_csv('1', 'a INT', map('mode', 1))"
+  } ]
+}


 -- !query
@@ -187,7 +221,16 @@ select to_csv(named_struct('a', 1, 'b', 2), named_struct('mode', 'PERMISSIVE'))
 struct<>
 -- !query output
 org.apache.spark.sql.AnalysisException
-Must use a map() function for options.; line 1 pos 7
+{
+  "errorClass" : "_LEGACY_ERROR_TEMP_1096",
+  "queryContext" : [ {
+    "objectType" : "",
+    "objectName" : "",
+    "startIndex" : 8,
+    "stopIndex" : 79,
+    "fragment" : "to_csv(named_struct('a', 1, 'b', 2), named_struct('mode', 'PERMISSIVE'))"
+  } ]
+}


 -- !query
@@ -196,4 +239,16 @@ select to_csv(named_struct('a', 1, 'b', 2), map('mode', 1))
 struct<>
 -- !query output
 org.apache.spark.sql.AnalysisException
-A type of keys and values in map() must be string, but got map<string,int>.; line 1 pos 7
+{
+  "errorClass" : "_LEGACY_ERROR_TEMP_1095",
+  "messageParameters" : {
+    "map" : "map<string,int>"
+  },
+  "queryContext" : [ {
+    "objectType" : "",
+    "objectName" : "",
+    "startIndex" : 8,
+    "stopIndex" : 59,
+    "fragment" : "to_csv(named_struct('a', 1, 'b', 2), map('mode', 1))"
+  } ]
+}
diff --git a/sql/core/src/test/resources/sql-tests/results/extract.sql.out b/sql/core/src/test/resources/sql-tests/results/extract.sql.out
index 1c40f623ea1c..298e39813161 100644
--- a/sql/core/src/test/resources/sql-tests/results/extract.sql.out
+++ b/sql/core/src/test/resources/sql-tests/results/extract.sql.out
@@ -317,7 +317,20 @@ select extract(not_supported from c) from t
 struct<>
 -- !query output
 org.apache.spark.sql.AnalysisException
-Literals of type 'not_supported' are currently not supported for the string type.; line 1 pos 7
+{
+  "errorClass" : "_LEGACY_ERROR_TEMP_1102",
+  "messageParameters" : {
+    "field" : "not_supported",
+    "srcDataType" : "string"
+  },
+  "queryContext" : [ {
+    "objectType" : "",
+    "objectName" : "",
+    "startIndex" : 8,
+    "stopIndex" : 36,
+    "fragment" : "extract(not_supported from c)"
+  } ]
+}


 -- !query
@@ -326,7 +339,20 @@ select extract(not_supported from i) from t
 struct<>
 -- !query output
 org.apache.spark.sql.AnalysisException
-Literals of type 'not_supported' are currently not supported for the interval year to month type.; line 1 pos 7
+{
+  "errorClass" : "_LEGACY_ERROR_TEMP_1102",
+  "messageParameters" : {
+    "field" : "not_supported",
+    "srcDataType" : "interval year to month"
+  },
+  "queryContext" : [ {
+    "objectType" : "",
+    "objectName" : "",
+    "startIndex" : 8,
+    "stopIndex" : 36,
+    "fragment" : "extract(not_supported from i)"
+  } ]
+}


 -- !query
@@ -335,7 +361,20 @@ select extract(not_supported from j) from t
 struct<>
 -- !query output
 org.apache.spark.sql.AnalysisException
-Literals of type 'not_supported' are currently not supported for the interval day to second type.; line 1 pos 7
+{
+  "errorClass" : "_LEGACY_ERROR_TEMP_1102",
+  "messageParameters" : {
+    "field" : "not_supported",
+    "srcDataType" : "interval day to second"
+  },
+  "queryContext" : [ {
+    "objectType" : "",
+    "objectName" : "",
+    "startIndex" : 8,
+    "stopIndex" : 36,
+    "fragment" : "extract(not_supported from j)"
+  } ]
+}


 -- !query
@@ -924,7 +963,20 @@ select extract(DAY from interval '2-1' YEAR TO MONTH)
 struct<>
 -- !query output
 org.apache.spark.sql.AnalysisException
-Literals of type 'DAY' are currently not supported for the interval year to month type.; line 1 pos 7
+{
+  "errorClass" : "_LEGACY_ERROR_TEMP_1102",
+  "messageParameters" : {
+    "field" : "DAY",
+    "srcDataType" : "interval year to month"
+  },
+  "queryContext" : [ {
+    "objectType" : "",
+    "objectName" : "",
+    "startIndex" : 8,
+    "stopIndex" : 53,
+    "fragment" : "extract(DAY from interval '2-1' YEAR TO MONTH)"
+  } ]
+}


 -- !query
@@ -1081,7 +1133,20 @@ select extract(MONTH from interval '123 12:34:56.789123123' DAY TO SECOND)
 struct<>
 -- !query output
 org.apache.spark.sql.AnalysisException
-Literals of type 'MONTH' are currently not supported for the interval day to second type.; line 1 pos 7
+{
+  "errorClass" : "_LEGACY_ERROR_TEMP_1102",
+  "messageParameters" : {
+    "field" : "MONTH",
+    "srcDataType" : "interval day to second"
+  },
+  "queryContext" : [ {
+    "objectType" : "",
+    "objectName" : "",
+    "startIndex" : 8,
+    "stopIndex" : 74,
+    "fragment" : "extract(MONTH from interval '123 12:34:56.789123123' DAY TO SECOND)"
+  } ]
+}


 -- !query
diff --git a/sql/core/src/test/resources/sql-tests/results/json-functions.sql.out b/sql/core/src/test/resources/sql-tests/results/json-functions.sql.out
index 96a0fc935c00..cb8d4fca4942 100644
--- a/sql/core/src/test/resources/sql-tests/results/json-functions.sql.out
+++ b/sql/core/src/test/resources/sql-tests/results/json-functions.sql.out
@@ -69,7 +69,16 @@ select to_json(named_struct('a', 1, 'b', 2), named_struct('mode', 'PERMISSIVE'))
 struct<>
 -- !query output
 org.apache.spark.sql.AnalysisException
-Must use a map() function for options.; line 1 pos 7
+{
+  "errorClass" : "_LEGACY_ERROR_TEMP_1096",
+  "queryContext" : [ {
+    "objectType" : "",
+    "objectName" : "",
+    "startIndex" : 8,
+    "stopIndex" : 80,
+    "fragment" : "to_json(named_struct('a', 1, 'b', 2), named_struct('mode', 'PERMISSIVE'))"
+  } ]
+}


 -- !query
@@ -78,7 +87,19 @@ select to_json(named_struct('a', 1, 'b', 2), map('mode', 1))
 struct<>
 -- !query output
 org.apache.spark.sql.AnalysisException
-A type of keys and values in map() must be string, but got map<string,int>.; line 1 pos 7
+{
+  "errorClass" : "_LEGACY_ERROR_TEMP_1095",
+  "messageParameters" : {
+    "map" : "map<string,int>"
+  },
+  "queryContext" : [ {
+    "objectType" : "",
+    "objectName" : "",
+    "startIndex" : 8,
"stopIndex" : 60, + "fragment" : "to_json(named_struct('a', 1, 'b', 2), map('mode', 1))" + } ] +} -- !query @@ -126,7 +147,19 @@ select from_json('{"a":1}', 1) struct<> -- !query output org.apache.spark.sql.AnalysisException -The expression '1' is not a valid schema string.; line 1 pos 7 +{ + "errorClass" : "_LEGACY_ERROR_TEMP_1092", + "messageParameters" : { + "expr" : "1" + }, + "queryContext" : [ { + "objectType" : "", + "objectName" : "", + "startIndex" : 8, + "stopIndex" : 30, + "fragment" : "from_json('{\"a\":1}', 1)" + } ] +} -- !query @@ -135,20 +168,21 @@ select from_json('{"a":1}', 'a InvalidType') struct<> -- !query output org.apache.spark.sql.AnalysisException -Cannot parse the data type: -[PARSE_SYNTAX_ERROR] Syntax error at or near 'InvalidType': extra input 'InvalidType'(line 1, pos 2) - -== SQL == -a InvalidType ---^^^ - -Failed fallback parsing: -DataType invalidtype is not supported.(line 1, pos 2) - -== SQL == -a InvalidType ---^^^ -; line 1 pos 7 +{ + "errorClass" : "_LEGACY_ERROR_TEMP_1227", + "messageParameters" : { + "e1" : "\n[PARSE_SYNTAX_ERROR] Syntax error at or near 'InvalidType': extra input 'InvalidType'(line 1, pos 2)\n\n== SQL ==\na InvalidType\n--^^^\n", + "e2" : "\nDataType invalidtype is not supported.(line 1, pos 2)\n\n== SQL ==\na InvalidType\n--^^^\n", + "msg" : "Cannot parse the data type: " + }, + "queryContext" : [ { + "objectType" : "", + "objectName" : "", + "startIndex" : 8, + "stopIndex" : 44, + "fragment" : "from_json('{\"a\":1}', 'a InvalidType')" + } ] +} -- !query @@ -157,7 +191,16 @@ select from_json('{"a":1}', 'a INT', named_struct('mode', 'PERMISSIVE')) struct<> -- !query output org.apache.spark.sql.AnalysisException -Must use a map() function for options.; line 1 pos 7 +{ + "errorClass" : "_LEGACY_ERROR_TEMP_1096", + "queryContext" : [ { + "objectType" : "", + "objectName" : "", + "startIndex" : 8, + "stopIndex" : 72, + "fragment" : "from_json('{\"a\":1}', 'a INT', named_struct('mode', 'PERMISSIVE'))" + } ] +} -- !query @@ -166,7 +209,19 @@ select from_json('{"a":1}', 'a INT', map('mode', 1)) struct<> -- !query output org.apache.spark.sql.AnalysisException -A type of keys and values in map() must be string, but got map.; line 1 pos 7 +{ + "errorClass" : "_LEGACY_ERROR_TEMP_1095", + "messageParameters" : { + "map" : "map" + }, + "queryContext" : [ { + "objectType" : "", + "objectName" : "", + "startIndex" : 8, + "stopIndex" : 52, + "fragment" : "from_json('{\"a\":1}', 'a INT', map('mode', 1))" + } ] +} -- !query diff --git a/sql/core/src/test/resources/sql-tests/results/postgreSQL/int8.sql.out b/sql/core/src/test/resources/sql-tests/results/postgreSQL/int8.sql.out index dd18f1d2e8cb..ece4efc6f89a 100755 --- a/sql/core/src/test/resources/sql-tests/results/postgreSQL/int8.sql.out +++ b/sql/core/src/test/resources/sql-tests/results/postgreSQL/int8.sql.out @@ -856,7 +856,7 @@ org.apache.spark.sql.AnalysisException "errorClass" : "_LEGACY_ERROR_TEMP_1179", "messageParameters" : { "arguments" : "long, long, integer", - "details" : "requirement failed: step (0) cannot be 0", + "details" : "[FAILED_FUNCTION_CALL] Failed preparing of the function `range` for call. 
+    "details" : "[FAILED_FUNCTION_CALL] Failed preparing of the function `range` for call. Please, double check function's arguments.",
     "name" : "range",
     "usage" : "\n    range(start: long, end: long, step: long, numSlices: integer)\n    range(start: long, end: long, step: long)\n    range(start: long, end: long)\n    range(end: long)"
   },
diff --git a/sql/core/src/test/resources/sql-tests/results/string-functions.sql.out b/sql/core/src/test/resources/sql-tests/results/string-functions.sql.out
index 4bcb69ed773a..2ea5cefa38d1 100644
--- a/sql/core/src/test/resources/sql-tests/results/string-functions.sql.out
+++ b/sql/core/src/test/resources/sql-tests/results/string-functions.sql.out
@@ -1541,7 +1541,21 @@ select to_binary('abc', 1)
 struct<>
 -- !query output
 org.apache.spark.sql.AnalysisException
-The 'format' parameter of function 'to_binary' needs to be a string literal.; line 1 pos 7
+{
+  "errorClass" : "_LEGACY_ERROR_TEMP_1100",
+  "messageParameters" : {
+    "argName" : "format",
+    "funcName" : "to_binary",
+    "requiredType" : "string"
+  },
+  "queryContext" : [ {
+    "objectType" : "",
+    "objectName" : "",
+    "startIndex" : 8,
+    "stopIndex" : 26,
+    "fragment" : "to_binary('abc', 1)"
+  } ]
+}


 -- !query
diff --git a/sql/core/src/test/resources/sql-tests/results/table-valued-functions.sql.out b/sql/core/src/test/resources/sql-tests/results/table-valued-functions.sql.out
index 4833542fec4c..4747d36d500f 100644
--- a/sql/core/src/test/resources/sql-tests/results/table-valued-functions.sql.out
+++ b/sql/core/src/test/resources/sql-tests/results/table-valued-functions.sql.out
@@ -155,7 +155,7 @@ org.apache.spark.sql.AnalysisException
   "errorClass" : "_LEGACY_ERROR_TEMP_1179",
   "messageParameters" : {
     "arguments" : "integer, integer, integer",
-    "details" : "requirement failed: step (0) cannot be 0",
+    "details" : "[FAILED_FUNCTION_CALL] Failed preparing of the function `range` for call. Please, double check function's arguments.",
     "name" : "range",
     "usage" : "\n    range(start: long, end: long, step: long, numSlices: integer)\n    range(start: long, end: long, step: long)\n    range(start: long, end: long)\n    range(end: long)"
   },
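
A minimal, self-contained Scala sketch of the dispatch that the new QueryCompilationErrors.funcBuildError helper performs. Note: SparkThrowable, AnalysisException, and toSQLId below are simplified stand-ins for the real Spark types, defined only so the example runs on its own.

object FuncBuildErrorSketch extends App {
  // Stand-in for the org.apache.spark.SparkThrowable interface.
  trait SparkThrowable { def getErrorClass: String }

  // Stand-in for org.apache.spark.sql.AnalysisException.
  class AnalysisException(
      val errorClass: String,
      val messageParameters: Map[String, String],
      val cause: Option[Throwable]) extends Exception(errorClass, cause.orNull)

  // Stand-in for QueryErrorsBase.toSQLId: quote an identifier in backticks.
  def toSQLId(name: String): String = s"`$name`"

  // Mirrors funcBuildError in the diff above: a cause that already carries an
  // error class (SparkThrowable) is rethrown as is; anything else is wrapped
  // into an AnalysisException with the new FAILED_FUNCTION_CALL error class.
  def funcBuildError(funcName: String, cause: Exception): Throwable =
    cause.getCause match {
      case st: SparkThrowable with Throwable => st
      case other =>
        new AnalysisException(
          errorClass = "FAILED_FUNCTION_CALL",
          messageParameters = Map("funcName" -> toSQLId(funcName)),
          cause = Option(other))
    }

  // A reflective invocation wrapper whose cause is a plain exception, like
  // range's "requirement failed: step (0) cannot be 0", gets converted:
  val wrapped = new Exception(
    new IllegalArgumentException("requirement failed: step (0) cannot be 0"))
  println(funcBuildError("range", wrapped).getMessage) // FAILED_FUNCTION_CALL
}

This is why the int8.sql.out and table-valued-functions.sql.out expectations change: the raw "requirement failed" detail is now routed through FAILED_FUNCTION_CALL, while causes that are already SparkThrowable instances (such as the _LEGACY_ERROR_TEMP_* analysis errors above) keep their original error class and query context.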