diff --git a/R/pkg/tests/fulltests/test_sparkSQL_arrow.R b/R/pkg/tests/fulltests/test_sparkSQL_arrow.R
index 97972753a78fa..16d93763ff038 100644
--- a/R/pkg/tests/fulltests/test_sparkSQL_arrow.R
+++ b/R/pkg/tests/fulltests/test_sparkSQL_arrow.R
@@ -312,4 +312,22 @@ test_that("Arrow optimization - unsupported types", {
   })
 })
 
+test_that("SPARK-32478: gapply() Arrow optimization - error message for schema mismatch", {
+  skip_if_not_installed("arrow")
+  df <- createDataFrame(list(list(a = 1L, b = "a")))
+
+  conf <- callJMethod(sparkSession, "conf")
+  arrowEnabled <- sparkR.conf("spark.sql.execution.arrow.sparkr.enabled")[[1]]
+
+  callJMethod(conf, "set", "spark.sql.execution.arrow.sparkr.enabled", "true")
+  tryCatch({
+    expect_error(
+      count(gapply(df, "a", function(key, group) { group }, structType("a int, b int"))),
+      "expected IntegerType, IntegerType, got IntegerType, StringType")
+  },
+  finally = {
+    callJMethod(conf, "set", "spark.sql.execution.arrow.sparkr.enabled", arrowEnabled)
+  })
+})
+
 sparkR.session.stop()
diff --git a/docs/sparkr.md b/docs/sparkr.md
index fa1bb1b851815..05310f89f278d 100644
--- a/docs/sparkr.md
+++ b/docs/sparkr.md
@@ -681,12 +681,12 @@ The current supported minimum version is 1.0.0; however, this might change betwe
 
 Arrow optimization is available when converting a Spark DataFrame to an R DataFrame using the call `collect(spark_df)`,
 when creating a Spark DataFrame from an R DataFrame with `createDataFrame(r_df)`, when applying an R native function to each partition
 via `dapply(...)` and when applying an R native function to grouped data via `gapply(...)`.
-To use Arrow when executing these calls, users need to first set the Spark configuration ‘spark.sql.execution.arrow.sparkr.enabled’
-to ‘true’. This is disabled by default.
+To use Arrow when executing these, users need to set the Spark configuration ‘spark.sql.execution.arrow.sparkr.enabled’
+to ‘true’ first. This is disabled by default.
 
-In addition, optimizations enabled by ‘spark.sql.execution.arrow.sparkr.enabled’ could fallback automatically to non-Arrow optimization
-implementation if an error occurs before the actual computation within Spark during converting a Spark DataFrame to/from an R
-DataFrame.
+Whether the optimization is enabled or not, SparkR produces the same results. In addition, the conversion
+between a Spark DataFrame and an R DataFrame falls back automatically to the non-Arrow implementation
+when the optimization fails for any reason before the actual computation.
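
For reference, a minimal sketch (not part of the patch) of the behavior the test and the updated docs describe: with the Arrow optimization enabled, `gapply()` fails fast with the clearer error message when the declared schema does not match what the R function returns. The local-session setup below is an illustrative assumption.

```r
library(SparkR)

# Assumed local session with the Arrow optimization turned on; it is disabled by default.
sparkR.session(master = "local[1]",
               sparkConfig = list(spark.sql.execution.arrow.sparkr.enabled = "true"))

df <- createDataFrame(list(list(a = 1L, b = "a")))

# The declared schema matches the R function's output, so the Arrow path succeeds.
ok <- gapply(df, "a", function(key, group) { group }, structType("a int, b string"))
head(collect(ok))

# Declaring structType("a int, b int") instead now raises the error added by this patch:
#   "expected IntegerType, IntegerType, got IntegerType, StringType"

sparkR.session.stop()
```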