diff --git a/core/src/main/scala/org/apache/spark/internal/config/package.scala b/core/src/main/scala/org/apache/spark/internal/config/package.scala
index 0aed1af023f8..8bf615b23383 100644
--- a/core/src/main/scala/org/apache/spark/internal/config/package.scala
+++ b/core/src/main/scala/org/apache/spark/internal/config/package.scala
@@ -60,7 +60,7 @@ package object config {
     .createWithDefaultString("1g")
 
   private[spark] val DRIVER_MEMORY_OVERHEAD = ConfigBuilder("spark.driver.memoryOverhead")
-    .doc("The amount of off-heap memory to be allocated per driver in cluster mode, " +
+    .doc("The amount of non-heap memory to be allocated per driver in cluster mode, " +
       "in MiB unless otherwise specified.")
     .bytesConf(ByteUnit.MiB)
     .createOptional
@@ -185,7 +185,7 @@ package object config {
     .createWithDefaultString("1g")
 
   private[spark] val EXECUTOR_MEMORY_OVERHEAD = ConfigBuilder("spark.executor.memoryOverhead")
-    .doc("The amount of off-heap memory to be allocated per executor in cluster mode, " +
+    .doc("The amount of non-heap memory to be allocated per executor in cluster mode, " +
       "in MiB unless otherwise specified.")
     .bytesConf(ByteUnit.MiB)
     .createOptional
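For context, a minimal sketch of how these two options are set from user code (not part of the patch). Both are declared with `bytesConf(ByteUnit.MiB)`, so a bare number is read as MiB and a suffixed value such as `1g` is converted:

```scala
import org.apache.spark.SparkConf

// Minimal sketch, not part of the patch: both overhead options are
// bytesConf(ByteUnit.MiB), so a bare number is taken as MiB and a
// suffixed value such as "1g" is converted.
val conf = new SparkConf()
  .set("spark.driver.memoryOverhead", "1g")    // parsed as 1024 MiB
  .set("spark.executor.memoryOverhead", "512") // bare number -> 512 MiB
```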
diff --git a/docs/configuration.md b/docs/configuration.md
index d0b2699a5dc7..c59626c34858 100644
--- a/docs/configuration.md
+++ b/docs/configuration.md
@@ -181,10 +181,16 @@ of the most common options to set are:
 <tr>
   <td><code>spark.driver.memoryOverhead</code></td>
   <td>driverMemory * 0.10, with minimum of 384 </td>
   <td>
     Amount of non-heap memory to be allocated per driver process in cluster mode, in MiB unless
     otherwise specified. This is memory that accounts for things like VM overheads, interned
     strings, other native overheads, etc. This tends to grow with the container size
     (typically 6-10%). This option is currently supported on YARN, Mesos and Kubernetes.
+    <em>Note:</em> Non-heap memory includes off-heap memory
+    (when <code>spark.memory.offHeap.enabled=true</code>) and memory used by other driver processes
+    (e.g. the Python process that accompanies a PySpark driver) and memory used by other non-driver
+    processes running in the same container. The maximum memory size of the container running the
+    driver is determined by the sum of <code>spark.driver.memoryOverhead</code>
+    and <code>spark.driver.memory</code>.
   </td>
 </tr>
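To make the sizing rule concrete, a small worked sketch follows (local names only, not Spark API; it assumes the documented default overhead of driverMemory * 0.10 with a 384 MiB floor):

```scala
// Worked sketch with local names (not Spark API). Assumes the documented
// default overhead of max(driverMemory * 0.10, 384 MiB) when
// spark.driver.memoryOverhead is unset.
val driverMemoryMiB = 4096L // spark.driver.memory=4g
val overheadMiB = math.max((driverMemoryMiB * 0.10).toLong, 384L) // 409 MiB
val containerMiB = driverMemoryMiB + overheadMiB // 4505 MiB requested for the container
```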
@@ ... @@
 <tr>
   <td><code>spark.executor.memoryOverhead</code></td>
   <td>executorMemory * 0.10, with minimum of 384 </td>
   <td>
     Amount of non-heap memory to be allocated per executor process, in MiB unless otherwise
     specified. This is memory that accounts for things like VM overheads, interned strings,
     other native overheads, etc. This tends to grow with the executor size (typically 6-10%).
     This option is currently supported on YARN and Kubernetes.
+    <em>Note:</em> Non-heap memory includes off-heap memory
+    (when <code>spark.memory.offHeap.enabled=true</code>) and memory used by other executor processes
+    (e.g. the Python process that accompanies a PySpark executor) and memory used by other
+    non-executor processes running in the same container. The maximum memory size of the container
+    running the executor is determined by the sum of <code>spark.executor.memoryOverhead</code> and
+    <code>spark.executor.memory</code>.
   </td>
 </tr>
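As an illustration of the sum described in the executor note (a sketch, not from the patch; the 8g/1g values are arbitrary):

```scala
import org.apache.spark.SparkConf

// Illustrative only: with these settings the cluster manager requests
// executor containers of roughly 8g (heap) + 1g (non-heap) = 9g each.
val conf = new SparkConf()
  .set("spark.executor.memory", "8g")         // heap
  .set("spark.executor.memoryOverhead", "1g") // non-heap: VM overhead, PySpark worker, off-heap, ...
```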
@@ ... @@
 <tr>
   <td><code>spark.memory.offHeap.enabled</code></td>
   <td>false</td>
   <td>
     If true, Spark will attempt to use off-heap memory for certain operations. If off-heap memory
     use is enabled, then <code>spark.memory.offHeap.size</code> must be positive.
+    <em>Note:</em> If off-heap memory is enabled, you may need to raise the non-heap memory size
+    (e.g. increase <code>spark.driver.memoryOverhead</code> or
+    <code>spark.executor.memoryOverhead</code>).
   </td>
 </tr>
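A sketch of how that note plays out in practice (values are illustrative; the exact headroom needed depends on the workload):

```scala
import org.apache.spark.SparkConf

// Illustrative values: enabling 2g of off-heap storage consumes part of the
// container's non-heap budget, so the overhead is raised accordingly.
val conf = new SparkConf()
  .set("spark.memory.offHeap.enabled", "true")
  .set("spark.memory.offHeap.size", "2g")     // must be positive when enabled
  .set("spark.executor.memoryOverhead", "3g") // e.g. 1g base overhead + 2g off-heap
```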