@@ -357,8 +357,12 @@ class SparkContext(config: SparkConf) extends Logging {
   }
 
   // Optionally scale number of executors dynamically based on workload. Exposed for testing.
+  private val dynamicAllocationEnabled = conf.getBoolean("spark.dynamicAllocation.enabled", false)
+  private val dynamicAllocationTesting = conf.getBoolean("spark.dynamicAllocation.testing", false)
   private[spark] val executorAllocationManager: Option[ExecutorAllocationManager] =
-    if (conf.getBoolean("spark.dynamicAllocation.enabled", false)) {
+    if (dynamicAllocationEnabled) {
+      assert(master.contains("yarn") || dynamicAllocationTesting,
+        "Dynamic allocation of executors is currently only supported in YARN mode")
       Some(new ExecutorAllocationManager(this))
     } else {
       None
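For reference, a minimal sketch (not part of this patch) of how the two new configuration flags would be supplied through SparkConf; the master and app name are placeholders, and the min/max executor bounds may also be needed once dynamic allocation is enabled:

  import org.apache.spark.SparkConf

  // Hypothetical configuration: the testing flag lets the new assert pass on a non-YARN master.
  val conf = new SparkConf()
    .setMaster("local[*]")                            // placeholder; YARN is the supported target
    .setAppName("dynamic-allocation-sketch")          // placeholder app name
    .set("spark.dynamicAllocation.enabled", "true")   // read into dynamicAllocationEnabled above
    .set("spark.dynamicAllocation.testing", "true")   // read into dynamicAllocationTesting above
    .set("spark.dynamicAllocation.minExecutors", "1") // assumed bounds for the allocation manager
    .set("spark.dynamicAllocation.maxExecutors", "2")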
@@ -989,6 +993,8 @@ class SparkContext(config: SparkConf) extends Logging {
    */
   @DeveloperApi
   def requestExecutors(numAdditionalExecutors: Int): Boolean = {
+    assert(master.contains("yarn") || dynamicAllocationTesting,
+      "Requesting executors is currently only supported in YARN mode")
     schedulerBackend match {
       case b: CoarseGrainedSchedulerBackend =>
         b.requestExecutors(numAdditionalExecutors)
@@ -1005,6 +1011,8 @@ class SparkContext(config: SparkConf) extends Logging {
    */
   @DeveloperApi
   def killExecutors(executorIds: Seq[String]): Boolean = {
+    assert(master.contains("yarn") || dynamicAllocationTesting,
+      "Killing executors is currently only supported in YARN mode")
     schedulerBackend match {
       case b: CoarseGrainedSchedulerBackend =>
         b.killExecutors(executorIds)
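As a usage sketch only, the guarded DeveloperApi methods could then be exercised as below; the executor IDs are illustrative, and on a backend other than CoarseGrainedSchedulerBackend both calls simply return false:

  import org.apache.spark.SparkContext

  // Assumes a conf like the sketch above (spark.dynamicAllocation.testing = true on a
  // non-YARN master), so the new asserts do not trip.
  val sc = new SparkContext(conf)
  sc.requestExecutors(2)          // ask the cluster manager for two additional executors
  sc.killExecutors(Seq("1", "2")) // request that executors with these IDs be released
  sc.stop()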