-
Notifications
You must be signed in to change notification settings - Fork 29k
[SPARK-4751] Dynamic allocation in standalone mode #7532
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Changes from all commits
80047aa
49702d1
42ac215
58cb06f
32abe44
1334e9a
2eb5f3f
b7742af
0a8be79
a82e907
6832bd7
2e762d6
24149eb
c0a2c02
ee686a8
accc8f6
879e928
b3c1736
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -531,8 +531,6 @@ class SparkContext(config: SparkConf) extends Logging with ExecutorAllocationCli | |
| val dynamicAllocationEnabled = _conf.getBoolean("spark.dynamicAllocation.enabled", false) | ||
| _executorAllocationManager = | ||
| if (dynamicAllocationEnabled) { | ||
| assert(supportDynamicAllocation, | ||
| "Dynamic allocation of executors is currently only supported in YARN and Mesos mode") | ||
| Some(new ExecutorAllocationManager(this, listenerBus, _conf)) | ||
| } else { | ||
| None | ||
|
|
@@ -1361,17 +1359,6 @@ class SparkContext(config: SparkConf) extends Logging with ExecutorAllocationCli | |
| postEnvironmentUpdate() | ||
| } | ||
|
|
||
| /** | ||
| * Return whether dynamically adjusting the amount of resources allocated to | ||
| * this application is supported. This is currently only available for YARN | ||
| * and Mesos coarse-grained mode. | ||
| */ | ||
| private[spark] def supportDynamicAllocation: Boolean = { | ||
| (master.contains("yarn") | ||
| || master.contains("mesos") | ||
| || _conf.getBoolean("spark.dynamicAllocation.testing", false)) | ||
| } | ||
|
|
||
| /** | ||
| * :: DeveloperApi :: | ||
| * Register a listener to receive up-calls from events that happen during execution. | ||
|
|
@@ -1400,8 +1387,6 @@ class SparkContext(config: SparkConf) extends Logging with ExecutorAllocationCli | |
| localityAwareTasks: Int, | ||
| hostToLocalTaskCount: scala.collection.immutable.Map[String, Int] | ||
| ): Boolean = { | ||
| assert(supportDynamicAllocation, | ||
| "Requesting executors is currently only supported in YARN and Mesos modes") | ||
| schedulerBackend match { | ||
| case b: CoarseGrainedSchedulerBackend => | ||
| b.requestTotalExecutors(numExecutors, localityAwareTasks, hostToLocalTaskCount) | ||
|
|
@@ -1414,12 +1399,10 @@ class SparkContext(config: SparkConf) extends Logging with ExecutorAllocationCli | |
| /** | ||
| * :: DeveloperApi :: | ||
| * Request an additional number of executors from the cluster manager. | ||
| * This is currently only supported in YARN mode. Return whether the request is received. | ||
| * @return whether the request is received. | ||
| */ | ||
| @DeveloperApi | ||
| override def requestExecutors(numAdditionalExecutors: Int): Boolean = { | ||
| assert(supportDynamicAllocation, | ||
| "Requesting executors is currently only supported in YARN and Mesos modes") | ||
| schedulerBackend match { | ||
| case b: CoarseGrainedSchedulerBackend => | ||
| b.requestExecutors(numAdditionalExecutors) | ||
|
|
@@ -1438,12 +1421,10 @@ class SparkContext(config: SparkConf) extends Logging with ExecutorAllocationCli | |
| * through this method with new ones, it should follow up explicitly with a call to | ||
| * {{SparkContext#requestExecutors}}. | ||
| * | ||
| * This is currently only supported in YARN mode. Return whether the request is received. | ||
| * @return whether the request is received. | ||
| */ | ||
| @DeveloperApi | ||
| override def killExecutors(executorIds: Seq[String]): Boolean = { | ||
|
Contributor
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Outdated comments... BTW, how do I comment on a line that has no changes? |
||
| assert(supportDynamicAllocation, | ||
| "Killing executors is currently only supported in YARN and Mesos modes") | ||
| schedulerBackend match { | ||
| case b: CoarseGrainedSchedulerBackend => | ||
| b.killExecutors(executorIds) | ||
|
|
@@ -1462,7 +1443,7 @@ class SparkContext(config: SparkConf) extends Logging with ExecutorAllocationCli | |
| * through this method with a new one, it should follow up explicitly with a call to | ||
| * {{SparkContext#requestExecutors}}. | ||
| * | ||
| * This is currently only supported in YARN mode. Return whether the request is received. | ||
| * @return whether the request is received. | ||
| */ | ||
| @DeveloperApi | ||
| override def killExecutor(executorId: String): Boolean = super.killExecutor(executorId) | ||
|
|
@@ -1479,7 +1460,7 @@ class SparkContext(config: SparkConf) extends Logging with ExecutorAllocationCli | |
| * can steal the window of opportunity and acquire this application's resources in the | ||
| * mean time. | ||
| * | ||
| * This is currently only supported in YARN mode. Return whether the request is received. | ||
| * @return whether the request is received. | ||
| */ | ||
| private[spark] def killAndReplaceExecutor(executorId: String): Boolean = { | ||
| schedulerBackend match { | ||
|
|
||
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -197,6 +197,22 @@ private[spark] class AppClient( | |
| sendToMaster(UnregisterApplication(appId)) | ||
| context.reply(true) | ||
| stop() | ||
|
|
||
| case r: RequestExecutors => | ||
| master match { | ||
| case Some(m) => context.reply(m.askWithRetry[Boolean](r)) | ||
| case None => | ||
| logWarning("Attempted to request executors before registering with Master.") | ||
| context.reply(false) | ||
| } | ||
|
|
||
| case k: KillExecutors => | ||
| master match { | ||
| case Some(m) => context.reply(m.askWithRetry[Boolean](k)) | ||
| case None => | ||
| logWarning("Attempted to kill executors before registering with Master.") | ||
| context.reply(false) | ||
| } | ||
| } | ||
|
|
||
| override def onDisconnected(address: RpcAddress): Unit = { | ||
|
|
@@ -257,4 +273,33 @@ private[spark] class AppClient( | |
| endpoint = null | ||
| } | ||
| } | ||
|
|
||
| /** | ||
| * Request executors from the Master by specifying the total number desired, | ||
| * including existing pending and running executors. | ||
| * | ||
| * @return whether the request is acknowledged. | ||
| */ | ||
| def requestTotalExecutors(requestedTotal: Int): Boolean = { | ||
|
Contributor
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Is it necessary to validate the value of `requestedTotal` here?
Contributor
Author
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. this is already done in |
||
| if (endpoint != null && appId != null) { | ||
| endpoint.askWithRetry[Boolean](RequestExecutors(appId, requestedTotal)) | ||
| } else { | ||
| logWarning("Attempted to request executors before driver fully initialized.") | ||
| false | ||
| } | ||
| } | ||
|
|
||
| /** | ||
| * Kill the given list of executors through the Master. | ||
| * @return whether the kill request is acknowledged. | ||
| */ | ||
| def killExecutors(executorIds: Seq[String]): Boolean = { | ||
| if (endpoint != null && appId != null) { | ||
| endpoint.askWithRetry[Boolean](KillExecutors(appId, executorIds)) | ||
| } else { | ||
| logWarning("Attempted to kill executors before driver fully initialized.") | ||
| false | ||
| } | ||
| } | ||
|
|
||
| } | ||
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
outdated comments for this function? https://github.com/apache/spark/pull/7532/files#diff-364713d7776956cb8b0a771e9b62f82dR1389