Commit ae396cf

Addressed latest comments
1 parent cb12fec commit ae396cf

File tree

3 files changed: 3 additions & 11 deletions

docs/configuration.md

Lines changed: 2 additions & 2 deletions

```diff
@@ -1397,10 +1397,10 @@ Apart from these, the following properties are also available, and may be useful
 </tr>
 <tr>
   <td><code>spark.scheduler.minRegisteredResourcesRatio</code></td>
-  <td>0.8 for YARN mode; 0.0 for standalone mode and Mesos coarse-grained mode</td>
+  <td>2.3.0 for KUBERNETES mode; 0.8 for YARN mode; 0.0 for standalone mode and Mesos coarse-grained mode</td>
   <td>
     The minimum ratio of registered resources (registered resources / total expected resources)
-    (resources are executors in yarn mode, CPU cores in standalone mode and Mesos coarsed-grained
+    (resources are executors in yarn mode and Kubernetes mode, CPU cores in standalone mode and Mesos coarsed-grained
     mode ['spark.cores.max' value is total expected resources for Mesos coarse-grained mode] )
     to wait for before scheduling begins. Specified as a double between 0.0 and 1.0.
     Regardless of whether the minimum ratio of resources has been reached,
```
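For reference, this property is an ordinary Spark configuration entry. A minimal sketch of setting it when building a session (the app name and values are illustrative, not part of this commit; `spark.scheduler.maxRegisteredResourcesWaitingTime` is the companion timeout described in the same docs section):

```scala
import org.apache.spark.sql.SparkSession

// Illustrative: wait until 80% of expected resources have registered,
// but never longer than 30s, before the scheduler starts placing tasks.
val spark = SparkSession.builder()
  .appName("min-registered-resources-demo")
  .config("spark.scheduler.minRegisteredResourcesRatio", "0.8")
  .config("spark.scheduler.maxRegisteredResourcesWaitingTime", "30s")
  .getOrCreate()
```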

resource-managers/kubernetes/core/src/main/scala/org/apache/spark/scheduler/cluster/k8s/ExecutorPodFactory.scala

Lines changed: 0 additions & 9 deletions

```diff
@@ -46,8 +46,6 @@ private[spark] trait ExecutorPodFactory {
 private[spark] class ExecutorPodFactoryImpl(sparkConf: SparkConf)
   extends ExecutorPodFactory {
 
-  import ExecutorPodFactoryImpl._
-
   private val executorExtraClasspath =
     sparkConf.get(org.apache.spark.internal.config.EXECUTOR_CLASS_PATH)
 
@@ -76,7 +74,6 @@ private[spark] class ExecutorPodFactoryImpl(sparkConf: SparkConf)
 
   private val executorDockerImage = sparkConf.get(EXECUTOR_DOCKER_IMAGE)
   private val dockerImagePullPolicy = sparkConf.get(DOCKER_IMAGE_PULL_POLICY)
-  private val executorPort = sparkConf.getInt("spark.executor.port", DEFAULT_STATIC_PORT)
   private val blockManagerPort = sparkConf
     .getInt("spark.blockmanager.port", DEFAULT_BLOCKMANAGER_PORT)
 
@@ -139,7 +136,6 @@ private[spark] class ExecutorPodFactoryImpl(sparkConf: SparkConf)
       }
     }.getOrElse(Seq.empty[EnvVar])
     val executorEnv = (Seq(
-      (ENV_EXECUTOR_PORT, executorPort.toString),
       (ENV_DRIVER_URL, driverUrl),
       // Executor backend expects integral value for executor cores, so round it up to an int.
       (ENV_EXECUTOR_CORES, math.ceil(executorCores).toInt.toString),
@@ -159,7 +155,6 @@ private[spark] class ExecutorPodFactoryImpl(sparkConf: SparkConf)
         .build()
       ) ++ executorExtraJavaOptionsEnv ++ executorExtraClasspathEnv.toSeq
     val requiredPorts = Seq(
-      (EXECUTOR_PORT_NAME, executorPort),
       (BLOCK_MANAGER_PORT_NAME, blockManagerPort))
       .map { case (name, port) =>
         new ContainerPortBuilder()
@@ -220,7 +215,3 @@ private[spark] class ExecutorPodFactoryImpl(sparkConf: SparkConf)
       .build()
   }
 }
-
-private object ExecutorPodFactoryImpl {
-  private val DEFAULT_STATIC_PORT = 10000
-}
```
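With the static executor port gone, only the block manager port remains in `requiredPorts`. A self-contained sketch of the surviving wiring, using the fabric8 Kubernetes client this module builds on (the port name and number below are illustrative stand-ins for the real constants):

```scala
import io.fabric8.kubernetes.api.model.{ContainerPort, ContainerPortBuilder}

// Stand-ins for BLOCK_MANAGER_PORT_NAME / blockManagerPort in the factory.
val blockManagerPortName = "blockmanager"
val blockManagerPort = 7079

// Same shape as requiredPorts above: (name, port) pairs become
// ContainerPort entries on the executor container spec.
val requiredPorts: Seq[ContainerPort] = Seq(
  (blockManagerPortName, blockManagerPort))
  .map { case (name, port) =>
    new ContainerPortBuilder()
      .withName(name)
      .withContainerPort(port)
      .build()
  }
```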

resource-managers/kubernetes/core/src/main/scala/org/apache/spark/scheduler/cluster/k8s/KubernetesClusterSchedulerBackend.scala

Lines changed: 1 addition & 0 deletions

```diff
@@ -224,6 +224,7 @@ private[spark] class KubernetesClusterSchedulerBackend(
   override def stop(): Unit = {
     // stop allocation of new resources and caches.
     allocatorExecutor.shutdown()
+    allocatorExecutor.awaitTermination(30, TimeUnit.SECONDS)
 
     // send stop message to executors so they shut down cleanly
     super.stop()
```
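The added `awaitTermination` call follows the standard `java.util.concurrent` shutdown idiom: `shutdown()` stops new work from being scheduled, and `awaitTermination` blocks, here for up to 30 seconds, until in-flight tasks drain before the rest of `stop()` tears things down. A standalone sketch of the pattern (the executor and timeout are illustrative):

```scala
import java.util.concurrent.{Executors, TimeUnit}

val allocator = Executors.newSingleThreadScheduledExecutor()
allocator.scheduleWithFixedDelay(
  () => println("allocating..."), 0L, 1L, TimeUnit.SECONDS)

// Orderly shutdown: reject new tasks, then wait (bounded) for running
// ones to finish before dependent resources are torn down.
allocator.shutdown()
if (!allocator.awaitTermination(30, TimeUnit.SECONDS)) {
  allocator.shutdownNow() // interrupt stragglers if the wait times out
}
```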
