From 4afbaf0f6c26ec88397426742b51a7ab2fe59c0c Mon Sep 17 00:00:00 2001
From: Li Zhihui
Date: Thu, 26 Feb 2015 12:38:01 +0800
Subject: [PATCH 1/3] Support configuration spark.scheduler.minRegisteredResourcesRatio in Mesos mode.

---
 .../cluster/mesos/CoarseMesosSchedulerBackend.scala     | 5 +++++
 .../scheduler/cluster/mesos/MesosSchedulerBackend.scala | 5 +++++
 2 files changed, 10 insertions(+)

diff --git a/core/src/main/scala/org/apache/spark/scheduler/cluster/mesos/CoarseMesosSchedulerBackend.scala b/core/src/main/scala/org/apache/spark/scheduler/cluster/mesos/CoarseMesosSchedulerBackend.scala
index 90dfe14352a8..5442aa04c218 100644
--- a/core/src/main/scala/org/apache/spark/scheduler/cluster/mesos/CoarseMesosSchedulerBackend.scala
+++ b/core/src/main/scala/org/apache/spark/scheduler/cluster/mesos/CoarseMesosSchedulerBackend.scala
@@ -63,6 +63,8 @@ private[spark] class CoarseMesosSchedulerBackend(
   // Maximum number of cores to acquire (TODO: we'll need more flexible controls here)
   val maxCores = conf.get("spark.cores.max", Int.MaxValue.toString).toInt
 
+  val totalExpectedCores = conf.getInt("spark.cores.max", 0)
+
   // Cores we have acquired with each Mesos task ID
   val coresByTaskId = new HashMap[Int, Int]
   var totalCoresAcquired = 0
@@ -333,4 +335,7 @@ private[spark] class CoarseMesosSchedulerBackend(
       super.applicationId
     }
 
+  override def sufficientResourcesRegistered(): Boolean = {
+    totalCoreCount.get() >= totalExpectedCores * minRegisteredRatio
+  }
 }
diff --git a/core/src/main/scala/org/apache/spark/scheduler/cluster/mesos/MesosSchedulerBackend.scala b/core/src/main/scala/org/apache/spark/scheduler/cluster/mesos/MesosSchedulerBackend.scala
index cfb6592e14aa..72e2622d9471 100644
--- a/core/src/main/scala/org/apache/spark/scheduler/cluster/mesos/MesosSchedulerBackend.scala
+++ b/core/src/main/scala/org/apache/spark/scheduler/cluster/mesos/MesosSchedulerBackend.scala
@@ -69,6 +69,11 @@ private[spark] class MesosSchedulerBackend(
   val listenerBus = sc.listenerBus
 
   @volatile var appId: String = _
+
+  if (!sc.getConf.getOption("spark.scheduler.minRegisteredResourcesRatio").isEmpty) {
+    logWarning("spark.scheduler.minRegisteredResourcesRatio is set, " +
+      "but it will be ignored in mesos fine-grained mode.")
+  }
 
   override def start() {
     synchronized {

From 17213f5c30531c6aa7833ec40079602f18c73e3a Mon Sep 17 00:00:00 2001
From: Li Zhihui
Date: Thu, 26 Feb 2015 12:47:17 +0800
Subject: [PATCH 2/3] Fix some code and doc issues

---
 .../spark/scheduler/cluster/mesos/MesosSchedulerBackend.scala | 2 +-
 docs/configuration.md                                         | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/core/src/main/scala/org/apache/spark/scheduler/cluster/mesos/MesosSchedulerBackend.scala b/core/src/main/scala/org/apache/spark/scheduler/cluster/mesos/MesosSchedulerBackend.scala
index 72e2622d9471..6c5c8601fa9f 100644
--- a/core/src/main/scala/org/apache/spark/scheduler/cluster/mesos/MesosSchedulerBackend.scala
+++ b/core/src/main/scala/org/apache/spark/scheduler/cluster/mesos/MesosSchedulerBackend.scala
@@ -70,7 +70,7 @@ private[spark] class MesosSchedulerBackend(
 
   @volatile var appId: String = _
 
-  if (!sc.getConf.getOption("spark.scheduler.minRegisteredResourcesRatio").isEmpty) {
+  if (sc.conf.contains("spark.scheduler.minRegisteredResourcesRatio")) {
     logWarning("spark.scheduler.minRegisteredResourcesRatio is set, " +
       "but it will be ignored in mesos fine-grained mode.")
   }
diff --git a/docs/configuration.md b/docs/configuration.md
index 8dd2bad61344..b06fda7a37a4 100644
--- a/docs/configuration.md
+++ b/docs/configuration.md
@@ -1021,7 +1021,7 @@ Apart from these, the following properties are also available, and may be useful
   <td>0.0 for Mesos and Standalone mode, 0.8 for YARN</td>
   <td>
     The minimum ratio of registered resources (registered resources / total expected resources)
-    (resources are executors in yarn mode, CPU cores in standalone mode)
+    (resources are executors in yarn mode, CPU cores in standalone mode and coarse-grained mesos mode)
     to wait for before scheduling begins. Specified as a double between 0.0 and 1.0. Regardless of whether
     the minimum ratio of resources has been reached, the maximum amount of time it will wait before scheduling begins
     is controlled by config

From ebdb664deea56dbf1be124b745711d5034608b90 Mon Sep 17 00:00:00 2001
From: Li Zhihui
Date: Thu, 26 Feb 2015 12:53:12 +0800
Subject: [PATCH 3/3] Fix docs issue

---
 docs/configuration.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docs/configuration.md b/docs/configuration.md
index b06fda7a37a4..ac3274451179 100644
--- a/docs/configuration.md
+++ b/docs/configuration.md
@@ -1018,7 +1018,7 @@ Apart from these, the following properties are also available, and may be useful
 </tr>
 <tr>
   <td><code>spark.scheduler.minRegisteredResourcesRatio</code></td>
-  <td>0.0 for Mesos and Standalone mode, 0.8 for YARN</td>
+  <td>0.0 for coarse-grained Mesos and Standalone mode, 0.8 for YARN</td>
   <td>
     The minimum ratio of registered resources (registered resources / total expected resources)
     (resources are executors in yarn mode, CPU cores in standalone mode and coarse-grained mesos mode)
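
For illustration only, not part of the patches above: a minimal driver-side sketch of how the behavior added here could be exercised in coarse-grained Mesos mode. The Mesos master URL, application name, core count, and ratio are placeholder values. With spark.mesos.coarse=true, the new sufficientResourcesRegistered() keeps scheduling from starting until registered cores reach spark.cores.max * spark.scheduler.minRegisteredResourcesRatio, or until spark.scheduler.maxRegisteredResourcesWaitingTime expires; in fine-grained mode the ratio is ignored and only the warning added in MesosSchedulerBackend is logged.

import org.apache.spark.{SparkConf, SparkContext}

object MinRegisteredRatioExample {
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf()
      .setAppName("min-registered-resources-ratio-demo")
      // Placeholder Mesos master URL; coarse-grained mode is required for the ratio to apply.
      .setMaster("mesos://host:5050")
      .set("spark.mesos.coarse", "true")
      // Total expected cores; sufficientResourcesRegistered() compares registered cores against this.
      .set("spark.cores.max", "40")
      // Wait until at least half of the requested cores (20) have registered before
      // scheduling begins, subject to spark.scheduler.maxRegisteredResourcesWaitingTime.
      .set("spark.scheduler.minRegisteredResourcesRatio", "0.5")

    val sc = new SparkContext(conf)
    try {
      // By the time this first job is scheduled, roughly half of the requested
      // cores should have registered (or the waiting-time cap has been reached).
      println(sc.parallelize(1 to 1000).sum())
    } finally {
      sc.stop()
    }
  }
}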