 
 package org.apache.spark.deploy.mesos
 
-import java.io.File
 import java.util.concurrent.CountDownLatch
 
 import org.apache.spark
-import org.apache.spark.deploy.mesos.MesosClusterDispatcher.ClusterDispatcherArguments
 import org.apache.spark.deploy.mesos.ui.MesosClusterUI
 import org.apache.spark.deploy.rest.mesos.MesosRestServer
 import org.apache.spark.scheduler.cluster.mesos._
-import org.apache.spark.util.{IntParam, SignalLogger, Utils}
+import org.apache.spark.util.SignalLogger
 import org.apache.spark.{Logging, SecurityManager, SparkConf}
 
 /*
- * A dispatcher that is responsible for managing and launching drivers, and is intended to
- * be used for Mesos cluster mode. The dispatcher is launched by the user in the cluster,
- * which it launches a [[MesosRestServer]] for listening for driver requests, and launches a
- * [[MesosClusterScheduler]] to launch these drivers in the Mesos cluster.
+ * A dispatcher that is responsible for managing and launching drivers, and is intended to be
+ * used for Mesos cluster mode. The dispatcher is a long-running process started by the user in
+ * the cluster independently of Spark applications.
+ * It contains a [[MesosRestServer]] that listens for requests to submit drivers and a
+ * [[MesosClusterScheduler]] that processes these requests by negotiating with the Mesos master
+ * for resources.
  *
  * A typical new driver lifecycle is the following:
- *
  * - Driver submitted via spark-submit talking to the [[MesosRestServer]]
  * - [[MesosRestServer]] queues the driver request to [[MesosClusterScheduler]]
  * - [[MesosClusterScheduler]] gets resource offers and launches the drivers that are in queue
  *
  * This dispatcher supports both Mesos fine-grain or coarse-grain mode as the mode is configurable
  * per driver launched.
  * This class is needed since Mesos doesn't manage frameworks, so the dispatcher acts as
- * a daemon to launch drivers as Mesos frameworks upon request.
+ * a daemon to launch drivers as Mesos frameworks upon request. The dispatcher is also started and
+ * stopped by sbin/start-mesos-dispatcher and sbin/stop-mesos-dispatcher respectively.
  */
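// A hedged usage sketch of the lifecycle above, not drawn from this diff: the class name,
// jar, and host are placeholders, and 7077 is the dispatcher's default port (see the
// argument parser below). A driver is submitted against the dispatcher's REST endpoint:
//   ./bin/spark-submit \
//     --deploy-mode cluster \
//     --master mesos://dispatcher-host:7077 \
//     --class org.example.MyApp \
//     my-app.jar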
 private[mesos] class MesosClusterDispatcher(
-    args: ClusterDispatcherArguments,
+    args: MesosClusterDispatcherArguments,
     conf: SparkConf)
   extends Logging {
 
-  private def publicAddress(conf: SparkConf, host: String): String = {
+  private def publicAddress(conf: SparkConf, defaultAddress: String): String = {
     val envVar = conf.getenv("SPARK_PUBLIC_DNS")
-    if (envVar != null) envVar else host
+    if (envVar != null) envVar else defaultAddress
   }
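// An illustrative note (the hostname is a placeholder): if SPARK_PUBLIC_DNS is set in the
// dispatcher's environment, e.g. SPARK_PUBLIC_DNS=dispatcher.example.com, that name is
// returned instead of the bind address passed in as defaultAddress.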
 
-  private val recoveryMode = conf.get("spark.deploy.recoveryMode", "NONE").toUpperCase()
+  private val recoveryMode = conf.get("spark.mesos.deploy.recoveryMode", "NONE").toUpperCase()
   logInfo("Recovery mode in Mesos dispatcher set to: " + recoveryMode)
 
   private val engineFactory = recoveryMode match {
     case "NONE" => new BlackHoleMesosClusterPersistenceEngineFactory
     case "ZOOKEEPER" => new ZookeeperMesosClusterPersistenceEngineFactory(conf)
   }
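// A hedged configuration sketch (the ZooKeeper quorum is a placeholder): the ZOOKEEPER
// branch above is taken when recovery is enabled before the dispatcher is constructed;
// any value other than NONE or ZOOKEEPER would fail the match with a MatchError.
//   val conf = new SparkConf()
//     .set("spark.mesos.deploy.recoveryMode", "ZOOKEEPER")
//     .set("spark.mesos.deploy.zookeeper.url", "zk1:2181,zk2:2181")
// main() below derives exactly these two settings from the --zk flag.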
 
-  private val scheduler = new MesosClusterSchedulerDriver(engineFactory, conf)
+  private val scheduler = new MesosClusterScheduler(engineFactory, conf)
 
   private val server = new MesosRestServer(args.host, args.port, conf, scheduler)
   private val webUi = new MesosClusterUI(
@@ -74,7 +74,6 @@ private[mesos] class MesosClusterDispatcher(
     scheduler)
 
   private val shutdownLatch = new CountDownLatch(1)
-  private val sparkHome = new File(Option(conf.getenv("SPARK_HOME")).getOrElse("."))
 
   def start(): Unit = {
     webUi.bind()
@@ -98,110 +97,26 @@ private[mesos] class MesosClusterDispatcher(
 private[mesos] object MesosClusterDispatcher extends spark.Logging {
   def main(args: Array[String]) {
     SignalLogger.register(log)
-
     val conf = new SparkConf
-    val dispatcherArgs = new ClusterDispatcherArguments(args, conf)
-
+    val dispatcherArgs = new MesosClusterDispatcherArguments(args, conf)
     conf.setMaster(dispatcherArgs.masterUrl)
-    conf.setAppName("Spark Cluster")
-
+    conf.setAppName(dispatcherArgs.name)
     dispatcherArgs.zookeeperUrl.foreach { z =>
-      conf.set("spark.deploy.recoveryMode", "ZOOKEEPER")
-      conf.set("spark.deploy.zookeeper.url", z)
+      conf.set("spark.mesos.deploy.recoveryMode", "ZOOKEEPER")
+      conf.set("spark.mesos.deploy.zookeeper.url", z)
     }
-
     val dispatcher = new MesosClusterDispatcher(
       dispatcherArgs,
       conf)
-
     dispatcher.start()
-
     val shutdownHook = new Thread() {
       override def run() {
         logInfo("Shutdown hook is shutting down dispatcher")
         dispatcher.stop()
         dispatcher.awaitShutdown()
       }
     }
-
     Runtime.getRuntime.addShutdownHook(shutdownHook)
-
     dispatcher.awaitShutdown()
   }
-
-  private class ClusterDispatcherArguments(args: Array[String], conf: SparkConf) {
-    var host = Utils.localHostName()
-    var port = 7077
-    var webUiPort = 8081
-    var masterUrl: String = _
-    var zookeeperUrl: Option[String] = None
-    var propertiesFile: String = _
-
-    parse(args.toList)
-
-    propertiesFile = Utils.loadDefaultSparkProperties(conf, propertiesFile)
-
-    private def parse(args: List[String]): Unit = args match {
-      case ("--host" | "-h") :: value :: tail =>
-        Utils.checkHost(value, "Please use hostname " + value)
-        host = value
-        parse(tail)
-
-      case ("--port" | "-p") :: IntParam(value) :: tail =>
-        port = value
-        parse(tail)
-
-      case ("--webui-port" | "-p") :: IntParam(value) :: tail =>
-        webUiPort = value
-        parse(tail)
-
-      case ("--zk" | "-z") :: value :: tail =>
-        zookeeperUrl = Some(value)
-        parse(tail)
-
-      case ("--master" | "-m") :: value :: tail =>
-        if (!value.startsWith("mesos://")) {
-          System.err.println("Cluster dispatcher only supports mesos (uri begins with mesos://)")
-          System.exit(1)
-        }
-        masterUrl = value.stripPrefix("mesos://")
-        parse(tail)
-
-      case ("--properties-file") :: value :: tail =>
-        propertiesFile = value
-        parse(tail)
-
-      case ("--help") :: tail =>
-        printUsageAndExit(0)
-
-      case Nil => {
-        if (masterUrl == null) {
-          System.err.println("--master is required")
-          System.exit(1)
-        }
-      }
-
-      case _ =>
-        printUsageAndExit(1)
-    }
-
-    /**
-     * Print usage and exit JVM with the given exit code.
-     */
-    def printUsageAndExit(exitCode: Int): Unit = {
-      System.err.println(
-        "Usage: MesosClusterDispatcher [options]\n" +
-        "\n" +
-        "Options:\n" +
-        "  -h HOST, --host HOST     Hostname to listen on\n" +
-        "  -p PORT, --port PORT     Port to listen on (default: 7077)\n" +
-        "  --webui-port WEBUI_PORT  WebUI Port to listen on (default: 8081)\n" +
-        "  -m --master MASTER       URI for connecting to Mesos master\n" +
-        "  -z --zk ZOOKEEPER        Comma delimited URLs for connecting to \n" +
-        "                           Zookeeper for persistence\n" +
-        "  --properties-file FILE   Path to a custom Spark properties file.\n" +
-        "                           Default is conf/spark-defaults.conf.")
-      System.exit(exitCode)
-    }
-  }
 }
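For orientation, a minimal sketch of how the new entry point wires these pieces together, assuming MesosClusterDispatcherArguments accepts the same flags as the parser removed above; the master URL and ZooKeeper quorum are placeholders, and the enclosing object is hypothetical:

    package org.apache.spark.deploy.mesos  // required: the dispatcher classes are private[mesos]

    import org.apache.spark.SparkConf

    object DispatcherLaunchSketch {
      def main(argv: Array[String]): Unit = {
        val conf = new SparkConf
        // Hypothetical flag values; --master is required and --zk enables ZooKeeper recovery.
        val parsed = new MesosClusterDispatcherArguments(
          Array("--master", "mesos://mesos-master:5050", "--zk", "zk1:2181,zk2:2181"), conf)
        conf.setMaster(parsed.masterUrl)
        conf.setAppName(parsed.name)
        parsed.zookeeperUrl.foreach { z =>
          conf.set("spark.mesos.deploy.recoveryMode", "ZOOKEEPER")
          conf.set("spark.mesos.deploy.zookeeper.url", z)
        }
        val dispatcher = new MesosClusterDispatcher(parsed, conf)
        dispatcher.start()
        dispatcher.awaitShutdown()  // blocks on the shutdown latch until stop() is called
      }
    }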