Commit 629c1dc

Cleanup.
1 parent 99fb1a3 commit 629c1dc

4 files changed: +20 -38 lines changed

core/src/main/scala/org/apache/spark/SparkContext.scala

Lines changed: 0 additions & 11 deletions
@@ -34,7 +34,6 @@ import scala.collection.mutable.HashMap
 import scala.reflect.{ClassTag, classTag}
 import scala.util.control.NonFatal
 
-import com.google.common.annotations.VisibleForTesting
 import org.apache.hadoop.conf.Configuration
 import org.apache.hadoop.fs.Path
 import org.apache.hadoop.io.{ArrayWritable, BooleanWritable, BytesWritable, DoubleWritable,
@@ -226,7 +225,6 @@ class SparkContext(config: SparkConf) extends Logging with ExecutorAllocationCli
   private var _jars: Seq[String] = _
   private var _files: Seq[String] = _
   private var _shutdownHookRef: AnyRef = _
-  private var _logUrls: Option[Predef.Map[String, String]] = None
 
   /* ------------------------------------------------------------------------------------- *
    | Accessors and public fields. These provide access to the internal state of the        |
@@ -317,15 +315,6 @@ class SparkContext(config: SparkConf) extends Logging with ExecutorAllocationCli
     _dagScheduler = ds
   }
 
-  @VisibleForTesting
-  private[spark] def logUrls: Option[Predef.Map[String, String]] = _logUrls
-
-  @VisibleForTesting
-  private[spark] def logUrls_=(logUrlsMap: Option[Predef.Map[String, String]]): Unit = {
-    _logUrls = logUrlsMap
-    logInfo(s"Setting log urls to ${_logUrls.get.mkString(" | ")}")
-  }
-
   def applicationId: String = _applicationId
   def applicationAttemptId: Option[String] = _applicationAttemptId
 

core/src/main/scala/org/apache/spark/scheduler/SchedulerBackend.scala

Lines changed: 1 addition & 1 deletion
@@ -52,7 +52,7 @@ private[spark] trait SchedulerBackend {
   /**
    * Get the URLs for the driver logs. These URLs are used to display the links in the UI
    * Executors tab for the driver.
-   * @return
+   * @return The urls to the logs of the driver
   */
   def getDriverLogUrls: Option[Map[String, String]] = None
 

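Aside (not part of this commit): the getDriverLogUrls hook documented above is the point where a scheduler backend can report driver log links for the UI Executors tab. Below is a minimal, self-contained Scala sketch of how such an override might look. SchedulerBackendLike, ExampleClusterBackend, DriverLogUrlsExample, and the URL/container-id values are hypothetical placeholders, since the real SchedulerBackend trait is private[spark] and not extensible from user code.

// Sketch only (assumed names, not Spark's actual classes): a trait mirroring
// the getDriverLogUrls hook shown above, plus a hypothetical cluster backend
// that overrides it so a UI could link to the driver's logs.
trait SchedulerBackendLike {
  // By default no driver log URLs are known, matching the trait in the diff.
  def getDriverLogUrls: Option[Map[String, String]] = None
}

// Hypothetical backend; the log-server base URL and container id are placeholders.
class ExampleClusterBackend(logServerBase: String, containerId: String)
  extends SchedulerBackendLike {
  override def getDriverLogUrls: Option[Map[String, String]] = Some(Map(
    "stdout" -> s"$logServerBase/$containerId/stdout",
    "stderr" -> s"$logServerBase/$containerId/stderr"))
}

object DriverLogUrlsExample extends App {
  val backend = new ExampleClusterBackend(
    "http://nm-host:8042/node/containerlogs", "container_0001_01_000001")
  // Prints the log-name -> URL pairs that a UI could render as links.
  backend.getDriverLogUrls.foreach(urls => println(urls.mkString(" | ")))
}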
yarn/src/main/scala/org/apache/spark/deploy/yarn/ApplicationMaster.scala

Lines changed: 19 additions & 24 deletions
@@ -21,17 +21,13 @@ import scala.util.control.NonFatal
 
 import java.io.{File, IOException}
 import java.lang.reflect.InvocationTargetException
-import java.net.{NetworkInterface, Socket, URL}
+import java.net.{Socket, URL}
 import java.util.concurrent.atomic.AtomicReference
 
-import scala.collection.JavaConverters._
-
 import org.apache.hadoop.fs.{FileSystem, Path}
 import org.apache.hadoop.yarn.api._
 import org.apache.hadoop.yarn.api.records._
-import org.apache.hadoop.yarn.client.api.YarnClient
 import org.apache.hadoop.yarn.conf.YarnConfiguration
-import org.apache.hadoop.yarn.util.ConverterUtils
 
 import org.apache.spark.rpc._
 import org.apache.spark.{Logging, SecurityManager, SparkConf, SparkContext, SparkEnv}
@@ -266,26 +262,25 @@ private[spark] class ApplicationMaster(
 
   private def runDriver(securityMgr: SecurityManager): Unit = {
     addAmIpFilter()
-    if (isClusterMode) {
-      userClassThread = startUserApplication()
-      // This a bit hacky, but we need to wait until the spark.driver.port property has
-      // been set by the Thread executing the user class.
-      val sc = waitForSparkContextInitialized()
+    userClassThread = startUserApplication()
 
-      // If there is no SparkContext at this point, just fail the app.
-      if (sc == null) {
-        finish(FinalApplicationStatus.FAILED,
-          ApplicationMaster.EXIT_SC_NOT_INITED,
-          "Timed out waiting for SparkContext.")
-      } else {
-        rpcEnv = sc.env.rpcEnv
-        runAMEndpoint(
-          sc.getConf.get("spark.driver.host"),
-          sc.getConf.get("spark.driver.port"),
-          isClusterMode = true)
-        registerAM(sc.ui.map(_.appUIAddress).getOrElse(""), securityMgr)
-        userClassThread.join()
-      }
+    // This a bit hacky, but we need to wait until the spark.driver.port property has
+    // been set by the Thread executing the user class.
+    val sc = waitForSparkContextInitialized()
+
+    // If there is no SparkContext at this point, just fail the app.
+    if (sc == null) {
+      finish(FinalApplicationStatus.FAILED,
+        ApplicationMaster.EXIT_SC_NOT_INITED,
+        "Timed out waiting for SparkContext.")
+    } else {
+      rpcEnv = sc.env.rpcEnv
+      runAMEndpoint(
+        sc.getConf.get("spark.driver.host"),
+        sc.getConf.get("spark.driver.port"),
+        isClusterMode = true)
+      registerAM(sc.ui.map(_.appUIAddress).getOrElse(""), securityMgr)
+      userClassThread.join()
+    }
     }
   }
 

yarn/src/main/scala/org/apache/spark/scheduler/cluster/YarnClusterSchedulerBackend.scala

Lines changed: 0 additions & 2 deletions
@@ -21,11 +21,9 @@ import java.net.NetworkInterface
 
 import scala.collection.JavaConverters._
 
-import org.apache.hadoop.yarn.api.ApplicationConstants
 import org.apache.hadoop.yarn.api.records.NodeState
 import org.apache.hadoop.yarn.client.api.YarnClient
 import org.apache.hadoop.yarn.conf.YarnConfiguration
-import org.apache.hadoop.yarn.util.ConverterUtils
 
 import org.apache.spark.SparkContext
 import org.apache.spark.deploy.yarn.YarnSparkHadoopUtil
