diff --git a/core/src/main/scala/org/apache/spark/scheduler/DAGScheduler.scala b/core/src/main/scala/org/apache/spark/scheduler/DAGScheduler.scala
index ecb8ac0121459..dcb5e2f7a9b01 100644
--- a/core/src/main/scala/org/apache/spark/scheduler/DAGScheduler.scala
+++ b/core/src/main/scala/org/apache/spark/scheduler/DAGScheduler.scala
@@ -693,6 +693,11 @@ private[spark] class DAGScheduler(
 
     val jobId = nextJobId.getAndIncrement()
     if (partitions.size == 0) {
+      val time = clock.getTimeMillis()
+      listenerBus.post(
+        SparkListenerJobStart(jobId, time, Seq[StageInfo](), properties))
+      listenerBus.post(
+        SparkListenerJobEnd(jobId, time, JobSucceeded))
       // Return immediately if the job is running 0 tasks
       return new JobWaiter[U](this, jobId, 0, resultHandler)
     }
diff --git a/core/src/main/scala/org/apache/spark/status/AppStatusListener.scala b/core/src/main/scala/org/apache/spark/status/AppStatusListener.scala
index 1067cdc84ceb2..a841b91bdcf58 100644
--- a/core/src/main/scala/org/apache/spark/status/AppStatusListener.scala
+++ b/core/src/main/scala/org/apache/spark/status/AppStatusListener.scala
@@ -319,7 +319,7 @@ private[spark] class AppStatusListener(
     }
 
     val lastStageInfo = event.stageInfos.sortBy(_.stageId).lastOption
-    val lastStageName = lastStageInfo.map(_.name).getOrElse("(Unknown Stage Name)")
+    val jobName = lastStageInfo.map(_.name).getOrElse("")
     val jobGroup = Option(event.properties)
       .flatMap { p => Option(p.getProperty(SparkContext.SPARK_JOB_GROUP_ID)) }
     val sqlExecutionId = Option(event.properties)
@@ -327,7 +327,7 @@
 
     val job = new LiveJob(
       event.jobId,
-      lastStageName,
+      jobName,
       if (event.time > 0) Some(new Date(event.time)) else None,
       event.stageIds,
       jobGroup,
diff --git a/core/src/main/scala/org/apache/spark/ui/UIUtils.scala b/core/src/main/scala/org/apache/spark/ui/UIUtils.scala
index 967435030bc4d..54f2750831d64 100644
--- a/core/src/main/scala/org/apache/spark/ui/UIUtils.scala
+++ b/core/src/main/scala/org/apache/spark/ui/UIUtils.scala
@@ -363,7 +363,8 @@ private[spark] object UIUtils extends Logging {
       skipped: Int,
       reasonToNumKilled: Map[String, Int],
       total: Int): Seq[Node] = {
-    val completeWidth = "width: %s%%".format((completed.toDouble/total)*100)
+    val ratio = if (total == 0) 100.0 else (completed.toDouble/total)*100
+    val completeWidth = "width: %s%%".format(ratio)
     // started + completed can be > total when there are speculative tasks
     val boundedStarted = math.min(started, total - completed)
     val startWidth = "width: %s%%".format((boundedStarted.toDouble/total)*100)
diff --git a/core/src/main/scala/org/apache/spark/ui/jobs/StagePage.scala b/core/src/main/scala/org/apache/spark/ui/jobs/StagePage.scala
index 2c94853f312e7..e399f7ee1920f 100644
--- a/core/src/main/scala/org/apache/spark/ui/jobs/StagePage.scala
+++ b/core/src/main/scala/org/apache/spark/ui/jobs/StagePage.scala
@@ -857,8 +857,13 @@ private[spark] object ApiHelper {
   }
 
   def lastStageNameAndDescription(store: AppStatusStore, job: JobData): (String, String) = {
-    val stage = store.asOption(store.stageAttempt(job.stageIds.max, 0)._1)
-    (stage.map(_.name).getOrElse(""), stage.flatMap(_.description).getOrElse(job.name))
+    // Some jobs have only 0 partitions.
+    if (job.stageIds.isEmpty) {
+      ("", job.name)
+    } else {
+      val stage = store.asOption(store.stageAttempt(job.stageIds.max, 0)._1)
+      (stage.map(_.name).getOrElse(""), stage.flatMap(_.description).getOrElse(job.name))
+    }
   }
 
 }