@@ -17,35 +17,70 @@
 
 package org.apache.spark.deploy
 
+import java.net.URL
+
 import scala.collection.mutable
+import scala.io.Source
 
-import org.scalatest.{BeforeAndAfter, FunSuite}
+import org.scalatest.FunSuite
 
 import org.apache.spark.scheduler.cluster.ExecutorInfo
 import org.apache.spark.scheduler.{SparkListenerExecutorAdded, SparkListener}
-import org.apache.spark.{SparkContext, LocalSparkContext}
+import org.apache.spark.{SparkConf, SparkContext, LocalSparkContext}
 
-class LogUrlsStandaloneSuite extends FunSuite with LocalSparkContext with BeforeAndAfter {
+class LogUrlsStandaloneSuite extends FunSuite with LocalSparkContext {
 
   /** Length of time to wait while draining listener events. */
-  val WAIT_TIMEOUT_MILLIS = 10000
+  private val WAIT_TIMEOUT_MILLIS = 10000
 
-  before {
+  test("verify that correct log urls get propagated from workers") {
     sc = new SparkContext("local-cluster[2,1,512]", "test")
+
+    val listener = new SaveExecutorInfo
+    sc.addSparkListener(listener)
+
+    // Trigger a job so that executors get added
+    sc.parallelize(1 to 100, 4).map(_.toString).count()
+
+    assert(sc.listenerBus.waitUntilEmpty(WAIT_TIMEOUT_MILLIS))
+    listener.addedExecutorInfos.values.foreach { info =>
+      assert(info.logUrlMap.nonEmpty)
+      // Browse to each URL to check that it's valid
+      info.logUrlMap.foreach { case (logType, logUrl) =>
+        println(logUrl)
+        val html = Source.fromURL(logUrl).mkString
+        assert(html.contains(s"$logType log page"))
+      }
+    }
   }
 
-  test("verify log urls get propagated from workers") {
+  test("verify that log urls reflect SPARK_PUBLIC_DNS (SPARK-6175)") {
+    val SPARK_PUBLIC_DNS = "public_dns"
+    class MySparkConf extends SparkConf(false) {
+      override def getenv(name: String) = {
+        if (name == "SPARK_PUBLIC_DNS") SPARK_PUBLIC_DNS
+        else super.getenv(name)
+      }
+
+      override def clone: SparkConf = {
+        new MySparkConf().setAll(getAll)
+      }
+    }
+    val conf = new MySparkConf()
+    sc = new SparkContext("local-cluster[2,1,512]", "test", conf)
+
     val listener = new SaveExecutorInfo
     sc.addSparkListener(listener)
 
-    val rdd1 = sc.parallelize(1 to 100, 4)
-    val rdd2 = rdd1.map(_.toString)
-    rdd2.setName("Target RDD")
-    rdd2.count()
+    // Trigger a job so that executors get added
+    sc.parallelize(1 to 100, 4).map(_.toString).count()
 
     assert(sc.listenerBus.waitUntilEmpty(WAIT_TIMEOUT_MILLIS))
     listener.addedExecutorInfos.values.foreach { info =>
       assert(info.logUrlMap.nonEmpty)
+      info.logUrlMap.values.foreach { logUrl =>
+        assert(new URL(logUrl).getHost === SPARK_PUBLIC_DNS)
+      }
    }
   }
 
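Both tests rely on a SaveExecutorInfo listener that is referenced but not shown in this hunk (the mutable, ExecutorInfo, SparkListenerExecutorAdded, and SparkListener imports above exist for it). A minimal sketch of such a listener, assuming the standard SparkListenerExecutorAdded event carrying executorId and executorInfo, would be:

// Sketch only: collects the ExecutorInfo (including its logUrlMap) of every
// executor that the listener bus reports as added, keyed by executor id.
private class SaveExecutorInfo extends SparkListener {
  val addedExecutorInfos = mutable.Map[String, ExecutorInfo]()

  override def onExecutorAdded(executor: SparkListenerExecutorAdded): Unit = {
    addedExecutorInfos(executor.executorId) = executor.executorInfo
  }
}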