@@ -147,7 +147,7 @@ class LargeTask(stageId: Int) extends Task[Array[Byte]](stageId, 0) {
 }
 
 class TaskSetManagerSuite extends FunSuite with LocalSparkContext with Logging {
-  import TaskLocality.{ANY, PROCESS_LOCAL, NOPREF, NODE_LOCAL, RACK_LOCAL}
+  import TaskLocality.{ANY, PROCESS_LOCAL, NO_PREF, NODE_LOCAL, RACK_LOCAL}
 
   private val conf = new SparkConf
 
@@ -163,7 +163,7 @@ class TaskSetManagerSuite extends FunSuite with LocalSparkContext with Logging {
 
     // Offer a host with NOPREF as the constraint,
     // we should get a nopref task immediately since that's what we only have
-    var taskOption = manager.resourceOffer("exec1", "host1", NOPREF)
+    var taskOption = manager.resourceOffer("exec1", "host1", NO_PREF)
     assert(taskOption.isDefined)
 
     // Tell it the task has finished
@@ -180,15 +180,15 @@ class TaskSetManagerSuite extends FunSuite with LocalSparkContext with Logging {
 
     // First three offers should all find tasks
     for (i <- 0 until 3) {
-      var taskOption = manager.resourceOffer("exec1", "host1", NOPREF)
+      var taskOption = manager.resourceOffer("exec1", "host1", NO_PREF)
       assert(taskOption.isDefined)
       val task = taskOption.get
       assert(task.executorId === "exec1")
     }
     assert(sched.startedTasks.toSet === Set(0, 1, 2))
 
     // Re-offer the host -- now we should get no more tasks
-    assert(manager.resourceOffer("exec1", "host1", NOPREF) === None)
+    assert(manager.resourceOffer("exec1", "host1", NO_PREF) === None)
 
     // Finish the first two tasks
     manager.handleSuccessfulTask(0, createTaskResult(0))
@@ -245,7 +245,7 @@ class TaskSetManagerSuite extends FunSuite with LocalSparkContext with Logging {
     // Offer host2, exec3 again, at NODE_LOCAL level: we should get noPref task
     // after failing to find a node_Local task
     assert(manager.resourceOffer("exec3", "host2", NODE_LOCAL) == None)
-    assert(manager.resourceOffer("exec3", "host2", NOPREF).get.index == 3)
+    assert(manager.resourceOffer("exec3", "host2", NO_PREF).get.index == 3)
   }
 
   test("we do not need to delay scheduling when we only have noPref tasks in the queue") {
@@ -262,7 +262,7 @@ class TaskSetManagerSuite extends FunSuite with LocalSparkContext with Logging {
     assert(manager.resourceOffer("exec1", "host1", PROCESS_LOCAL).get.index === 0)
     assert(manager.resourceOffer("exec3", "host2", PROCESS_LOCAL).get.index === 1)
     assert(manager.resourceOffer("exec3", "host2", NODE_LOCAL) == None)
-    assert(manager.resourceOffer("exec3", "host2", NOPREF).get.index === 2)
+    assert(manager.resourceOffer("exec3", "host2", NO_PREF).get.index === 2)
   }
 
   test("delay scheduling with fallback") {
@@ -482,24 +482,24 @@ class TaskSetManagerSuite extends FunSuite with LocalSparkContext with Logging {
     val clock = new FakeClock
     val manager = new TaskSetManager(sched, taskSet, MAX_TASK_FAILURES, clock)
     // Only ANY is valid
-    assert(manager.myLocalityLevels.sameElements(Array(ANY)))
+    assert(manager.myLocalityLevels.sameElements(Array(NO_PREF, ANY)))
     // Add a new executor
     sched.addExecutor("execD", "host1")
     manager.executorAdded()
     // Valid locality should contain NODE_LOCAL and ANY
-    assert(manager.myLocalityLevels.sameElements(Array(NODE_LOCAL, ANY)))
+    assert(manager.myLocalityLevels.sameElements(Array(NODE_LOCAL, NO_PREF, ANY)))
     // Add another executor
     sched.addExecutor("execC", "host2")
     manager.executorAdded()
     // Valid locality should contain PROCESS_LOCAL, NODE_LOCAL, RACK_LOCAL and ANY
-    assert(manager.myLocalityLevels.sameElements(Array(PROCESS_LOCAL, NODE_LOCAL, RACK_LOCAL, ANY)))
+    assert(manager.myLocalityLevels.sameElements(Array(PROCESS_LOCAL, NODE_LOCAL, NO_PREF, RACK_LOCAL, ANY)))
     // test if the valid locality is recomputed when the executor is lost
     sched.removeExecutor("execC")
     manager.executorLost("execC", "host2")
-    assert(manager.myLocalityLevels.sameElements(Array(NODE_LOCAL, ANY)))
+    assert(manager.myLocalityLevels.sameElements(Array(NODE_LOCAL, NO_PREF, ANY)))
     sched.removeExecutor("execD")
     manager.executorLost("execD", "host1")
-    assert(manager.myLocalityLevels.sameElements(Array(ANY)))
+    assert(manager.myLocalityLevels.sameElements(Array(NO_PREF, ANY)))
   }
 
   test("test RACK_LOCAL tasks") {
@@ -572,15 +572,15 @@ class TaskSetManagerSuite extends FunSuite with LocalSparkContext with Logging {
 
     assert(manager.resourceOffer("execA", "host1", PROCESS_LOCAL).get.index === 0)
     assert(manager.resourceOffer("execA", "host1", NODE_LOCAL) == None)
-    assert(manager.resourceOffer("execA", "host1", NOPREF) == None)
+    assert(manager.resourceOffer("execA", "host1", NO_PREF) == None)
     clock.advance(LOCALITY_WAIT)
     // schedule a node local task
     assert(manager.resourceOffer("execA", "host1", NODE_LOCAL).get.index === 1)
     manager.speculatableTasks += 1
     // schedule the nonPref task
-    assert(manager.resourceOffer("execA", "host1", NOPREF).get.index === 2)
+    assert(manager.resourceOffer("execA", "host1", NO_PREF).get.index === 2)
     // schedule the speculative task
-    assert(manager.resourceOffer("execB", "host2", NOPREF).get.index === 1)
+    assert(manager.resourceOffer("execB", "host2", NO_PREF).get.index === 1)
     clock.advance(LOCALITY_WAIT * 3)
     // schedule non-local tasks
     assert(manager.resourceOffer("execB", "host2", ANY).get.index === 3)
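
For reference, the assertions in the hunks above lean on the relative ordering of the locality levels, with the new NO_PREF level sitting between NODE_LOCAL and RACK_LOCAL (this is exactly the order the myLocalityLevels assertions spell out). The Scala sketch below illustrates that ordering and why an offer at one level can also serve more-local tasks; it is an illustration written for this note, not the actual Spark source.

object TaskLocality extends Enumeration {
  // Declaration order runs from most to least local, so comparing two
  // levels compares how strict they are; this matches the order checked
  // by the myLocalityLevels assertions above.
  type TaskLocality = Value
  val PROCESS_LOCAL, NODE_LOCAL, NO_PREF, RACK_LOCAL, ANY = Value

  // An offer made with a given constraint may launch any task whose
  // locality is at least as local as the constraint, e.g. a PROCESS_LOCAL
  // task can still run when the offer only requires NODE_LOCAL.
  def isAllowed(constraint: TaskLocality, condition: TaskLocality): Boolean =
    condition <= constraint
}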