Skip to content

Commit 557fdeb

Browse files
author
Marcelo Vanzin
committed
Cleanup a couple more constants.
1 parent be6068d commit 557fdeb

File tree

3 files changed

+20
-23
lines changed

3 files changed

+20
-23
lines changed

yarn/alpha/src/main/scala/org/apache/spark/deploy/yarn/YarnAllocationHandler.scala

Lines changed: 8 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -58,11 +58,6 @@ private[yarn] class YarnAllocationHandler(
5858
map: collection.Map[String, collection.Set[SplitInfo]])
5959
extends YarnAllocator with Logging {
6060

61-
private val ANY_HOST = "*"
62-
// All requests are issued with same priority : we do not (yet) have any distinction between
63-
// request types (like map/reduce in hadoop for example)
64-
private val PRIORITY = 1
65-
6661
// These three are locked on allocatedHostToContainersMap. Complementary data structures
6762
// allocatedHostToContainersMap : containers which are running : host, Set<containerid>
6863
// allocatedContainerToHostMap: container to host mapping.
@@ -362,7 +357,7 @@ private[yarn] class YarnAllocationHandler(
362357
for (container <- hostContainers) {
363358
val candidateHost = container.getHostName
364359
val candidateNumContainers = container.getNumContainers
365-
assert(ANY_HOST != candidateHost)
360+
assert(YarnSparkHadoopUtil.ANY_HOST != candidateHost)
366361

367362
val rack = YarnSparkHadoopUtil.lookupRack(conf, candidateHost)
368363
if (rack != null) {
@@ -376,7 +371,8 @@ private[yarn] class YarnAllocationHandler(
376371
new ArrayBuffer[ResourceRequest](rackToCounts.size)
377372
for ((rack, count) <- rackToCounts){
378373
requestedContainers +=
379-
createResourceRequest(AllocationType.RACK, rack, count, PRIORITY)
374+
createResourceRequest(AllocationType.RACK, rack, count,
375+
YarnSparkHadoopUtil.RM_REQUEST_PRIORITY)
380376
}
381377

382378
requestedContainers.toList
@@ -407,7 +403,7 @@ private[yarn] class YarnAllocationHandler(
407403
logDebug("numExecutors: " + numExecutors + ", host preferences: " +
408404
preferredHostToCount.isEmpty)
409405
resourceRequests = List(createResourceRequest(
410-
AllocationType.ANY, null, numExecutors, PRIORITY))
406+
AllocationType.ANY, null, numExecutors, YarnSparkHadoopUtil.RM_REQUEST_PRIORITY))
411407
} else {
412408
// request for all hosts in preferred nodes and for numExecutors -
413409
// candidates.size, request by default allocation policy.
@@ -421,7 +417,7 @@ private[yarn] class YarnAllocationHandler(
421417
AllocationType.HOST,
422418
candidateHost,
423419
requiredCount,
424-
PRIORITY)
420+
YarnSparkHadoopUtil.RM_REQUEST_PRIORITY)
425421
}
426422
}
427423
val rackContainerRequests: List[ResourceRequest] = createRackResourceRequests(
@@ -431,7 +427,7 @@ private[yarn] class YarnAllocationHandler(
431427
AllocationType.ANY,
432428
resource = null,
433429
numExecutors,
434-
PRIORITY)
430+
YarnSparkHadoopUtil.RM_REQUEST_PRIORITY)
435431

436432
val containerRequests: ArrayBuffer[ResourceRequest] = new ArrayBuffer[ResourceRequest](
437433
hostContainerRequests.size + rackContainerRequests.size + 1)
@@ -481,7 +477,7 @@ private[yarn] class YarnAllocationHandler(
481477
// There must be a third request - which is ANY : that will be specially handled.
482478
requestType match {
483479
case AllocationType.HOST => {
484-
assert(ANY_HOST != resource)
480+
assert(YarnSparkHadoopUtil.ANY_HOST != resource)
485481
val hostname = resource
486482
val nodeLocal = createResourceRequestImpl(hostname, numExecutors, priority)
487483

@@ -495,7 +491,7 @@ private[yarn] class YarnAllocationHandler(
495491
createResourceRequestImpl(rack, numExecutors, priority)
496492
}
497493
case AllocationType.ANY => createResourceRequestImpl(
498-
ANY_HOST, numExecutors, priority)
494+
YarnSparkHadoopUtil.ANY_HOST, numExecutors, priority)
499495
case _ => throw new IllegalArgumentException(
500496
"Unexpected/unsupported request type: " + requestType)
501497
}

yarn/common/src/main/scala/org/apache/spark/deploy/yarn/YarnSparkHadoopUtil.scala

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -87,6 +87,12 @@ object YarnSparkHadoopUtil {
8787
// Additional memory overhead - in mb.
8888
val DEFAULT_MEMORY_OVERHEAD = 384
8989

90+
val ANY_HOST = "*"
91+
92+
// All RM requests are issued with same priority : we do not (yet) have any distinction between
93+
// request types (like map/reduce in hadoop for example)
94+
val RM_REQUEST_PRIORITY = 1
95+
9096
// Host to rack map - saved from allocation requests. We are expecting this not to change.
9197
// Note that it is possible for this to change : and ResourceManager will indicate that to us via
9298
// update response to allocate. But we are punting on handling that for now.

yarn/stable/src/main/scala/org/apache/spark/deploy/yarn/YarnAllocationHandler.scala

Lines changed: 6 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -59,11 +59,6 @@ private[yarn] class YarnAllocationHandler(
5959
map: collection.Map[String, collection.Set[SplitInfo]])
6060
extends YarnAllocator with Logging {
6161

62-
private val ANY_HOST = "*"
63-
// All requests are issued with same priority : we do not (yet) have any distinction between
64-
// request types (like map/reduce in hadoop for example)
65-
private val PRIORITY = 1
66-
6762
// These three are locked on allocatedHostToContainersMap. Complementary data structures
6863
// allocatedHostToContainersMap : containers which are running : host, Set<containerid>
6964
// allocatedContainerToHostMap: container to host mapping.
@@ -390,7 +385,7 @@ private[yarn] class YarnAllocationHandler(
390385

391386
for (container <- hostContainers) {
392387
val candidateHost = container.getNodes.last
393-
assert(ANY_HOST != candidateHost)
388+
assert(YarnSparkHadoopUtil.ANY_HOST != candidateHost)
394389

395390
val rack = YarnSparkHadoopUtil.lookupRack(conf, candidateHost)
396391
if (rack != null) {
@@ -406,7 +401,7 @@ private[yarn] class YarnAllocationHandler(
406401
AllocationType.RACK,
407402
rack,
408403
count,
409-
PRIORITY)
404+
YarnSparkHadoopUtil.RM_REQUEST_PRIORITY)
410405
}
411406

412407
requestedContainers
@@ -437,7 +432,7 @@ private[yarn] class YarnAllocationHandler(
437432
AllocationType.ANY,
438433
resource = null,
439434
numExecutors,
440-
PRIORITY).toList
435+
YarnSparkHadoopUtil.RM_REQUEST_PRIORITY).toList
441436
} else {
442437
// Request for all hosts in preferred nodes and for numExecutors -
443438
// candidates.size, request by default allocation policy.
@@ -450,7 +445,7 @@ private[yarn] class YarnAllocationHandler(
450445
AllocationType.HOST,
451446
candidateHost,
452447
requiredCount,
453-
PRIORITY)
448+
YarnSparkHadoopUtil.RM_REQUEST_PRIORITY)
454449
}
455450
}
456451
val rackContainerRequests: List[ContainerRequest] = createRackResourceRequests(
@@ -460,7 +455,7 @@ private[yarn] class YarnAllocationHandler(
460455
AllocationType.ANY,
461456
resource = null,
462457
numExecutors,
463-
PRIORITY)
458+
YarnSparkHadoopUtil.RM_REQUEST_PRIORITY)
464459

465460
val containerRequestBuffer = new ArrayBuffer[ContainerRequest](
466461
hostContainerRequests.size + rackContainerRequests.size() + anyContainerRequests.size)
@@ -509,7 +504,7 @@ private[yarn] class YarnAllocationHandler(
509504
// There must be a third request, which is ANY. That will be specially handled.
510505
requestType match {
511506
case AllocationType.HOST => {
512-
assert(ANY_HOST != resource)
507+
assert(YarnSparkHadoopUtil.ANY_HOST != resource)
513508
val hostname = resource
514509
val nodeLocal = constructContainerRequests(
515510
Array(hostname),

0 commit comments

Comments
 (0)