File tree Expand file tree Collapse file tree 1 file changed +3
-3
lines changed
core/src/main/scala/org/apache/spark/scheduler Expand file tree Collapse file tree 1 file changed +3
-3
lines changed Original file line number Diff line number Diff line change @@ -138,7 +138,7 @@ class DAGScheduler(
138138 taskScheduler.setDAGScheduler(this )
139139
140140 // Number of map, reduce tasks above which we do not assign preferred locations
141- // based on map output sizes. We limit the size of jobs for which assign preferred locations
141+ // based on map output sizes. We limit the size of jobs for which assign preferred locations
142142 // as sorting the locations by size becomes expensive.
143143 private [this ] val SHUFFLE_PREF_MAP_THRESHOLD = 1000
144144 // NOTE: This should be less than 2000 as we use HighlyCompressedMapStatus beyond that
@@ -1411,8 +1411,8 @@ class DAGScheduler(
14111411 if (rdd.partitions.size < SHUFFLE_PREF_REDUCE_THRESHOLD &&
14121412 s.rdd.partitions.size < SHUFFLE_PREF_MAP_THRESHOLD ) {
14131413 // Get the preferred map output locations for this reducer
1414- val topLocsForReducer = mapOutputTracker.getLocationsWithLargestOutputs(s.shuffleId, partition,
1415- rdd.partitions.size, NUM_REDUCER_PREF_LOCS )
1414+ val topLocsForReducer = mapOutputTracker.getLocationsWithLargestOutputs(s.shuffleId,
1415+ partition, rdd.partitions.size, NUM_REDUCER_PREF_LOCS )
14161416 if (topLocsForReducer.nonEmpty) {
14171417 return topLocsForReducer.get.map(loc => TaskLocation (loc.host, loc.executorId))
14181418 }
You can't perform that action at this time.
0 commit comments