-
Notifications
You must be signed in to change notification settings - Fork 188
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Prevent new host overloading #1822
Changes from 1 commit
3a7a125
e610cf1
339dd44
d814a9d
50ba080
39b3535
bee2269
6c02f60
fe45d81
1414edf
4650639
890504b
5d7de80
572407e
3612d54
d527799
d055ff0
0127517
f4de9ee
df9c3a0
b1fbeda
5ec231b
9ed693f
b80e8c9
a13440a
8cae538
4ea2166
5b822c3
e2e6794
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -29,6 +29,7 @@ | |
import com.hubspot.singularity.RequestUtilization; | ||
import com.hubspot.singularity.SingularityDeployStatistics; | ||
import com.hubspot.singularity.SingularityPendingTaskId; | ||
import com.hubspot.singularity.SingularitySlave; | ||
import com.hubspot.singularity.SingularitySlaveUsage; | ||
import com.hubspot.singularity.SingularitySlaveUsageWithId; | ||
import com.hubspot.singularity.SingularityTask; | ||
|
@@ -41,13 +42,15 @@ | |
import com.hubspot.singularity.config.MesosConfiguration; | ||
import com.hubspot.singularity.config.SingularityConfiguration; | ||
import com.hubspot.singularity.data.DeployManager; | ||
import com.hubspot.singularity.data.SlaveManager; | ||
import com.hubspot.singularity.data.TaskManager; | ||
import com.hubspot.singularity.data.UsageManager; | ||
import com.hubspot.singularity.helpers.MesosUtils; | ||
import com.hubspot.singularity.helpers.SingularityMesosTaskHolder; | ||
import com.hubspot.singularity.mesos.SingularitySlaveUsageWithCalculatedScores.MaxProbableUsage; | ||
import com.hubspot.singularity.scheduler.SingularityLeaderCache; | ||
import com.hubspot.singularity.scheduler.SingularityScheduler; | ||
import com.hubspot.singularity.scheduler.SingularityUsagePoller; | ||
|
||
@Singleton | ||
public class SingularityMesosOfferScheduler { | ||
|
@@ -65,6 +68,8 @@ public class SingularityMesosOfferScheduler { | |
private final SingularitySlaveAndRackManager slaveAndRackManager; | ||
private final SingularitySlaveAndRackHelper slaveAndRackHelper; | ||
private final SingularityTaskSizeOptimizer taskSizeOptimizer; | ||
private final SingularityUsagePoller usagePoller; | ||
private final SlaveManager slaveManager; | ||
private final UsageManager usageManager; | ||
private final DeployManager deployManager; | ||
private final SingularitySchedulerLock lock; | ||
|
@@ -89,6 +94,8 @@ public SingularityMesosOfferScheduler(MesosConfiguration mesosConfiguration, | |
SingularityTaskSizeOptimizer taskSizeOptimizer, | ||
SingularitySlaveAndRackHelper slaveAndRackHelper, | ||
SingularityLeaderCache leaderCache, | ||
SingularityUsagePoller usagePoller, | ||
SlaveManager slaveManager, | ||
UsageManager usageManager, | ||
DeployManager deployManager, | ||
SingularitySchedulerLock lock) { | ||
|
@@ -102,6 +109,8 @@ public SingularityMesosOfferScheduler(MesosConfiguration mesosConfiguration, | |
this.slaveAndRackManager = slaveAndRackManager; | ||
this.taskSizeOptimizer = taskSizeOptimizer; | ||
this.leaderCache = leaderCache; | ||
this.usagePoller = usagePoller; | ||
this.slaveManager = slaveManager; | ||
this.slaveAndRackHelper = slaveAndRackHelper; | ||
this.taskPrioritizer = taskPrioritizer; | ||
this.usageManager = usageManager; | ||
|
@@ -180,7 +189,8 @@ public Collection<SingularityOfferHolder> checkOffers(final Collection<Offer> of | |
mesosConfiguration.getScoreUsingSystemLoad(), | ||
getMaxProbableUsageForSlave(activeTaskIds, requestUtilizations, offerHolders.get(usageWithId.getSlaveId()).getSanitizedHost()), | ||
mesosConfiguration.getLoad5OverloadedThreshold(), | ||
mesosConfiguration.getLoad1OverloadedThreshold() | ||
mesosConfiguration.getLoad1OverloadedThreshold(), | ||
usageWithId.getTimestamp() | ||
) | ||
)); | ||
|
||
|
@@ -196,23 +206,43 @@ public Collection<SingularityOfferHolder> checkOffers(final Collection<Offer> of | |
List<CompletableFuture<Void>> scoringFutures = new ArrayList<>(); | ||
AtomicReference<Throwable> scoringException = new AtomicReference<>(null); | ||
for (SingularityOfferHolder offerHolder : offerHolders.values()) { | ||
if (!isOfferFull(offerHolder)) { | ||
scoringFutures.add( | ||
offerScoringSemaphore.call( | ||
() -> CompletableFuture.runAsync(() -> { | ||
try { | ||
double score = calculateScore(offerHolder, currentSlaveUsagesBySlaveId, tasksPerOfferHost, taskRequestHolder, activeTaskIdsForRequest, requestUtilizations.get(taskRequestHolder.getTaskRequest().getRequest().getId())); | ||
if (score != 0) { | ||
scorePerOffer.put(offerHolder.getSlaveId(), score); | ||
} | ||
} catch (Throwable t) { | ||
LOG.error("Uncaught exception while scoring offers", t); | ||
scoringException.set(t); | ||
} | ||
}, | ||
offerScoringExecutor | ||
))); | ||
if (isOfferFull(offerHolder)) { | ||
continue; | ||
} | ||
Optional<SingularitySlaveUsageWithCalculatedScores> maybeSlaveUsage = Optional.fromNullable(currentSlaveUsagesBySlaveId.get(offerHolder.getSlaveId())); | ||
|
||
if (taskManager.getActiveTasks().stream() | ||
.anyMatch(t -> t.getTaskRequest().getDeploy().getTimestamp().or(System.currentTimeMillis()) > maybeSlaveUsage.get().getTimestamp() | ||
&& t.getMesosTask().getSlaveId().getValue().equals(offerHolder.getSlaveId()))) { | ||
Optional<SingularitySlave> maybeSlave = slaveManager.getSlave(offerHolder.getSlaveId()); | ||
if (maybeSlave.isPresent()) { | ||
usagePoller.getSlaveUsage(maybeSlave.get()); | ||
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. will probably want to put this in There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Another note, this would be a good thing to stick inside the completable future below. It's a good candidate to make async since we are now IO-bound on an API call and CPU-bound on the calculations |
||
} | ||
continue; | ||
} | ||
|
||
// if (maybeSlaveUsage.isPresent() && System.currentTimeMillis() - maybeSlaveUsage.get().getTimestamp() > configuration.getMaxSlaveUsageMetricAgeMs()) { | ||
// Optional<SingularitySlave> maybeSlave = slaveManager.getSlave(offerHolder.getSlaveId()); | ||
// if (maybeSlave.isPresent()) { | ||
// usagePoller.getSlaveUsage(maybeSlave.get()); | ||
// } | ||
// continue; | ||
// } | ||
scoringFutures.add( | ||
offerScoringSemaphore.call( | ||
() -> CompletableFuture.runAsync(() -> { | ||
try { | ||
double score = calculateScore(offerHolder, currentSlaveUsagesBySlaveId, tasksPerOfferHost, taskRequestHolder, activeTaskIdsForRequest, requestUtilizations.get(taskRequestHolder.getTaskRequest().getRequest().getId())); | ||
if (score != 0) { | ||
scorePerOffer.put(offerHolder.getSlaveId(), score); | ||
} | ||
} catch (Throwable t) { | ||
LOG.error("Uncaught exception while scoring offers", t); | ||
scoringException.set(t); | ||
} | ||
}, | ||
offerScoringExecutor | ||
))); | ||
} | ||
|
||
CompletableFutures.allOf(scoringFutures).join(); | ||
|
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -26,7 +26,14 @@ class SingularitySlaveUsageWithCalculatedScores { | |
private final double load5Threshold; | ||
private final double load1Threshold; | ||
|
||
SingularitySlaveUsageWithCalculatedScores(SingularitySlaveUsage slaveUsage, MachineLoadMetric systemLoadMetric, MaxProbableUsage maxProbableTaskUsage, double load5Threshold, double load1Threshold) { | ||
private final long timestamp; | ||
|
||
SingularitySlaveUsageWithCalculatedScores(SingularitySlaveUsage slaveUsage, | ||
MachineLoadMetric systemLoadMetric, | ||
MaxProbableUsage maxProbableTaskUsage, | ||
double load5Threshold, | ||
double load1Threshold, | ||
long timestamp) { | ||
this.slaveUsage = slaveUsage; | ||
this.systemLoadMetric = systemLoadMetric; | ||
this.maxProbableTaskUsage = maxProbableTaskUsage; | ||
|
@@ -39,6 +46,7 @@ class SingularitySlaveUsageWithCalculatedScores { | |
} | ||
this.load5Threshold = load5Threshold; | ||
this.load1Threshold = load1Threshold; | ||
this.timestamp = timestamp; | ||
} | ||
|
||
boolean isCpuOverloaded(double estimatedNumCpusToAdd) { | ||
|
@@ -121,6 +129,10 @@ SingularitySlaveUsage getSlaveUsage() { | |
return diskInUseScore; | ||
} | ||
|
||
long getTimestamp() { | ||
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. you should be able to just do getSlaveUsage().getTimestamp() instead of having to store it in two places |
||
return timestamp; | ||
} | ||
|
||
void addEstimatedCpuUsage(double estimatedAddedCpus) { | ||
this.estimatedAddedCpusUsage += estimatedAddedCpus; | ||
} | ||
|
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -117,7 +117,7 @@ public void runActionOnPoll() { | |
usageHelper.getSlavesToTrackUsageFor().forEach((slave) -> { | ||
usageFutures.add(usageCollectionSemaphore.call(() -> | ||
CompletableFuture.runAsync(() -> { | ||
collectSlaveUage(slave, now, utilizationPerRequestId, previousUtilizations, overLoadedHosts, totalMemBytesUsed, totalMemBytesAvailable, | ||
collectSlaveUsage(slave, now, utilizationPerRequestId, previousUtilizations, overLoadedHosts, totalMemBytesUsed, totalMemBytesAvailable, | ||
totalCpuUsed, totalCpuAvailable, totalDiskBytesUsed, totalDiskBytesAvailable); | ||
}, usageExecutor) | ||
)); | ||
|
@@ -126,15 +126,35 @@ public void runActionOnPoll() { | |
CompletableFutures.allOf(usageFutures).join(); | ||
|
||
usageManager.saveClusterUtilization( | ||
getClusterUtilization(utilizationPerRequestId, totalMemBytesUsed.get(), totalMemBytesAvailable.get(), totalCpuUsed.get(), totalCpuAvailable.get(), totalDiskBytesUsed.get(), totalDiskBytesAvailable | ||
.get(), now)); | ||
getClusterUtilization( | ||
utilizationPerRequestId, totalMemBytesUsed.get(), totalMemBytesAvailable.get(), | ||
totalCpuUsed.get(), totalCpuAvailable.get(), totalDiskBytesUsed.get(), totalDiskBytesAvailable.get(), now)); | ||
utilizationPerRequestId.values().forEach(usageManager::saveRequestUtilization); | ||
|
||
if (configuration.isShuffleTasksForOverloadedSlaves()) { | ||
shuffleTasksOnOverloadedHosts(overLoadedHosts); | ||
} | ||
} | ||
|
||
public CompletableFuture<Void> getSlaveUsage(SingularitySlave slave) { | ||
return usageCollectionSemaphore.call(() -> | ||
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. For the individual method, not sure we want to make it completely async like the larger ones. This method will likely not be called from the same context as the poller itself, so it should probably fall under a different semaphore (e.g. the offer scoring one) if we want it to be async
||
CompletableFuture.runAsync(() -> { | ||
collectSlaveUsage( | ||
slave, | ||
System.currentTimeMillis(), | ||
new ConcurrentHashMap<>(), | ||
usageManager.getRequestUtilizations(), | ||
new ConcurrentHashMap<>(), | ||
new AtomicLong(), | ||
new AtomicLong(), | ||
new AtomicDouble(), | ||
new AtomicDouble(), | ||
new AtomicLong(), | ||
new AtomicLong()); | ||
}, usageExecutor) | ||
); | ||
} | ||
|
||
public void runWithRequestLock(Runnable function, String requestId) { | ||
ReentrantLock lock = requestLocks.computeIfAbsent(requestId, (r) -> new ReentrantLock()); | ||
lock.lock(); | ||
|
@@ -145,17 +165,17 @@ public void runWithRequestLock(Runnable function, String requestId) { | |
} | ||
} | ||
|
||
private void collectSlaveUage(SingularitySlave slave, | ||
long now, | ||
Map<String, RequestUtilization> utilizationPerRequestId, | ||
Map<String, RequestUtilization> previousUtilizations, | ||
Map<SingularitySlaveUsage, List<TaskIdWithUsage>> overLoadedHosts, | ||
AtomicLong totalMemBytesUsed, | ||
AtomicLong totalMemBytesAvailable, | ||
AtomicDouble totalCpuUsed, | ||
AtomicDouble totalCpuAvailable, | ||
AtomicLong totalDiskBytesUsed, | ||
AtomicLong totalDiskBytesAvailable) { | ||
private void collectSlaveUsage(SingularitySlave slave, | ||
long now, | ||
Map<String, RequestUtilization> utilizationPerRequestId, | ||
Map<String, RequestUtilization> previousUtilizations, | ||
Map<SingularitySlaveUsage, List<TaskIdWithUsage>> overLoadedHosts, | ||
AtomicLong totalMemBytesUsed, | ||
AtomicLong totalMemBytesAvailable, | ||
AtomicDouble totalCpuUsed, | ||
AtomicDouble totalCpuAvailable, | ||
AtomicLong totalDiskBytesUsed, | ||
AtomicLong totalDiskBytesAvailable) { | ||
Optional<Long> memoryMbTotal = Optional.absent(); | ||
Optional<Double> cpusTotal = Optional.absent(); | ||
Optional<Long> diskMbTotal = Optional.absent(); | ||
|
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Second thoughts about the setup. Would it make sense to instead collect additional usages in this block instead? I'm realizing that the loop below would be called for each pending task. If we hit a case where collecting a particular slave usage is throwing exceptions or timing out, we will continue to recheck it for each pending task. Whereas, if we check in this block instead, we can just omit that up front and leave the block below as it was previously.
If we move the usage collection here, we'll likely want to convert this from parallelStream to a list of CompletableFutures like below, to have better control over the concurrency.