Prevent new host overloading #1822
Changes from 3 commits
@@ -10,6 +10,7 @@
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.Function;
import java.util.stream.Collectors;
@@ -22,13 +23,15 @@
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Optional;
import com.google.common.util.concurrent.AtomicDouble;
import com.google.common.util.concurrent.ThreadFactoryBuilder;
import com.google.inject.Inject;
import com.hubspot.mesos.Resources;
import com.hubspot.singularity.RequestType;
import com.hubspot.singularity.RequestUtilization;
import com.hubspot.singularity.SingularityDeployStatistics;
import com.hubspot.singularity.SingularityPendingTaskId;
import com.hubspot.singularity.SingularitySlave;
import com.hubspot.singularity.SingularitySlaveUsage;
import com.hubspot.singularity.SingularitySlaveUsageWithId;
import com.hubspot.singularity.SingularityTask;
@@ -41,13 +44,15 @@
import com.hubspot.singularity.config.MesosConfiguration;
import com.hubspot.singularity.config.SingularityConfiguration;
import com.hubspot.singularity.data.DeployManager;
import com.hubspot.singularity.data.SlaveManager;
import com.hubspot.singularity.data.TaskManager;
import com.hubspot.singularity.data.UsageManager;
import com.hubspot.singularity.helpers.MesosUtils;
import com.hubspot.singularity.helpers.SingularityMesosTaskHolder;
import com.hubspot.singularity.mesos.SingularitySlaveUsageWithCalculatedScores.MaxProbableUsage;
import com.hubspot.singularity.scheduler.SingularityLeaderCache;
import com.hubspot.singularity.scheduler.SingularityScheduler;
import com.hubspot.singularity.scheduler.SingularityUsagePoller;

@Singleton
public class SingularityMesosOfferScheduler {
@@ -65,6 +70,8 @@ public class SingularityMesosOfferScheduler {
  private final SingularitySlaveAndRackManager slaveAndRackManager;
  private final SingularitySlaveAndRackHelper slaveAndRackHelper;
  private final SingularityTaskSizeOptimizer taskSizeOptimizer;
  private final SingularityUsagePoller usagePoller;
  private final SlaveManager slaveManager;
  private final UsageManager usageManager;
  private final DeployManager deployManager;
  private final SingularitySchedulerLock lock;
@@ -89,6 +96,8 @@ public SingularityMesosOfferScheduler(MesosConfiguration mesosConfiguration,
      SingularityTaskSizeOptimizer taskSizeOptimizer,
      SingularitySlaveAndRackHelper slaveAndRackHelper,
      SingularityLeaderCache leaderCache,
      SingularityUsagePoller usagePoller,
      SlaveManager slaveManager,
      UsageManager usageManager,
      DeployManager deployManager,
      SingularitySchedulerLock lock) {
@@ -102,6 +111,8 @@ public SingularityMesosOfferScheduler(MesosConfiguration mesosConfiguration,
    this.slaveAndRackManager = slaveAndRackManager;
    this.taskSizeOptimizer = taskSizeOptimizer;
    this.leaderCache = leaderCache;
    this.usagePoller = usagePoller;
    this.slaveManager = slaveManager;
    this.slaveAndRackHelper = slaveAndRackHelper;
    this.taskPrioritizer = taskPrioritizer;
    this.usageManager = usageManager;
@@ -180,7 +191,8 @@ public Collection<SingularityOfferHolder> checkOffers(final Collection<Offer> of
        mesosConfiguration.getScoreUsingSystemLoad(),
        getMaxProbableUsageForSlave(activeTaskIds, requestUtilizations, offerHolders.get(usageWithId.getSlaveId()).getSanitizedHost()),
        mesosConfiguration.getLoad5OverloadedThreshold(),
        mesosConfiguration.getLoad1OverloadedThreshold()
        mesosConfiguration.getLoad1OverloadedThreshold(),
        usageWithId.getTimestamp()
    )
));
@@ -196,23 +208,11 @@ public Collection<SingularityOfferHolder> checkOffers(final Collection<Offer> of
    List<CompletableFuture<Void>> scoringFutures = new ArrayList<>();
    AtomicReference<Throwable> scoringException = new AtomicReference<>(null);
    for (SingularityOfferHolder offerHolder : offerHolders.values()) {
      if (!isOfferFull(offerHolder)) {
        scoringFutures.add(
            offerScoringSemaphore.call(
                () -> CompletableFuture.runAsync(() -> {
                  try {
                    double score = calculateScore(offerHolder, currentSlaveUsagesBySlaveId, tasksPerOfferHost, taskRequestHolder, activeTaskIdsForRequest, requestUtilizations.get(taskRequestHolder.getTaskRequest().getRequest().getId()));
                    if (score != 0) {
                      scorePerOffer.put(offerHolder.getSlaveId(), score);
                    }
                  } catch (Throwable t) {
                    LOG.error("Uncaught exception while scoring offers", t);
                    scoringException.set(t);
                  }
                },
                offerScoringExecutor
            )));
      }
      scoringFutures.add(offerScoringSemaphore.call(() ->
          CompletableFuture.supplyAsync(() -> {
            return calculateScore(offerHolders, requestUtilizations, activeTaskIds, currentSlaveUsagesBySlaveId, tasksPerOfferHost, taskRequestHolder, scorePerOffer, activeTaskIdsForRequest, scoringException, offerHolder);
          },
          offerScoringExecutor)));
    }

    CompletableFutures.allOf(scoringFutures).join();
@@ -240,6 +240,71 @@ public Collection<SingularityOfferHolder> checkOffers(final Collection<Offer> of
    return offerHolders.values();
  }

  private Void calculateScore(
      Map<String, SingularityOfferHolder> offerHolders,
      Map<String, RequestUtilization> requestUtilizations,
      List<SingularityTaskId> activeTaskIds,
      Map<String, SingularitySlaveUsageWithCalculatedScores> currentSlaveUsagesBySlaveId,
      Map<String, Integer> tasksPerOfferHost,
      SingularityTaskRequestHolder taskRequestHolder,
      Map<String, Double> scorePerOffer,
      List<SingularityTaskId> activeTaskIdsForRequest,
      AtomicReference<Throwable> scoringException,
      SingularityOfferHolder offerHolder) {
    if (isOfferFull(offerHolder)) {
      return null;
    }
    String slaveId = offerHolder.getSlaveId();
    Optional<SingularitySlaveUsageWithCalculatedScores> maybeSlaveUsage = Optional.fromNullable(currentSlaveUsagesBySlaveId.get(slaveId));

    if (taskManager.getActiveTasks().stream()
        .anyMatch(t -> t.getTaskRequest().getDeploy().getTimestamp().or(System.currentTimeMillis()) > maybeSlaveUsage.get().getSlaveUsage().getTimestamp()
            && t.getMesosTask().getSlaveId().getValue().equals(slaveId))) {
      Optional<SingularitySlave> maybeSlave = slaveManager.getSlave(slaveId);
      if (maybeSlave.isPresent()) {
        CompletableFuture.supplyAsync(() ->
            usagePoller.collectSlaveUsage(
                maybeSlave.get(),
                System.currentTimeMillis(),
                new ConcurrentHashMap<>(),
                usageManager.getRequestUtilizations(),
                new ConcurrentHashMap<>(),
                new AtomicLong(),
[Review comment] Given these arguments will be the same each time, does it make sense to create an overloaded method to handle those bits in UsagePoller instead? (A sketch of one possible overload follows this block.)
                new AtomicLong(),
                new AtomicDouble(),
                new AtomicDouble(),
                new AtomicLong(),
                new AtomicLong()),
            offerScoringExecutor)
            .whenComplete((usage, throwable) -> {
              if (throwable == null && usage.isPresent()) {
                currentSlaveUsagesBySlaveId.put(slaveId, new SingularitySlaveUsageWithCalculatedScores(
                    usage.get(),
                    mesosConfiguration.getScoreUsingSystemLoad(),
                    getMaxProbableUsageForSlave(activeTaskIds, requestUtilizations, offerHolders.get(slaveId).getSanitizedHost()),
                    mesosConfiguration.getLoad5OverloadedThreshold(),
                    mesosConfiguration.getLoad1OverloadedThreshold(),
                    usage.get().getTimestamp()
                ));
              } else {
                throw new RuntimeException(throwable);
[Review comment] Where is the handling for this runtime exception? We currently aren't calling get or join on the future created here, which causes two issues for us: (a sketch of joining the future and surfacing failures follows after this method)
              }
            });
      }
    }
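A minimal sketch of the overload suggested in the comment above, assuming it would live in SingularityUsagePoller with a UsageManager already injected there. The two-argument signature, the Optional return type, and the accumulator ordering are inferred from this call site rather than taken from the existing UsagePoller API.

```java
// Hypothetical convenience overload (sketch only): delegates to the full
// collectSlaveUsage call with throwaway accumulators, so callers that only
// need a one-off usage snapshot do not have to construct them.
public Optional<SingularitySlaveUsageWithId> collectSlaveUsage(SingularitySlave slave, long now) {
  return collectSlaveUsage(
      slave,
      now,
      new ConcurrentHashMap<>(),               // throwaway accumulator, ignored by this caller
      usageManager.getRequestUtilizations(),
      new ConcurrentHashMap<>(),               // throwaway accumulator, ignored by this caller
      new AtomicLong(),
      new AtomicLong(),
      new AtomicDouble(),
      new AtomicDouble(),
      new AtomicLong(),
      new AtomicLong());
}
```

The call site in calculateScore would then shrink to `usagePoller.collectSlaveUsage(maybeSlave.get(), System.currentTimeMillis())`.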

    try {
      double score = calculateScore(offerHolder, currentSlaveUsagesBySlaveId, tasksPerOfferHost, taskRequestHolder, activeTaskIdsForRequest, requestUtilizations.get(taskRequestHolder.getTaskRequest().getRequest().getId()));
      if (score != 0) {
        scorePerOffer.put(slaveId, score);
      }
    } catch (Throwable t) {
      LOG.error("Uncaught exception while scoring offers", t);
      scoringException.set(t);
    }
    return null;
  }
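On the unjoined future flagged in the comment above, a hedged sketch of one way to keep the refresh observable, reusing the hypothetical two-argument collectSlaveUsage overload from the earlier sketch. The refreshed usage is stored in thenAccept, and the future is joined before scoring so a failure lands in the existing scoringException reference instead of being thrown inside whenComplete, where nothing observes it. Whether scoring should block on the refresh at all is a separate design choice; this is illustrative, not the PR's implementation.

```java
// Sketch: capture the usage-refresh future so both completion and failure are
// observed before this offer is scored.
CompletableFuture<Void> usageRefresh = CompletableFuture
    .supplyAsync(() -> usagePoller.collectSlaveUsage(maybeSlave.get(), System.currentTimeMillis()),
        offerScoringExecutor)
    .thenAccept(usage -> {
      if (usage.isPresent()) {
        currentSlaveUsagesBySlaveId.put(slaveId, new SingularitySlaveUsageWithCalculatedScores(
            usage.get(),
            mesosConfiguration.getScoreUsingSystemLoad(),
            getMaxProbableUsageForSlave(activeTaskIds, requestUtilizations, offerHolders.get(slaveId).getSanitizedHost()),
            mesosConfiguration.getLoad5OverloadedThreshold(),
            mesosConfiguration.getLoad1OverloadedThreshold(),
            usage.get().getTimestamp()));
      }
    });

try {
  usageRefresh.join();       // surfaces any refresh failure here as a CompletionException
} catch (Throwable t) {
  LOG.error("Could not refresh usage for slave {}", slaveId, t);
  scoringException.set(t);   // route through the existing error channel
  return null;
}
```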

  private MaxProbableUsage getMaxProbableUsageForSlave(List<SingularityTaskId> activeTaskIds, Map<String, RequestUtilization> requestUtilizations, String sanitizedHostname) {
    double cpu = 0;
    double memBytes = 0;
[Review comment] Second thoughts about the setup. Would it make sense to collect additional usages in this block instead? I'm realizing that the loop below would be called for each pending task. If we hit a case where collecting a particular slave usage is throwing exceptions or timing out, we will continue to recheck it for each pending task. Whereas, if we check in this block instead, we can just omit that slave up front and leave the block below as it was previously.

If we move the usage collection here, we'll likely want to convert this from parallelStream to a list of CompletableFutures, like below, to have better control over the concurrency. (A rough sketch of that conversion follows.)
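A rough sketch of that parallelStream-to-futures conversion, reusing the offerScoringSemaphore and offerScoringExecutor pattern from the scoring loop. The slavesNeedingUsageRefresh collection, the refreshedUsages map, and the two-argument collectSlaveUsage overload are placeholders for illustration, not names from this PR.

```java
// Sketch: refresh stale usages once, up front, with bounded concurrency. A slave
// whose usage collection throws or times out is simply skipped here instead of
// being retried for every pending task in the loop below.
Map<String, SingularitySlaveUsage> refreshedUsages = new ConcurrentHashMap<>();
List<CompletableFuture<Void>> usageRefreshFutures = new ArrayList<>();
for (SingularitySlave slave : slavesNeedingUsageRefresh) {   // hypothetical list of slaves with stale usage
  usageRefreshFutures.add(
      offerScoringSemaphore.call(() ->
          CompletableFuture.runAsync(() -> {
            try {
              Optional<SingularitySlaveUsage> usage =
                  usagePoller.collectSlaveUsage(slave, System.currentTimeMillis());  // hypothetical overload
              if (usage.isPresent()) {
                refreshedUsages.put(slave.getId(), usage.get());
              }
            } catch (Throwable t) {
              LOG.warn("Skipping usage refresh for slave {}", slave.getId(), t);
            }
          }, offerScoringExecutor)));
}
CompletableFutures.allOf(usageRefreshFutures).join();
```

Collecting once per slave here also means the per-task scoring block below could stay as it was before this change, which is the point of the suggestion.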