Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Prevent new host overloading #1822

Merged
merged 29 commits into from
Aug 16, 2018
Merged
Show file tree
Hide file tree
Changes from 15 commits
Commits
Show all changes
29 commits
Select commit Hold shift + click to select a range
3a7a125
WIP: Prevent new host overloading
pschoenfelder Jul 17, 2018
e610cf1
Rework async stuff per PR
pschoenfelder Jul 19, 2018
339dd44
PR changes
pschoenfelder Jul 24, 2018
d814a9d
Move usage collection loop
pschoenfelder Jul 31, 2018
50ba080
Use semaphore
pschoenfelder Jul 31, 2018
39b3535
Fix tests
pschoenfelder Jul 31, 2018
bee2269
Resolve dependencies
pschoenfelder Jul 31, 2018
6c02f60
Fix tests
pschoenfelder Jul 31, 2018
fe45d81
Add timeout for slave usage checking
pschoenfelder Aug 1, 2018
1414edf
Build fix
pschoenfelder Aug 1, 2018
4650639
Build fix
pschoenfelder Aug 1, 2018
890504b
Fix failed data refresh
pschoenfelder Aug 1, 2018
5d7de80
Make fewer zk calls for usage fetching
ssalinas Aug 2, 2018
572407e
Add new method for slave usage
pschoenfelder Aug 6, 2018
3612d54
Merge branch 'new-host-overloading' of https://github.com/HubSpot/Sin…
pschoenfelder Aug 6, 2018
d527799
rm comment
pschoenfelder Aug 6, 2018
d055ff0
longs to doubles
pschoenfelder Aug 6, 2018
0127517
Add test tolerances
pschoenfelder Aug 6, 2018
f4de9ee
Remove more zk calls
pschoenfelder Aug 7, 2018
df9c3a0
Condense duplicate variables
pschoenfelder Aug 7, 2018
b1fbeda
Fix typo
pschoenfelder Aug 7, 2018
5ec231b
Skip hosts which do not have valid metrics during offer processing
ssalinas Aug 8, 2018
9ed693f
Merge pull request #1828 from HubSpot/skip_host
pschoenfelder Aug 8, 2018
b80e8c9
Add leader/web cache for request utilizations
ssalinas Aug 8, 2018
a13440a
merge request utilization caching
ssalinas Aug 8, 2018
8cae538
new strategy for new host overload check
ssalinas Aug 9, 2018
4ea2166
fix the test client as well
ssalinas Aug 9, 2018
5b822c3
Add logging
ssalinas Aug 9, 2018
e2e6794
add try/catch here
ssalinas Aug 9, 2018
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -41,6 +41,10 @@ public MesosClientException(String message, Throwable cause) {

public MesosSlaveStateObject getSlaveState(String uri);

public List<MesosTaskMonitorObject> getSlaveResourceUsage(String hostname);
// Backwards-compatible overload: delegates to the two-argument variant with the
// default (non-short-timeout) HTTP client behavior.
default List<MesosTaskMonitorObject> getSlaveResourceUsage(String hostname) {
return getSlaveResourceUsage(hostname, false);
}

public List<MesosTaskMonitorObject> getSlaveResourceUsage(String hostname, boolean useShortTimeout);

}
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,7 @@
import com.google.inject.name.Named;
import com.hubspot.horizon.HttpClient;
import com.hubspot.horizon.HttpRequest;
import com.hubspot.horizon.HttpRequest.Options;
import com.hubspot.horizon.HttpResponse;
import com.hubspot.mesos.JavaUtils;
import com.hubspot.mesos.json.MesosMasterMetricsSnapshotObject;
Expand All @@ -22,7 +23,8 @@
@Singleton
public class SingularityMesosClient implements MesosClient {

public static final String HTTP_CLIENT_NAME = "mesos.http.client";
public static final String DEFAULT_HTTP_CLIENT_NAME = "mesos.http.client";
public static final String SHORT_TIMEOUT_HTTP_CLIENT_NAME = "mesos.http.client.short.timeout";

private static final Logger LOG = LoggerFactory.getLogger(SingularityMesosClient.class);

Expand All @@ -35,10 +37,13 @@ public class SingularityMesosClient implements MesosClient {
private static final TypeReference<List<MesosTaskMonitorObject>> TASK_MONITOR_TYPE_REFERENCE = new TypeReference<List<MesosTaskMonitorObject>>() {};

private final HttpClient httpClient;
private final HttpClient shortTimeoutHttpClient;

@Inject
public SingularityMesosClient(@Named(HTTP_CLIENT_NAME) HttpClient httpClient) {
public SingularityMesosClient(@Named(DEFAULT_HTTP_CLIENT_NAME) HttpClient httpClient,
@Named(SHORT_TIMEOUT_HTTP_CLIENT_NAME) HttpClient shortTimeoutHttpClient) {
this.httpClient = httpClient;
this.shortTimeoutHttpClient = shortTimeoutHttpClient;
}

@Override
Expand All @@ -51,15 +56,16 @@ public String getMasterMetricsSnapshotUri(String hostnameAndPort) {
return String.format(MESOS_MASTER_METRICS_SNAPSHOT_URL, hostnameAndPort);
}

private HttpResponse getFromMesos(String uri) {
private HttpResponse getFromMesos(String uri, boolean useShortTimeout) {
HttpClient currentHttpClient = useShortTimeout ? shortTimeoutHttpClient : httpClient;
HttpResponse response = null;

final long start = System.currentTimeMillis();

LOG.debug("Fetching {} from mesos", uri);

try {
response = httpClient.execute(HttpRequest.newBuilder().setUrl(uri).build());
response = currentHttpClient.execute(HttpRequest.newBuilder().setUrl(uri).build(), new Options());

LOG.debug("Response {} - {} after {}", response.getStatusCode(), uri, JavaUtils.duration(start));
} catch (Exception e) {
Expand All @@ -74,7 +80,7 @@ private HttpResponse getFromMesos(String uri) {
}

private <T> T getFromMesos(String uri, Class<T> clazz) {
HttpResponse response = getFromMesos(uri);
HttpResponse response = getFromMesos(uri, false);

try {
return response.getAs(clazz);
Expand Down Expand Up @@ -109,10 +115,10 @@ public MesosSlaveStateObject getSlaveState(String uri) {
}

@Override
public List<MesosTaskMonitorObject> getSlaveResourceUsage(String hostname) {
public List<MesosTaskMonitorObject> getSlaveResourceUsage(String hostname, boolean useShortTimeout) {
final String uri = String.format(MESOS_SLAVE_STATISTICS_URL, hostname);

HttpResponse response = getFromMesos(uri);
HttpResponse response = getFromMesos(uri, useShortTimeout);

try {
return response.getAs(TASK_MONITOR_TYPE_REFERENCE);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -6,21 +6,26 @@
import com.google.inject.name.Names;
import com.hubspot.horizon.HttpClient;
import com.hubspot.horizon.HttpConfig;
import com.hubspot.horizon.HttpConfig.Builder;
import com.hubspot.horizon.ning.NingHttpClient;
import com.hubspot.mesos.JavaUtils;

public class SingularityMesosClientModule extends AbstractModule {

public static final String MESOS_CLIENT_OBJECT_MAPPER = "singularity.mesos.client.object.mapper";
private static final int MESOS_CLIENT_HTTP_SHORT_TIMEOUT_SECONDS = 5;

@Override
protected void configure() {
ObjectMapper objectMapper = JavaUtils.newObjectMapper();
HttpConfig httpConfig = HttpConfig.newBuilder().setObjectMapper(objectMapper).build();
HttpClient httpClient = new NingHttpClient(httpConfig);
Builder httpConfigBuilder = HttpConfig.newBuilder().setObjectMapper(objectMapper);

bind(ObjectMapper.class).annotatedWith(Names.named(MESOS_CLIENT_OBJECT_MAPPER)).toInstance(objectMapper);
bind(HttpClient.class).annotatedWith(Names.named(SingularityMesosClient.HTTP_CLIENT_NAME)).toInstance(httpClient);
bind(HttpClient.class).annotatedWith(Names.named(SingularityMesosClient.DEFAULT_HTTP_CLIENT_NAME))
.toInstance(new NingHttpClient(httpConfigBuilder.build()));

bind(HttpClient.class).annotatedWith(Names.named(SingularityMesosClient.SHORT_TIMEOUT_HTTP_CLIENT_NAME))
.toInstance(new NingHttpClient(httpConfigBuilder.setRequestTimeoutSeconds(MESOS_CLIENT_HTTP_SHORT_TIMEOUT_SECONDS).build()));

bind(MesosClient.class).to(SingularityMesosClient.class).in(Scopes.SINGLETON);
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -376,6 +376,8 @@ public class SingularityConfiguration extends Configuration {

private long preemptibleTaskMaxExpectedRuntimeMs = 900000; // 15 minutes

private long maxSlaveUsageMetricAgeMs = 30000;

// Interval (ms) after which the scheduler re-issues a kill to the driver for tasks
// that have not yet terminated.
public long getAskDriverToKillTasksAgainAfterMillis() {
return askDriverToKillTasksAgainAfterMillis;
}
Expand Down Expand Up @@ -1593,4 +1595,12 @@ public long getPreemptibleTaskMaxExpectedRuntimeMs() {
// Setter for the maximum expected runtime (ms) of preemptible tasks; default is 15 minutes.
public void setPreemptibleTaskMaxExpectedRuntimeMs(long preemptibleTaskMaxExpectedRuntimeMs) {
this.preemptibleTaskMaxExpectedRuntimeMs = preemptibleTaskMaxExpectedRuntimeMs;
}

// Maximum age (ms) of cached slave usage metrics before they are considered stale
// and refreshed during offer processing; default 30000 (30s).
public long getMaxSlaveUsageMetricAgeMs() {
return maxSlaveUsageMetricAgeMs;
}

// Setter for the maximum allowed age (ms) of slave usage metrics.
public void setMaxSlaveUsageMetricAgeMs(long maxSlaveUsageMetricAgeMs) {
this.maxSlaveUsageMetricAgeMs = maxSlaveUsageMetricAgeMs;
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,7 @@
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
import java.util.function.Function;
import java.util.stream.Collectors;

Expand Down Expand Up @@ -192,6 +193,19 @@ public Map<String, RequestUtilization> getRequestUtilizations() {
));
}

/**
 * Fetches utilization data for only the given request ids (rather than all requests),
 * reading each request's node asynchronously and keying the result by request id.
 *
 * @param requestIds ids of the requests whose utilization should be loaded
 * @return map of request id to its utilization for each id that had stored data
 */
public Map<String, RequestUtilization> getRequestUtilizations(Set<String> requestIds) {
  List<String> paths = requestIds.stream()
      .map(this::getRequestPath)
      .collect(Collectors.toList());
  Map<String, RequestUtilization> byRequestId = getAsync("/usage/requests", paths, requestUtilizationTranscoder)
      .stream()
      .collect(Collectors.toMap(RequestUtilization::getRequestId, Function.identity()));
  return byRequestId;
}

// Fetches stored utilization for a single request id, or absent if none has been recorded.
public Optional<RequestUtilization> getRequestUtilization(String requestId) {
return getData(getRequestPath(requestId), requestUtilizationTranscoder);
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -3,8 +3,10 @@
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ExecutorService;
Expand All @@ -29,6 +31,7 @@
import com.hubspot.singularity.RequestUtilization;
import com.hubspot.singularity.SingularityDeployStatistics;
import com.hubspot.singularity.SingularityPendingTaskId;
import com.hubspot.singularity.SingularitySlave;
import com.hubspot.singularity.SingularitySlaveUsage;
import com.hubspot.singularity.SingularitySlaveUsageWithId;
import com.hubspot.singularity.SingularityTask;
Expand All @@ -41,13 +44,15 @@
import com.hubspot.singularity.config.MesosConfiguration;
import com.hubspot.singularity.config.SingularityConfiguration;
import com.hubspot.singularity.data.DeployManager;
import com.hubspot.singularity.data.SlaveManager;
import com.hubspot.singularity.data.TaskManager;
import com.hubspot.singularity.data.UsageManager;
import com.hubspot.singularity.helpers.MesosUtils;
import com.hubspot.singularity.helpers.SingularityMesosTaskHolder;
import com.hubspot.singularity.mesos.SingularitySlaveUsageWithCalculatedScores.MaxProbableUsage;
import com.hubspot.singularity.scheduler.SingularityLeaderCache;
import com.hubspot.singularity.scheduler.SingularityScheduler;
import com.hubspot.singularity.scheduler.SingularityUsageHelper;

@Singleton
public class SingularityMesosOfferScheduler {
Expand All @@ -65,6 +70,8 @@ public class SingularityMesosOfferScheduler {
private final SingularitySlaveAndRackManager slaveAndRackManager;
private final SingularitySlaveAndRackHelper slaveAndRackHelper;
private final SingularityTaskSizeOptimizer taskSizeOptimizer;
private final SingularityUsageHelper usageHelper;
private final SlaveManager slaveManager;
private final UsageManager usageManager;
private final DeployManager deployManager;
private final SingularitySchedulerLock lock;
Expand All @@ -89,6 +96,8 @@ public SingularityMesosOfferScheduler(MesosConfiguration mesosConfiguration,
SingularityTaskSizeOptimizer taskSizeOptimizer,
SingularitySlaveAndRackHelper slaveAndRackHelper,
SingularityLeaderCache leaderCache,
SingularityUsageHelper usageHelper,
SlaveManager slaveManager,
UsageManager usageManager,
DeployManager deployManager,
SingularitySchedulerLock lock) {
Expand All @@ -102,6 +111,8 @@ public SingularityMesosOfferScheduler(MesosConfiguration mesosConfiguration,
this.slaveAndRackManager = slaveAndRackManager;
this.taskSizeOptimizer = taskSizeOptimizer;
this.leaderCache = leaderCache;
this.usageHelper = usageHelper;
this.slaveManager = slaveManager;
this.slaveAndRackHelper = slaveAndRackHelper;
this.taskPrioritizer = taskPrioritizer;
this.usageManager = usageManager;
Expand Down Expand Up @@ -140,6 +151,7 @@ public Collection<SingularityOfferHolder> checkOffers(final Collection<Offer> of

final List<SingularityTaskRequestHolder> sortedTaskRequestHolders = getSortedDueTaskRequests();
final int numDueTasks = sortedTaskRequestHolders.size();
Set<String> relevantRequestIds = new HashSet<>();

final Map<String, SingularityOfferHolder> offerHolders = offers.stream()
.collect(Collectors.groupingBy((o) -> o.getAgentId().getValue()))
Expand All @@ -157,32 +169,75 @@ public Collection<SingularityOfferHolder> checkOffers(final Collection<Offer> of
slaveAndRackHelper.getTextAttributes(offersList.get(0)),
slaveAndRackHelper.getReservedSlaveAttributes(offersList.get(0)));
})
.peek((offerHolder) -> {
taskManager.getActiveTaskIds().forEach((t) -> {
if (t.getSanitizedHost().equals(offerHolder.getSanitizedHost())) {
relevantRequestIds.add(t.getRequestId());
}
});
})
.collect(Collectors.toMap(SingularityOfferHolder::getSlaveId, Function.identity()));

if (sortedTaskRequestHolders.isEmpty()) {
return offerHolders.values();
}

final AtomicInteger tasksScheduled = new AtomicInteger(0);
Map<String, RequestUtilization> requestUtilizations = usageManager.getRequestUtilizations();
Map<String, RequestUtilization> requestUtilizations = usageManager.getRequestUtilizations(relevantRequestIds);
List<SingularityTaskId> activeTaskIds = taskManager.getActiveTaskIds();

final Map<String, SingularitySlaveUsageWithCalculatedScores> currentSlaveUsagesBySlaveId = usageManager.getCurrentSlaveUsages(
Map<String, SingularitySlaveUsageWithId> currentSlaveUsages = usageManager.getCurrentSlaveUsages(
offerHolders.values()
.stream()
.map(SingularityOfferHolder::getSlaveId)
.collect(Collectors.toList()))
.parallelStream()
.collect(Collectors.toMap(
SingularitySlaveUsageWithId::getSlaveId,
(usageWithId) -> new SingularitySlaveUsageWithCalculatedScores(
usageWithId,
.stream()
.collect(Collectors.toMap(SingularitySlaveUsageWithId::getSlaveId, Function.identity()));

List<CompletableFuture<Void>> currentSlaveUsagesFutures = new ArrayList<>();
for (SingularityOfferHolder offerHolder : offerHolders.values()) {
currentSlaveUsagesFutures.add(offerScoringSemaphore.call(() -> CompletableFuture.runAsync(() -> {
String slaveId = offerHolder.getSlaveId();
Optional<SingularitySlaveUsageWithId> maybeSlaveUsage = Optional.fromNullable(currentSlaveUsages.get(slaveId));

if (maybeSlaveUsage.isPresent() && taskManager.getActiveTaskIds().stream()
.anyMatch(t -> t.getStartedAt() > maybeSlaveUsage.get().getTimestamp()
&& t.getSanitizedHost().equals(offerHolder.getSanitizedHost()))) {
Optional<SingularitySlave> maybeSlave = slaveManager.getSlave(slaveId);
if (maybeSlave.isPresent()) {
Optional<SingularitySlaveUsage> usage = usageHelper.collectSlaveUsageSimple(
maybeSlave.get(),
System.currentTimeMillis(),
true);
if (usage.isPresent()) {
currentSlaveUsages.put(slaveId, new SingularitySlaveUsageWithId(usage.get(), slaveId));
} else {
LOG.warn("Failed to refresh stale slave usage data for {}. Will not schedule tasks right now.", maybeSlave.get().getName());
currentSlaveUsages.remove(slaveId);
}
}
}
}, offerScoringExecutor)));
}
CompletableFutures.allOf(currentSlaveUsagesFutures).join();

List<CompletableFuture<Void>> usagesWithScoresFutures = new ArrayList<>();
Map<String, SingularitySlaveUsageWithCalculatedScores> currentSlaveUsagesBySlaveId = new ConcurrentHashMap<>();
for (SingularitySlaveUsageWithId usage : currentSlaveUsages.values()) {
usagesWithScoresFutures.add(offerScoringSemaphore.call(() ->
CompletableFuture.runAsync(() -> currentSlaveUsagesBySlaveId.put(usage.getSlaveId(),
new SingularitySlaveUsageWithCalculatedScores(
usage,
mesosConfiguration.getScoreUsingSystemLoad(),
getMaxProbableUsageForSlave(activeTaskIds, requestUtilizations, offerHolders.get(usageWithId.getSlaveId()).getSanitizedHost()),
getMaxProbableUsageForSlave(activeTaskIds, requestUtilizations, offerHolders.get(usage.getSlaveId()).getSanitizedHost()),
mesosConfiguration.getLoad5OverloadedThreshold(),
mesosConfiguration.getLoad1OverloadedThreshold()
)
));
mesosConfiguration.getLoad1OverloadedThreshold(),
usage.getTimestamp())),
offerScoringExecutor))
);
}

CompletableFutures.allOf(usagesWithScoresFutures).join();

LOG.trace("Found slave usages {}", currentSlaveUsagesBySlaveId);

Expand All @@ -196,23 +251,11 @@ public Collection<SingularityOfferHolder> checkOffers(final Collection<Offer> of
List<CompletableFuture<Void>> scoringFutures = new ArrayList<>();
AtomicReference<Throwable> scoringException = new AtomicReference<>(null);
for (SingularityOfferHolder offerHolder : offerHolders.values()) {
if (!isOfferFull(offerHolder)) {
scoringFutures.add(
offerScoringSemaphore.call(
() -> CompletableFuture.runAsync(() -> {
try {
double score = calculateScore(offerHolder, currentSlaveUsagesBySlaveId, tasksPerOfferHost, taskRequestHolder, activeTaskIdsForRequest, requestUtilizations.get(taskRequestHolder.getTaskRequest().getRequest().getId()));
if (score != 0) {
scorePerOffer.put(offerHolder.getSlaveId(), score);
}
} catch (Throwable t) {
LOG.error("Uncaught exception while scoring offers", t);
scoringException.set(t);
}
},
offerScoringExecutor
)));
}
scoringFutures.add(offerScoringSemaphore.call(() ->
CompletableFuture.supplyAsync(() -> {
return calculateScore(requestUtilizations, currentSlaveUsagesBySlaveId, tasksPerOfferHost, taskRequestHolder, scorePerOffer, activeTaskIdsForRequest, scoringException, offerHolder);
},
offerScoringExecutor)));
}

CompletableFutures.allOf(scoringFutures).join();
Expand Down Expand Up @@ -240,6 +283,32 @@ public Collection<SingularityOfferHolder> checkOffers(final Collection<Offer> of
return offerHolders.values();
}

/**
 * Scores a single offer holder for the given pending task, recording any nonzero score
 * into {@code scorePerOffer}. Full offers are skipped entirely. Any throwable raised while
 * scoring is logged and stashed in {@code scoringException} so the calling loop can surface
 * it after all futures complete. Always returns {@code null} (Void return is used so this
 * can be run via CompletableFuture.supplyAsync).
 */
private Void calculateScore(
    Map<String, RequestUtilization> requestUtilizations,
    Map<String, SingularitySlaveUsageWithCalculatedScores> currentSlaveUsagesBySlaveId,
    Map<String, Integer> tasksPerOfferHost,
    SingularityTaskRequestHolder taskRequestHolder,
    Map<String, Double> scorePerOffer,
    List<SingularityTaskId> activeTaskIdsForRequest,
    AtomicReference<Throwable> scoringException,
    SingularityOfferHolder offerHolder) {
  if (!isOfferFull(offerHolder)) {
    String slaveId = offerHolder.getSlaveId();
    try {
      // Utilization may be null for requests with no recorded data; the scoring overload handles that.
      RequestUtilization utilization = requestUtilizations.get(taskRequestHolder.getTaskRequest().getRequest().getId());
      double score = calculateScore(offerHolder, currentSlaveUsagesBySlaveId, tasksPerOfferHost, taskRequestHolder, activeTaskIdsForRequest, utilization);
      if (score != 0) {
        scorePerOffer.put(slaveId, score);
      }
    } catch (Throwable t) {
      LOG.error("Uncaught exception while scoring offers", t);
      scoringException.set(t);
    }
  }
  return null;
}

private MaxProbableUsage getMaxProbableUsageForSlave(List<SingularityTaskId> activeTaskIds, Map<String, RequestUtilization> requestUtilizations, String sanitizedHostname) {
double cpu = 0;
double memBytes = 0;
Expand Down
Loading