Merged

28 commits
b5e10c4  added `metric` and `mode` arguments to `tune.run()` (Sep 7, 2020)
4143008  Updated search algorithms (Sep 7, 2020)
5eadd6c  Merge branch 'master' into tune-mode-metric (Sep 7, 2020)
6ba754a  Updated searcher base class (Sep 7, 2020)
6a73824  lint (Sep 7, 2020)
4bbe2df  Fix tests (Sep 7, 2020)
6a73ae1  Trigger new build (Sep 8, 2020)
e998ee0  Update experiment analysis (Sep 8, 2020)
31567b5  Set default mode and metric for experiment analysis (Sep 8, 2020)
82bbb1d  Merge branch 'tune-mode-metric' into tune-experiment-analysis (Sep 8, 2020)
05a7e41  Added easy to use utility functions for experiment analysis (Sep 8, 2020)
5b8ce91  Updated docs (Sep 8, 2020)
b015fcb  Use tune sklearn master (Sep 8, 2020)
30749c3  Update shim default args (Sep 8, 2020)
33fbbba  Merge branch 'master' into tune-mode-metric (Sep 8, 2020)
f978d89  Merge branch 'tune-mode-metric' into tune-experiment-analysis (Sep 8, 2020)
c82f079  Fix dataframe tests (Sep 8, 2020)
d909452  Fix dataframe tests (Sep 8, 2020)
4f452f1  Fix errors (Sep 8, 2020)
fec8d64  Updated docs and type hints (Sep 8, 2020)
f8c4d5d  Merge branch 'master' into concurrency (richardliaw, Sep 8, 2020)
21d47d4  fix-tune (richardliaw, Sep 9, 2020)
602a28e  Merge branch 'master' into concurrency (richardliaw, Sep 9, 2020)
e76e06a  Merge branch 'master' into tune-experiment-analysis (richardliaw, Sep 9, 2020)
193b784  Merge branch 'tune-experiment-analysis' into concurrency (richardliaw, Sep 9, 2020)
72ebf45  Merge branch 'master' into concurrency (richardliaw, Sep 9, 2020)
e5e5a08  fix (richardliaw, Sep 9, 2020)
945a35f  lint (richardliaw, Sep 9, 2020)
19 changes: 10 additions & 9 deletions python/ray/tune/examples/bayesopt_example.py
@@ -7,6 +7,7 @@

 import ray
 from ray import tune
 from ray.tune.schedulers import AsyncHyperBandScheduler
+from ray.tune.suggest import ConcurrencyLimiter
 from ray.tune.suggest.bayesopt import BayesOptSearch

@@ -43,18 +44,18 @@ def easy_objective(config):
             "height": tune.uniform(-100, 100)
         }
     }
-    algo = BayesOptSearch(
-        metric="mean_loss",
-        mode="min",
-        utility_kwargs={
-            "kind": "ucb",
-            "kappa": 2.5,
-            "xi": 0.0
-        })
-    scheduler = AsyncHyperBandScheduler(metric="mean_loss", mode="min")
+    algo = BayesOptSearch(utility_kwargs={
+        "kind": "ucb",
+        "kappa": 2.5,
+        "xi": 0.0
+    })
+    algo = ConcurrencyLimiter(algo, max_concurrent=4)
+    scheduler = AsyncHyperBandScheduler()
     tune.run(
         easy_objective,
         name="my_exp",
+        metric="mean_loss",
+        mode="min",
         search_alg=algo,
         scheduler=scheduler,
         **tune_kwargs)
9 changes: 6 additions & 3 deletions python/ray/tune/examples/dragonfly_example.py
@@ -11,6 +11,7 @@

 import ray
 from ray import tune
+from ray.tune.suggest import ConcurrencyLimiter
 from ray.tune.schedulers import AsyncHyperBandScheduler
 from ray.tune.suggest.dragonfly import DragonflySearch

@@ -70,12 +71,14 @@ def objective(config):
         optimizer="bandit",
         domain="euclidean",
         # space=space, # If you want to set the space manually
-        metric="objective",
-        mode="max")
+    )
+    df_search = ConcurrencyLimiter(df_search, max_concurrent=4)

-    scheduler = AsyncHyperBandScheduler(metric="objective", mode="max")
+    scheduler = AsyncHyperBandScheduler()
     tune.run(
         objective,
+        metric="objective",
+        mode="max",
         name="dragonfly_search",
         search_alg=df_search,
         scheduler=scheduler,
12 changes: 6 additions & 6 deletions python/ray/tune/examples/hyperband_example.py
@@ -3,16 +3,15 @@
 import argparse
 import json
 import os
-import random

 import numpy as np

 import ray
-from ray.tune import Trainable, run, sample_from
+from ray import tune
 from ray.tune.schedulers import HyperBandScheduler


-class MyTrainableClass(Trainable):
+class MyTrainableClass(tune.Trainable):
     """Example agent whose learning curve is a random sigmoid.

     The dummy hyperparameters "width" and "height" determine the slope and
@@ -58,13 +57,14 @@ def load_checkpoint(self, checkpoint_path):
         mode="max",
         max_t=200)

-    run(MyTrainableClass,
+    tune.run(
+        MyTrainableClass,
         name="hyperband_test",
         num_samples=20,
         stop={"training_iteration": 1 if args.smoke_test else 99999},
         config={
-            "width": sample_from(lambda spec: 10 + int(90 * random.random())),
-            "height": sample_from(lambda spec: int(100 * random.random()))
+            "width": tune.randint(10, 90),
+            "height": tune.randint(0, 100)
         },
         scheduler=hyperband,
         fail_fast=True)

Contributor commented on the "width" line:

    nit: this would be randint(10, 100)

    Should we also remove mode/metric from the scheduler and pass it to tune.run here?

Contributor Author replied:

    will do this in followup!
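A minimal sketch of the follow-up agreed above, with metric/mode moved off HyperBandScheduler and onto tune.run, and the randint bound widened per the nit (tune.randint excludes its upper bound, so (10, 100) reproduces the old 10 + int(90 * random.random()) range). The function trainable and the "episode_reward_mean" metric name are hypothetical stand-ins, not the example's actual MyTrainableClass:

import ray
from ray import tune
from ray.tune.schedulers import HyperBandScheduler


def trainable(config):
    # Hypothetical stand-in for MyTrainableClass: report a dummy
    # metric that grows with the "width" hyperparameter.
    for step in range(100):
        tune.report(episode_reward_mean=config["width"] * 0.01 * step)


if __name__ == "__main__":
    ray.init(num_cpus=2)
    # No metric/mode on the scheduler anymore; tune.run() supplies them.
    hyperband = HyperBandScheduler(time_attr="training_iteration", max_t=200)
    tune.run(
        trainable,
        name="hyperband_test",
        metric="episode_reward_mean",
        mode="max",
        num_samples=20,
        config={
            # Upper bound is exclusive, hence (10, 100).
            "width": tune.randint(10, 100),
            "height": tune.randint(0, 100),
        },
        scheduler=hyperband,
        fail_fast=True)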
15 changes: 11 additions & 4 deletions python/ray/tune/examples/hyperopt_example.py
@@ -6,6 +6,7 @@

 import ray
 from ray import tune
+from ray.tune.suggest import ConcurrencyLimiter
 from ray.tune.schedulers import AsyncHyperBandScheduler
 from ray.tune.suggest.hyperopt import HyperOptSearch

@@ -58,8 +59,14 @@ def easy_objective(config):
             "activation": tune.choice(["relu", "tanh"])
         }
     }
-    algo = HyperOptSearch(
-        metric="mean_loss", mode="min", points_to_evaluate=current_best_params)
-    scheduler = AsyncHyperBandScheduler(metric="mean_loss", mode="min")
+    algo = HyperOptSearch(points_to_evaluate=current_best_params)
+    algo = ConcurrencyLimiter(algo, max_concurrent=4)
+
+    scheduler = AsyncHyperBandScheduler()
     tune.run(
-        easy_objective, search_alg=algo, scheduler=scheduler, **tune_kwargs)
+        easy_objective,
+        search_alg=algo,
+        scheduler=scheduler,
+        metric="mean_loss",
+        mode="min",
+        **tune_kwargs)
4 changes: 3 additions & 1 deletion python/ray/tune/examples/lightgbm_example.py
@@ -44,6 +44,8 @@ def train_breast_cancer(config):
     from ray.tune.schedulers import ASHAScheduler
     tune.run(
         train_breast_cancer,
+        metric="binary_error",
+        mode="min",
         config=config,
         num_samples=2,
-        scheduler=ASHAScheduler(metric="binary_error", mode="min"))
+        scheduler=ASHAScheduler())
6 changes: 2 additions & 4 deletions python/ray/tune/examples/mlflow_example.py
@@ -9,7 +9,6 @@
 import mlflow
 from mlflow.tracking import MlflowClient
 import time
-import random

 from ray import tune
 from ray.tune.logger import MLFLowLogger, DEFAULT_LOGGERS
@@ -44,9 +43,8 @@ def easy_objective(config):
             "logger_config": {
                 "mlflow_experiment_id": experiment_id,
             },
-            "width": tune.sample_from(
-                lambda spec: 10 + int(90 * random.random())),
-            "height": tune.sample_from(lambda spec: int(100 * random.random()))
+            "width": tune.randint(10, 100),
+            "height": tune.randint(0, 100),
         })

     df = mlflow.search_runs([experiment_id])
20 changes: 11 additions & 9 deletions python/ray/tune/examples/mnist_pytorch.py
@@ -1,7 +1,6 @@
 # Original Code here:
 # https://github.com/pytorch/examples/blob/master/mnist/main.py
 import os
-import numpy as np
 import argparse
 from filelock import FileLock
 import torch
@@ -89,7 +88,7 @@ def get_data_loaders():


 def train_mnist(config):
-    use_cuda = config.get("use_gpu") and torch.cuda.is_available()
+    use_cuda = torch.cuda.is_available()
     device = torch.device("cuda" if use_cuda else "cpu")
     train_loader, test_loader = get_data_loaders()
     model = ConvNet().to(device)
@@ -100,6 +99,7 @@ def train_mnist(config):
     while True:
         train(model, optimizer, train_loader, device)
         acc = test(model, test_loader, device)
+        # Set this to run Tune.
         tune.report(mean_accuracy=acc)

@@ -120,10 +120,14 @@ def train_mnist(config):
         ray.init(address=args.ray_address)
     else:
         ray.init(num_cpus=2 if args.smoke_test else None)
-    sched = AsyncHyperBandScheduler(
-        time_attr="training_iteration", metric="mean_accuracy", mode="max")
+
+    # for early stopping
+    sched = AsyncHyperBandScheduler()
+
     analysis = tune.run(
         train_mnist,
+        metric="mean_accuracy",
+        mode="max",
         name="exp",
         scheduler=sched,
         stop={
@@ -132,14 +136,12 @@
         },
         resources_per_trial={
             "cpu": 2,
-            "gpu": int(args.cuda)
+            "gpu": int(args.cuda)  # set this for GPUs
         },
         num_samples=1 if args.smoke_test else 50,
         config={
-            "lr": tune.sample_from(lambda spec: 10**(-10 * np.random.rand())),
+            "lr": tune.loguniform(1e-4, 1e-2),
             "momentum": tune.uniform(0.1, 0.9),
-            "use_gpu": int(args.cuda)
         })

-    print("Best config is:",
-          analysis.get_best_config(metric="mean_accuracy", mode="max"))
+    print("Best config is:", analysis.best_config)
9 changes: 6 additions & 3 deletions python/ray/tune/examples/nevergrad_example.py
@@ -6,6 +6,7 @@

 import ray
 from ray import tune
+from ray.tune.suggest import ConcurrencyLimiter
 from ray.tune.schedulers import AsyncHyperBandScheduler
 from ray.tune.suggest.nevergrad import NevergradSearch

@@ -57,13 +58,15 @@ def easy_objective(config):
     algo = NevergradSearch(
         optimizer=ng.optimizers.OnePlusOne,
         # space=space, # If you want to set the space manually
-        metric="mean_loss",
-        mode="min")
+    )
+    algo = ConcurrencyLimiter(algo, max_concurrent=4)

-    scheduler = AsyncHyperBandScheduler(metric="mean_loss", mode="min")
+    scheduler = AsyncHyperBandScheduler()

     tune.run(
         easy_objective,
+        metric="mean_loss",
+        mode="min",
         name="nevergrad",
         search_alg=algo,
         scheduler=scheduler,
13 changes: 10 additions & 3 deletions python/ray/tune/examples/optuna_example.py
@@ -6,6 +6,7 @@

 import ray
 from ray import tune
+from ray.tune.suggest import ConcurrencyLimiter
 from ray.tune.schedulers import AsyncHyperBandScheduler
 from ray.tune.suggest.optuna import OptunaSearch

@@ -45,7 +46,13 @@ def easy_objective(config):
             "activation": tune.choice(["relu", "tanh"])
         }
     }
-    algo = OptunaSearch(metric="mean_loss", mode="min")
-    scheduler = AsyncHyperBandScheduler(metric="mean_loss", mode="min")
+    algo = OptunaSearch()
+    algo = ConcurrencyLimiter(algo, max_concurrent=4)
+    scheduler = AsyncHyperBandScheduler()
     tune.run(
-        easy_objective, search_alg=algo, scheduler=scheduler, **tune_kwargs)
+        easy_objective,
+        metric="mean_loss",
+        mode="min",
+        search_alg=algo,
+        scheduler=scheduler,
+        **tune_kwargs)
8 changes: 5 additions & 3 deletions python/ray/tune/examples/skopt_example.py
@@ -6,6 +6,7 @@

 import ray
 from ray import tune
+from ray.tune.suggest import ConcurrencyLimiter
 from ray.tune.schedulers import AsyncHyperBandScheduler
 from ray.tune.suggest.skopt import SkOptSearch

@@ -59,15 +60,16 @@ def easy_objective(config):
     algo = SkOptSearch(
         # parameter_names=space.keys(), # If you want to set the space
         # parameter_ranges=space.values(), # If you want to set the space
-        metric="mean_loss",
-        mode="min",
         points_to_evaluate=previously_run_params,
         evaluated_rewards=known_rewards)
+    algo = ConcurrencyLimiter(algo, max_concurrent=4)

-    scheduler = AsyncHyperBandScheduler(metric="mean_loss", mode="min")
+    scheduler = AsyncHyperBandScheduler()

     tune.run(
         easy_objective,
+        metric="mean_loss",
+        mode="min",
         name="skopt_exp_with_warmstart",
         search_alg=algo,
         scheduler=scheduler,
12 changes: 4 additions & 8 deletions python/ray/tune/examples/tune_cifar10_gluon.py
@@ -154,8 +154,8 @@ def train(epoch):
         with ag.record():
             outputs = [finetune_net(X) for X in data]
             loss = [L(yhat, y) for yhat, y in zip(outputs, label)]
-            for l in loss:
-                l.backward()
+            for ls in loss:
+                ls.backward()

         trainer.step(batch_size)
         mx.nd.waitall()
@@ -170,7 +170,7 @@ def test():
         outputs = [finetune_net(X) for X in data]
         loss = [L(yhat, y) for yhat, y in zip(outputs, label)]

-        test_loss += sum(l.mean().asscalar() for l in loss) / len(loss)
+        test_loss += sum(ls.mean().asscalar() for ls in loss) / len(loss)
         metric.update(label, outputs)

     _, test_acc = metric.get()
@@ -194,11 +194,7 @@ def test():
         sched = FIFOScheduler()
     elif args.scheduler == "asynchyperband":
         sched = AsyncHyperBandScheduler(
-            time_attr="training_iteration",
-            metric="mean_loss",
-            mode="min",
-            max_t=400,
-            grace_period=60)
+            metric="mean_loss", mode="min", max_t=400, grace_period=60)
     else:
         raise NotImplementedError
     tune.run(

Contributor commented on the AsyncHyperBandScheduler call:

    We could also pass metric/mode to tune.run here

Contributor Author replied:

    will do this in followup!
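A minimal sketch of that follow-up, assuming only what the diff above shows; train_cifar10 is a hypothetical stand-in for the example's Gluon training loop:

import ray
from ray import tune
from ray.tune.schedulers import AsyncHyperBandScheduler


def train_cifar10(config):
    # Hypothetical stand-in for the Gluon training loop.
    for step in range(10):
        tune.report(mean_loss=1.0 / (step + 1) + config["lr"])


if __name__ == "__main__":
    ray.init(num_cpus=2)
    # The scheduler keeps only its scheduling arguments;
    # the objective moves onto tune.run().
    sched = AsyncHyperBandScheduler(max_t=400, grace_period=60)
    tune.run(
        train_cifar10,
        metric="mean_loss",
        mode="min",
        scheduler=sched,
        num_samples=4,
        config={"lr": tune.loguniform(1e-3, 1e-1)})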
1 change: 1 addition & 0 deletions python/ray/tune/schedulers/async_hyperband.py
@@ -191,6 +191,7 @@ def on_result(self, trial, cur_iter, cur_rew):
         return action

     def debug_str(self):
+        # TODO: fix up the output for this
         iters = " | ".join([
             "Iter {:.3f}: {}".format(milestone, self.cutoff(recorded))
             for milestone, recorded in self._rungs
3 changes: 3 additions & 0 deletions python/ray/tune/suggest/repeater.py
@@ -167,3 +167,6 @@ def get_state(self):

     def set_state(self, state):
         self.__dict__.update(state)
+
+    def set_search_properties(self, metric, mode, config):
+        return self.searcher.set_search_properties(metric, mode, config)
3 changes: 3 additions & 0 deletions python/ray/tune/suggest/suggestion.py
@@ -366,3 +366,6 @@ def on_pause(self, trial_id):

     def on_unpause(self, trial_id):
         self.searcher.on_unpause(trial_id)
+
+    def set_search_properties(self, metric, mode, config):
+        return self.searcher.set_search_properties(metric, mode, config)
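Both additions are the same one-line delegation: wrapper searchers (the Repeater above, and the wrapper in suggestion.py) forward set_search_properties to the searcher they wrap, which is how a metric/mode passed only to tune.run() reaches a searcher constructed without them. A minimal sketch of the mechanism, using hypothetical simplified classes rather than the Tune implementations:

class Searcher:
    """Hypothetical simplified searcher."""

    def __init__(self, metric=None, mode=None):
        self.metric = metric
        self.mode = mode

    def set_search_properties(self, metric, mode, config):
        # Accept the properties only if none were set at construction.
        if self.metric or self.mode:
            return False
        self.metric, self.mode = metric, mode
        return True


class ConcurrencyLimiter:
    """Hypothetical simplified wrapper: delegate to the inner searcher."""

    def __init__(self, searcher, max_concurrent=4):
        self.searcher = searcher
        self.max_concurrent = max_concurrent

    def set_search_properties(self, metric, mode, config):
        return self.searcher.set_search_properties(metric, mode, config)


# tune.run(metric="mean_loss", mode="min", ...) can push the objective
# down through the wrapper to the searcher it wraps:
algo = ConcurrencyLimiter(Searcher())
assert algo.set_search_properties("mean_loss", "min", {})
assert algo.searcher.metric == "mean_loss"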