Merged
54 commits
a16c807
Added basic functionality and tests
Aug 26, 2020
6750060
Feature parity with old tune search space config
Aug 27, 2020
47c8f23
Merge branch 'master' into tune-search-space
Aug 27, 2020
aaaab73
Convert Optuna search spaces
Aug 27, 2020
546e9ba
Introduced quantized values
Aug 27, 2020
4d75d91
Merge branch 'master' of https://github.com/ray-project/ray into tune…
Aug 28, 2020
18535a6
Updated Optuna resolving
Aug 28, 2020
3ab491c
Added HyperOpt search space conversion
Aug 28, 2020
cba10d8
Convert search spaces to AxSearch
Aug 28, 2020
f83ed33
Convert search spaces to BayesOpt
Aug 28, 2020
c93fb76
Added basic functionality and tests
Aug 26, 2020
2ffbdca
Feature parity with old tune search space config
Aug 27, 2020
13a153c
Convert Optuna search spaces
Aug 27, 2020
6abffaa
Introduced quantized values
Aug 27, 2020
2cbd77b
Updated Optuna resolving
Aug 28, 2020
c68b9c4
Added HyperOpt search space conversion
Aug 28, 2020
1167c75
Convert search spaces to AxSearch
Aug 28, 2020
a4f2d2d
Convert search spaces to BayesOpt
Aug 28, 2020
bd7ed77
Re-factored samplers into domain classes
Aug 31, 2020
e1ba45c
Re-added base classes
Aug 31, 2020
9d32499
Re-factored into list comprehensions
Aug 31, 2020
24ff642
Merge remote-tracking branch 'origin/tune-search-space' into tune-sea…
Aug 31, 2020
e46b632
Added `from_config` classmethod for config conversion
Aug 31, 2020
0089a63
Applied suggestions from code review
Aug 31, 2020
e8f31c0
Removed truncated normal distribution
Sep 1, 2020
e4b404f
Set search properties in tune.run
Sep 1, 2020
8e74a1b
Added test for tune.run search properties
Sep 1, 2020
77c3cd7
Move sampler initializers to base classes
Sep 1, 2020
3642e2d
Add tune API sampling test, fixed includes, fixed resampling bug
Sep 1, 2020
d9cb33f
Add to API docs
Sep 1, 2020
1db85b9
Merge branch 'master' of https://github.com/ray-project/ray into tune…
Sep 1, 2020
98623f3
Fix docs
Sep 1, 2020
65f6c9a
Update metric and mode only when set. Set default metric and mode to …
Sep 1, 2020
04cc380
Fix experiment analysis tests
Sep 1, 2020
e921039
Raise error when delimiter is used in the config keys
Sep 1, 2020
7940dbc
Added randint/qrandint to API docs, added additional check in tune.run
Sep 1, 2020
8a8b880
Fix tests
Sep 1, 2020
a4fbcf0
Fix linting error
Sep 1, 2020
e60eb76
Applied suggestions from code review. Re-added tune.function for the t…
Sep 2, 2020
1aaff61
Fix sampling tests
Sep 2, 2020
49c2b4f
Fix experiment analysis tests
Sep 2, 2020
2b209cb
Fix tests and linting error
Sep 2, 2020
1a78055
Removed unnecessary default_config attribute from OptunaSearch
Sep 2, 2020
00e26c9
Merge branch 'master' of https://github.com/ray-project/ray into tune…
Sep 2, 2020
cb87044
Revert to set AxSearch default metric
Sep 2, 2020
3b04346
fix-min-max
richardliaw Sep 2, 2020
b7f7f67
fix
richardliaw Sep 2, 2020
49bf48d
nits
richardliaw Sep 2, 2020
84275d1
Added function check, enhanced loguniform error message
Sep 2, 2020
0222f16
fix-print
richardliaw Sep 2, 2020
a9b4069
Merge branch 'tune-search-space' of github.com:krfricke/ray into tune…
richardliaw Sep 2, 2020
d72ccf1
fix
richardliaw Sep 2, 2020
3070cda
fix
richardliaw Sep 3, 2020
b53b653
Raise if unresolved values are in config and search space is already set
Sep 3, 2020
2 changes: 2 additions & 0 deletions doc/source/conf.py
@@ -31,6 +31,8 @@ def __getattr__(cls, name):


MOCK_MODULES = [
"ax",
"ax.service.ax_client",
"blist",
"gym",
"gym.spaces",
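The two new entries register the `ax` package with the documentation build's mock list, so the Ax-based search code can be imported by autodoc even when Ax is not installed. As context, here is a minimal sketch of the usual Sphinx mock-import pattern that a list like MOCK_MODULES typically feeds into (an assumption for illustration, not copied from Ray's conf.py):

# Illustrative only: mock heavy optional dependencies during the docs build.
import sys
from unittest.mock import MagicMock

MOCK_MODULES = [
    "ax",
    "ax.service.ax_client",
    "blist",
    "gym",
    "gym.spaces",
]

# Every listed module resolves to a MagicMock, so statements such as
# `from ax.service.ax_client import AxClient` succeed while building the docs
# even though the real dependency is absent.
sys.modules.update((mod_name, MagicMock()) for mod_name in MOCK_MODULES)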
27 changes: 26 additions & 1 deletion doc/source/tune/api_docs/grid_random.rst
@@ -164,16 +164,41 @@ tune.randn

.. autofunction:: ray.tune.randn

tune.qrandn
~~~~~~~~~~~

.. autofunction:: ray.tune.qrandn

tune.loguniform
~~~~~~~~~~~~~~~

.. autofunction:: ray.tune.loguniform

tune.qloguniform
~~~~~~~~~~~~~~~~

.. autofunction:: ray.tune.qloguniform

tune.uniform
~~~~~~~~~~~~

.. autofunction:: ray.tune.uniform

tune.quniform
~~~~~~~~~~~~~

.. autofunction:: ray.tune.quniform

tune.randint
~~~~~~~~~~~~

.. autofunction:: ray.tune.randint

tune.qrandint
~~~~~~~~~~~~~

.. autofunction:: ray.tune.qrandint

tune.choice
~~~~~~~~~~~

@@ -182,7 +207,7 @@ tune.choice
tune.sample_from
~~~~~~~~~~~~~~~~

.. autoclass:: ray.tune.sample_from
.. autofunction:: ray.tune.sample_from

Grid Search API
---------------
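The API reference now documents the quantized counterpart of each distribution. Below is a minimal sketch of how these primitives go into a `tune.run` config; the trainable and parameter names are made up for illustration, and the `(lower, upper[, q])` style signatures follow the autofunction entries above, with `q` quantizing sampled values to multiples of `q`:

from ray import tune


def trainable(config):
    # Toy objective; report a single score so schedulers/searchers can compare trials.
    tune.report(score=config["lr"] * config["batch_size"])


config = {
    "lr": tune.qloguniform(1e-4, 1e-1, 5e-5),   # log-uniform float, rounded to multiples of 5e-5
    "batch_size": tune.qrandint(16, 128, 16),   # integer, rounded to multiples of 16
    "momentum": tune.quniform(0.1, 0.9, 0.05),  # uniform float, rounded to multiples of 0.05
    "noise": tune.qrandn(0.0, 1.0, 0.1),        # normal(mean, sd), rounded to multiples of 0.1
    "activation": tune.choice(["relu", "tanh"]),
}

tune.run(trainable, config=config, num_samples=4)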
14 changes: 8 additions & 6 deletions python/ray/tune/__init__.py
@@ -12,15 +12,17 @@
save_checkpoint, checkpoint_dir)
from ray.tune.progress_reporter import (ProgressReporter, CLIReporter,
JupyterNotebookReporter)
from ray.tune.sample import (function, sample_from, uniform, choice, randint,
randn, loguniform)
from ray.tune.sample import (function, sample_from, uniform, quniform, choice,
randint, qrandint, randn, qrandn, loguniform,
qloguniform)

__all__ = [
"Trainable", "DurableTrainable", "TuneError", "grid_search",
"register_env", "register_trainable", "run", "run_experiments", "Stopper",
"EarlyStopping", "Experiment", "function", "sample_from", "track",
"uniform", "choice", "randint", "randn", "loguniform",
"ExperimentAnalysis", "Analysis", "CLIReporter", "JupyterNotebookReporter",
"ProgressReporter", "report", "get_trial_dir", "get_trial_name",
"get_trial_id", "make_checkpoint_dir", "save_checkpoint", "checkpoint_dir"
"uniform", "quniform", "choice", "randint", "qrandint", "randn", "qrandn",
"loguniform", "qloguniform", "ExperimentAnalysis", "Analysis",
"CLIReporter", "JupyterNotebookReporter", "ProgressReporter", "report",
"get_trial_dir", "get_trial_name", "get_trial_id", "make_checkpoint_dir",
"save_checkpoint", "checkpoint_dir"
]
141 changes: 109 additions & 32 deletions python/ray/tune/analysis/experiment_analysis.py
@@ -20,9 +20,18 @@ class Analysis:
"""Analyze all results from a directory of experiments.

To use this class, the experiment must be executed with the JsonLogger.

Args:
experiment_dir (str): Directory of the experiment to load.
default_metric (str): Default metric for comparing results. Can be
overwritten with the ``metric`` parameter in the respective
functions.
default_mode (str): Default mode for comparing results. Has to be one
of [min, max]. Can be overwritten with the ``mode`` parameter
in the respective functions.
"""

def __init__(self, experiment_dir):
def __init__(self, experiment_dir, default_metric=None, default_mode=None):
experiment_dir = os.path.expanduser(experiment_dir)
if not os.path.isdir(experiment_dir):
raise ValueError(
@@ -31,13 +40,35 @@ def __init__(self, experiment_dir):
self._configs = {}
self._trial_dataframes = {}

self.default_metric = default_metric
if default_mode and default_mode not in ["min", "max"]:
raise ValueError(
"`default_mode` has to be None or one of [min, max]")
self.default_mode = default_mode

if not pd:
logger.warning(
"pandas not installed. Run `pip install pandas` for "
"Analysis utilities.")
else:
self.fetch_trial_dataframes()

def _validate_metric(self, metric):
if not metric and not self.default_metric:
raise ValueError(
"No `metric` has been passed and `default_metric` has "
"not been set. Please specify the `metric` parameter.")
return metric or self.default_metric

def _validate_mode(self, mode):
if not mode and not self.default_mode:
raise ValueError(
"No `mode` has been passed and `default_mode` has "
"not been set. Please specify the `mode` parameter.")
if mode and mode not in ["min", "max"]:
raise ValueError("If set, `mode` has to be one of [min, max]")
return mode or self.default_mode

def dataframe(self, metric=None, mode=None):
"""Returns a pandas.DataFrame object constructed from the trials.

@@ -57,13 +88,18 @@ def dataframe(self, metric=None, mode=None):
rows[path].update(logdir=path)
return pd.DataFrame(list(rows.values()))

def get_best_config(self, metric, mode="max"):
def get_best_config(self, metric=None, mode=None):
"""Retrieve the best config corresponding to the trial.

Args:
metric (str): Key for trial info to order on.
mode (str): One of [min, max].
metric (str): Key for trial info to order on. Defaults to
``self.default_metric``.
mode (str): One of [min, max]. Defaults to
``self.default_mode``.
"""
metric = self._validate_metric(metric)
mode = self._validate_mode(mode)

rows = self._retrieve_rows(metric=metric, mode=mode)
if not rows:
# only nans encountered when retrieving rows
@@ -77,13 +113,17 @@ def get_best_config(self, metric, mode="max"):
best_path = compare_op(rows, key=lambda k: rows[k][metric])
return all_configs[best_path]

def get_best_logdir(self, metric, mode="max"):
def get_best_logdir(self, metric=None, mode=None):
"""Retrieve the logdir corresponding to the best trial.

Args:
metric (str): Key for trial info to order on.
mode (str): One of [min, max].
metric (str): Key for trial info to order on. Defaults to
``self.default_metric``.
mode (str): One of [min, max]. Defaults to ``self.default_mode``.
"""
metric = self._validate_metric(metric)
mode = self._validate_mode(mode)

assert mode in ["max", "min"]
df = self.dataframe(metric=metric, mode=mode)
mode_idx = pd.Series.idxmax if mode == "max" else pd.Series.idxmin
@@ -140,17 +180,20 @@ def get_all_configs(self, prefix=False):
"Couldn't read config from {} paths".format(fail_count))
return self._configs

def get_trial_checkpoints_paths(self, trial, metric=TRAINING_ITERATION):
def get_trial_checkpoints_paths(self, trial, metric=None):
"""Gets paths and metrics of all persistent checkpoints of a trial.

Args:
trial (Trial): The log directory of a trial, or a trial instance.
metric (str): key for trial info to return, e.g. "mean_accuracy".
"training_iteration" is used by default.
"training_iteration" is used by default if no value was
passed to ``self.default_metric``.

Returns:
List of [path, metric] for all persistent checkpoints of the trial.
"""
metric = metric or self.default_metric or TRAINING_ITERATION

if isinstance(trial, str):
trial_dir = os.path.expanduser(trial)
# Get checkpoints from logdir.
@@ -167,20 +210,22 @@ def get_trial_checkpoints_paths(self, trial, metric=TRAINING_ITERATION):
else:
raise ValueError("trial should be a string or a Trial instance.")

def get_best_checkpoint(self, trial, metric=TRAINING_ITERATION,
mode="max"):
def get_best_checkpoint(self, trial, metric=None, mode=None):
"""Gets best persistent checkpoint path of provided trial.

Args:
trial (Trial): The log directory of a trial, or a trial instance.
metric (str): key of trial info to return, e.g. "mean_accuracy".
"training_iteration" is used by default.
mode (str): Either "min" or "max".
"training_iteration" is used by default if no value was
passed to ``self.default_metric``.
mode (str): One of [min, max]. Defaults to ``self.default_mode``.

Returns:
Path for best checkpoint of trial determined by metric
"""
assert mode in ["max", "min"]
metric = metric or self.default_metric or TRAINING_ITERATION
mode = self._validate_mode(mode)

checkpoint_paths = self.get_trial_checkpoints_paths(trial, metric)
if mode == "max":
return max(checkpoint_paths, key=lambda x: x[1])[0]
@@ -235,14 +280,24 @@ class ExperimentAnalysis(Analysis):
Experiment.local_dir/Experiment.name/experiment_state.json
trials (list|None): List of trials that can be accessed via
`analysis.trials`.
default_metric (str): Default metric for comparing results. Can be
overwritten with the ``metric`` parameter in the respective
functions.
default_mode (str): Default mode for comparing results. Has to be one
of [min, max]. Can be overwritten with the ``mode`` parameter
in the respective functions.

Example:
>>> tune.run(my_trainable, name="my_exp", local_dir="~/tune_results")
>>> analysis = ExperimentAnalysis(
>>> experiment_checkpoint_path="~/tune_results/my_exp/state.json")
"""

def __init__(self, experiment_checkpoint_path, trials=None):
def __init__(self,
experiment_checkpoint_path,
trials=None,
default_metric=None,
default_mode=None):
experiment_checkpoint_path = os.path.expanduser(
experiment_checkpoint_path)
if not os.path.isfile(experiment_checkpoint_path):
@@ -256,17 +311,24 @@ def __init__(self, experiment_checkpoint_path, trials=None):
raise TuneError("Experiment state invalid; no checkpoints found.")
self._checkpoints = _experiment_state["checkpoints"]
self.trials = trials

super(ExperimentAnalysis, self).__init__(
os.path.dirname(experiment_checkpoint_path))
os.path.dirname(experiment_checkpoint_path), default_metric,
default_mode)

def get_best_trial(self, metric, mode="max", scope="all"):
def get_best_trial(self, metric=None, mode=None, scope="all"):
"""Retrieve the best trial object.

Compares all trials' scores on `metric`.
Compares all trials' scores on ``metric``.
If ``metric`` is not specified, ``self.default_metric`` will be used.
If ``mode`` is not specified, ``self.default_mode`` will be used.
These values are usually initialized by passing the ``metric`` and
``mode`` parameters to ``tune.run()``.

Args:
metric (str): Key for trial info to order on.
mode (str): One of [min, max].
metric (str): Key for trial info to order on. Defaults to
``self.default_metric``.
mode (str): One of [min, max]. Defaults to ``self.default_mode``.
scope (str): One of [all, last, avg, last-5-avg, last-10-avg].
If `scope=last`, only look at each trial's final step for
`metric`, and compare across trials based on `mode=[min,max]`.
@@ -278,16 +340,17 @@ def get_best_trial(self, metric, mode="max", scope="all"):
If `scope=all`, find each trial's min/max score for `metric`
based on `mode`, and compare trials based on `mode=[min,max]`.
"""
if mode not in ["max", "min"]:
raise ValueError(
"ExperimentAnalysis: attempting to get best trial for "
"metric {} for mode {} not in [\"max\", \"min\"]".format(
metric, mode))
metric = self._validate_metric(metric)
mode = self._validate_mode(mode)

if scope not in ["all", "last", "avg", "last-5-avg", "last-10-avg"]:
raise ValueError(
"ExperimentAnalysis: attempting to get best trial for "
"metric {} for scope {} not in [\"all\", \"last\", \"avg\", "
"\"last-5-avg\", \"last-10-avg\"]".format(metric, scope))
"\"last-5-avg\", \"last-10-avg\"]. "
"If you didn't pass a `metric` parameter to `tune.run()`, "
"you have to pass one when fetching the best trial.".format(
metric, scope))
best_trial = None
best_metric_score = None
for trial in self.trials:
@@ -311,16 +374,25 @@ def get_best_trial(self, metric, mode="max", scope="all"):
best_metric_score = metric_score
best_trial = trial

if not best_trial:
logger.warning(
"Could not find best trial. Did you pass the correct `metric`"
"parameter?")
return best_trial

def get_best_config(self, metric, mode="max", scope="all"):
def get_best_config(self, metric=None, mode=None, scope="all"):
"""Retrieve the best config corresponding to the trial.

Compares all trials' scores on `metric`.
If ``metric`` is not specified, ``self.default_metric`` will be used.
If ``mode`` is not specified, ``self.default_mode`` will be used.
These values are usually initialized by passing the ``metric`` and
``mode`` parameters to ``tune.run()``.

Args:
metric (str): Key for trial info to order on.
mode (str): One of [min, max].
metric (str): Key for trial info to order on. Defaults to
``self.default_metric``.
mode (str): One of [min, max]. Defaults to ``self.default_mode``.
scope (str): One of [all, last, avg, last-5-avg, last-10-avg].
If `scope=last`, only look at each trial's final step for
`metric`, and compare across trials based on `mode=[min,max]`.
@@ -335,14 +407,19 @@ def get_best_config(self, metric, mode="max", scope="all"):
best_trial = self.get_best_trial(metric, mode, scope)
return best_trial.config if best_trial else None

def get_best_logdir(self, metric, mode="max", scope="all"):
def get_best_logdir(self, metric=None, mode=None, scope="all"):
"""Retrieve the logdir corresponding to the best trial.

Compares all trials' scores on `metric`.
If ``metric`` is not specified, ``self.default_metric`` will be used.
If ``mode`` is not specified, ``self.default_mode`` will be used.
These values are usually initialized by passing the ``metric`` and
``mode`` parameters to ``tune.run()``.

Args:
metric (str): Key for trial info to order on.
mode (str): One of [min, max].
metric (str): Key for trial info to order on. Defaults to
``self.default_metric``.
mode (str): One of [min, max]. Defaults to ``self.default_mode``.
scope (str): One of [all, last, avg, last-5-avg, last-10-avg].
If `scope=last`, only look at each trial's final step for
`metric`, and compare across trials based on `mode=[min,max]`.
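The analysis classes now accept `default_metric` and `default_mode`, and the new `_validate_metric`/`_validate_mode` helpers fall back to them whenever a call omits `metric` or `mode`. A usage sketch, assuming a finished experiment with hypothetical paths and metric names:

from ray.tune import ExperimentAnalysis

# Hypothetical checkpoint path; point this at the experiment_state*.json
# that tune.run() wrote for your experiment.
analysis = ExperimentAnalysis(
    "~/ray_results/my_exp/experiment_state-2020-09-03.json",
    default_metric="mean_accuracy",
    default_mode="max")

# With the defaults set, metric and mode can be omitted in the respective calls...
best_trial = analysis.get_best_trial()
best_config = analysis.get_best_config()
best_logdir = analysis.get_best_logdir()

# ...or overridden per call.
lowest_loss_config = analysis.get_best_config(metric="mean_loss", mode="min")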
2 changes: 1 addition & 1 deletion python/ray/tune/examples/ax_example.py
@@ -106,7 +106,7 @@ def easy_objective(config):
parameter_constraints=["x1 + x2 <= 2.0"], # Optional.
outcome_constraints=["l2norm <= 1.25"], # Optional.
)
algo = AxSearch(client, max_concurrent=4)
algo = AxSearch(ax_client=client, max_concurrent=4)
scheduler = AsyncHyperBandScheduler(metric="hartmann6", mode="min")
tune.run(
easy_objective,
7 changes: 2 additions & 5 deletions python/ray/tune/examples/logging_example.py
@@ -3,8 +3,6 @@
import argparse
import json
import os
import random

import numpy as np

from ray import tune
@@ -64,7 +62,6 @@ def load_checkpoint(self, checkpoint_path):
loggers=[TestLogger],
stop={"training_iteration": 1 if args.smoke_test else 99999},
config={
"width": tune.sample_from(
lambda spec: 10 + int(90 * random.random())),
"height": tune.sample_from(lambda spec: int(100 * random.random()))
"width": tune.randint(10, 100),
"height": tune.loguniform(10, 100)
})
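The example replaces the hand-written `tune.sample_from` lambdas with the new first-class domains. Both styles remain valid, but only the structured domains can be translated into the native search spaces of searchers such as Optuna, HyperOpt, Ax, or BayesOpt. A side-by-side sketch follows; note the two configs are not numerically identical, since the diff intentionally changes the distributions:

import random

from ray import tune

# Old style: opaque callables that Tune can only evaluate, never introspect,
# so they cannot be converted for a search algorithm.
old_config = {
    "width": tune.sample_from(lambda spec: 10 + int(90 * random.random())),
    "height": tune.sample_from(lambda spec: int(100 * random.random())),
}

# New style: structured domains from this PR that searchers can consume directly.
new_config = {
    "width": tune.randint(10, 100),
    "height": tune.loguniform(10, 100),
}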