diff --git a/python/ray/tune/examples/hyperopt_example.py b/python/ray/tune/examples/hyperopt_example.py
index d70d16b9488e..0dce74d52401 100644
--- a/python/ray/tune/examples/hyperopt_example.py
+++ b/python/ray/tune/examples/hyperopt_example.py
@@ -43,6 +43,19 @@ def easy_objective(config, reporter):
         'activation': hp.choice("activation", ["relu", "tanh"])
     }
 
+    current_best_params = [
+        {
+            "width": 1,
+            "height": 2,
+            "activation": 0  # Activation will be relu
+        },
+        {
+            "width": 4,
+            "height": 2,
+            "activation": 1  # Activation will be tanh
+        }
+    ]
+
     config = {
         "my_exp": {
             "run": "exp",
@@ -55,6 +68,10 @@ def easy_objective(config, reporter):
             },
         }
     }
-    algo = HyperOptSearch(space, max_concurrent=4, reward_attr="neg_mean_loss")
+    algo = HyperOptSearch(
+        space,
+        max_concurrent=4,
+        reward_attr="neg_mean_loss",
+        points_to_evaluate=current_best_params)
     scheduler = AsyncHyperBandScheduler(reward_attr="neg_mean_loss")
     run_experiments(config, search_alg=algo, scheduler=scheduler)
diff --git a/python/ray/tune/suggest/hyperopt.py b/python/ray/tune/suggest/hyperopt.py
index 2c1c1317616d..2c32562505f9 100644
--- a/python/ray/tune/suggest/hyperopt.py
+++ b/python/ray/tune/suggest/hyperopt.py
@@ -10,6 +10,7 @@
     hyperopt_logger = logging.getLogger("hyperopt")
     hyperopt_logger.setLevel(logging.WARNING)
     import hyperopt as hpo
+    from hyperopt.fmin import generate_trials_to_calculate
 except Exception:
     hpo = None
 
@@ -33,6 +34,13 @@ class HyperOptSearch(SuggestionAlgorithm):
             to 10.
         reward_attr (str): The training result objective value attribute.
             This refers to an increasing value.
+        points_to_evaluate (list): Initial parameter suggestions to be run
+            first. This is for when you already have some good parameters
+            you want hyperopt to run first to help the TPE algorithm
+            make better suggestions for future parameters. Needs to be
+            a list of dicts of hyperopt-named variables.
+            Choice variables should be indicated by their index in the
+            list (see example).
 
     Example:
         >>> space = {
@@ -40,6 +48,11 @@ class HyperOptSearch(SuggestionAlgorithm):
         >>>     'height': hp.uniform('height', -100, 100),
         >>>     'activation': hp.choice("activation", ["relu", "tanh"])
         >>> }
+        >>> current_best_params = [{
+        >>>     'width': 10,
+        >>>     'height': 0,
+        >>>     'activation': 0,  # The index of "relu"
+        >>> }]
         >>> config = {
         >>>     "my_exp": {
         >>>         "run": "exp",
@@ -50,13 +63,15 @@ class HyperOptSearch(SuggestionAlgorithm):
         >>>     }
         >>> }
         >>> algo = HyperOptSearch(
-        >>>     space, max_concurrent=4, reward_attr="neg_mean_loss")
+        >>>     space, max_concurrent=4, reward_attr="neg_mean_loss",
+        >>>     points_to_evaluate=current_best_params)
     """
 
     def __init__(self,
                  space,
                  max_concurrent=10,
                  reward_attr="episode_reward_mean",
+                 points_to_evaluate=None,
                  **kwargs):
         assert hpo is not None, "HyperOpt must be installed!"
         assert type(max_concurrent) is int and max_concurrent > 0
@@ -64,7 +79,15 @@ def __init__(self,
         self._reward_attr = reward_attr
         self.algo = hpo.tpe.suggest
         self.domain = hpo.Domain(lambda spc: spc, space)
-        self._hpopt_trials = hpo.Trials()
+        if points_to_evaluate is None:
+            self._hpopt_trials = hpo.Trials()
+            self._points_to_evaluate = 0
+        else:
+            assert type(points_to_evaluate) == list
+            self._hpopt_trials = generate_trials_to_calculate(
+                points_to_evaluate)
+            self._hpopt_trials.refresh()
+            self._points_to_evaluate = len(points_to_evaluate)
         self._live_trial_mapping = {}
         self.rstate = np.random.RandomState()
 
@@ -73,15 +96,20 @@ def __init__(self,
 
     def _suggest(self, trial_id):
         if self._num_live_trials() >= self._max_concurrent:
             return None
-        new_ids = self._hpopt_trials.new_trial_ids(1)
-        self._hpopt_trials.refresh()
-        # Get new suggestion from Hyperopt
-        new_trials = self.algo(new_ids, self.domain, self._hpopt_trials,
-                               self.rstate.randint(2**31 - 1))
-        self._hpopt_trials.insert_trial_docs(new_trials)
-        self._hpopt_trials.refresh()
-        new_trial = new_trials[0]
+        if self._points_to_evaluate > 0:
+            new_trial = self._hpopt_trials.trials[self._points_to_evaluate - 1]
+            self._points_to_evaluate -= 1
+        else:
+            new_ids = self._hpopt_trials.new_trial_ids(1)
+            self._hpopt_trials.refresh()
+
+            # Get new suggestion from Hyperopt
+            new_trials = self.algo(new_ids, self.domain, self._hpopt_trials,
+                                   self.rstate.randint(2**31 - 1))
+            self._hpopt_trials.insert_trial_docs(new_trials)
+            self._hpopt_trials.refresh()
+            new_trial = new_trials[0]
         self._live_trial_mapping[trial_id] = (new_trial["tid"], new_trial)
 
         # Taken from HyperOpt.base.evaluate
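As a quick sanity check of the new argument, here is a minimal, self-contained sketch of how the patched `HyperOptSearch` is meant to be driven (not part of the patch). It mirrors the example file above under the Tune API of this era; the toy objective and the `num_samples`/`stop` values are illustrative stand-ins, and it assumes `ray` and `hyperopt` are installed.

```python
from hyperopt import hp

import ray
from ray.tune import register_trainable, run_experiments
from ray.tune.schedulers import AsyncHyperBandScheduler
from ray.tune.suggest.hyperopt import HyperOptSearch


def easy_objective(config, reporter):
    # Toy objective with a known optimum at width=10, height=0.
    loss = (config["width"] - 10)**2 + config["height"]**2
    reporter(neg_mean_loss=-loss, timesteps_total=1)


space = {
    "width": hp.uniform("width", 0, 20),
    "height": hp.uniform("height", -100, 100),
    "activation": hp.choice("activation", ["relu", "tanh"]),
}

# hp.choice variables are seeded by index, not by value:
# 0 selects "relu" from the list above.
current_best_params = [{"width": 10, "height": 0, "activation": 0}]

ray.init()
register_trainable("exp", easy_objective)
algo = HyperOptSearch(
    space,
    max_concurrent=4,
    reward_attr="neg_mean_loss",
    points_to_evaluate=current_best_params)
scheduler = AsyncHyperBandScheduler(reward_attr="neg_mean_loss")
run_experiments(
    {
        "my_exp": {
            "run": "exp",
            "num_samples": 10,  # illustrative; the example script uses more
            "stop": {"timesteps_total": 1},
        }
    },
    search_alg=algo,
    scheduler=scheduler)
```

With the patch applied, `_suggest` serves the seeded trials (created up front by `generate_trials_to_calculate`) before making any call into `hpo.tpe.suggest`, so the known-good configurations run first and their results inform TPE's later suggestions as they complete.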