Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Convert functional algo tests to python API #418

Merged
merged 4 commits into from
Jul 2, 2020
Merged
Show file tree
Hide file tree
Changes from 2 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
6 changes: 1 addition & 5 deletions src/orion/algo/hyperband.py
Original file line number Diff line number Diff line change
Expand Up @@ -147,8 +147,6 @@ def __init__(self, space, seed=None, repetitions=numpy.inf):
for bracket_budgets in self.budgets
]

self.seed_rng(seed)

def sample(self, num, bracket, buffer=10):
"""Sample new points from bracket"""
sample_buffer = bracket.rungs[0]['n_trials'] * buffer
Expand Down Expand Up @@ -186,7 +184,7 @@ def seed_rng(self, seed):
"""
self.seed = seed
for i, bracket in enumerate(self.brackets):
bracket.seed_rng(seed + i if seed is not None else None)
bracket.seed_rng(self.executed_times + seed + i if seed is not None else None)
self.rng = numpy.random.RandomState(seed)

@property
Expand Down Expand Up @@ -276,8 +274,6 @@ def _refresh_bracket(self):
Bracket(self, bracket_budgets, self.executed_times + 1)
for bracket_budgets in self.budgets
]
if self.seed is not None:
self.seed += 1

def _get_bracket(self, point):
"""Get the bracket of a point during observe"""
Expand Down
21 changes: 0 additions & 21 deletions tests/functional/algos/asha_config.yaml

This file was deleted.

46 changes: 0 additions & 46 deletions tests/functional/algos/black_box.py

This file was deleted.

13 changes: 0 additions & 13 deletions tests/functional/algos/hyperband.yaml

This file was deleted.

13 changes: 0 additions & 13 deletions tests/functional/algos/random_config.yaml

This file was deleted.

225 changes: 118 additions & 107 deletions tests/functional/algos/test_algos.py
Original file line number Diff line number Diff line change
@@ -1,141 +1,152 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Perform a functional test for algos included with orion."""
import os
import copy
import random

import pytest
import yaml

import orion.core.cli
from orion.storage.base import get_storage


config_files = ['random_config.yaml', 'tpe.yaml']
fidelity_config_files = ['random_config.yaml', 'asha_config.yaml', 'hyperband.yaml']
fidelity_only_config_files = list(set(fidelity_config_files) - set(config_files))


@pytest.mark.usefixtures("clean_db")
@pytest.mark.usefixtures("null_db_instances")
@pytest.mark.parametrize('config_file', fidelity_only_config_files)
def test_missing_fidelity(monkeypatch, config_file):
from orion.client import workon


# Ephemeral legacy storage so tests never touch a real database.
storage = {'type': 'legacy', 'database': {'type': 'ephemeraldb'}}


# Base search space shared by all algorithm tests.
space = {'x': 'uniform(-50, 50)'}


# Same space, extended with a fidelity dimension for multi-fidelity algorithms.
space_with_fidelity = dict(space)
space_with_fidelity['noise'] = 'fidelity(1,10,4)'


# Full configuration for every algorithm under test, keyed by algorithm name.
algorithm_configs = {
    'random': {
        'random': {'seed': 1},
    },
    'tpe': {
        'tpe': {
            'seed': 1,
            'n_initial_points': 20,
            'n_ei_candidates': 24,
            'gamma': 0.25,
            'equal_weight': False,
            'prior_weight': 1.0,
            'full_weight_num': 25,
        },
    },
    'asha': {
        'asha': {
            'seed': 1,
            'num_rungs': 4,
            'num_brackets': 1,
            'grace_period': None,
            'max_resources': None,
            'reduction_factor': None,
        },
    },
    'hyperband': {
        'hyperband': {'repetitions': 5, 'seed': 1},
    },
}

# Algorithms that work without a fidelity dimension.
no_fidelity_algorithms = ['random', 'tpe']
no_fidelity_algorithm_configs = {
    name: algorithm_configs[name] for name in no_fidelity_algorithms
}

# Algorithms that require a fidelity dimension in the space.
fidelity_only_algorithms = ['asha', 'hyperband']
fidelity_only_algorithm_configs = {
    name: algorithm_configs[name] for name in fidelity_only_algorithms
}


def rosenbrock(x, noise=None):
    """Evaluate a quadratic objective, optionally perturbed by Gaussian noise.

    When *noise* is truthy it is interpreted as a fidelity level in (0, 10];
    higher fidelity means a smaller noise scale applied multiplicatively.
    Returns the orion results list: an 'objective' entry and a 'gradient' entry.
    """
    offset = x - 34.56789
    if noise:
        # Map the fidelity level to a standard deviation, then jitter.
        sigma = (1 - noise / 10) + 0.0001
        offset = offset * random.gauss(0, sigma)

    objective = 4 * offset ** 2 + 23.4
    gradient = [8 * offset]
    return [
        {'name': 'objective', 'type': 'objective', 'value': objective},
        {'name': 'gradient', 'type': 'gradient', 'value': gradient},
    ]


@pytest.mark.parametrize(
'algorithm',
fidelity_only_algorithm_configs.values(),
ids=list(fidelity_only_algorithm_configs.keys()))
def test_missing_fidelity(algorithm):
"""Test a simple usage scenario."""
monkeypatch.chdir(os.path.dirname(os.path.abspath(__file__)))

with pytest.raises(RuntimeError) as exc:
orion.core.cli.main(["hunt", "--config", config_file,
"./black_box.py", "-x~uniform(-50, 50)"])
workon(rosenbrock, space, algorithms=algorithm, max_trials=100)

assert "https://orion.readthedocs.io/en/develop/user/algorithms.html" in str(exc.value)


@pytest.mark.usefixtures("clean_db")
@pytest.mark.usefixtures("null_db_instances")
@pytest.mark.parametrize('config_file', config_files)
def test_simple(monkeypatch, config_file):
@pytest.mark.parametrize(
'algorithm',
no_fidelity_algorithm_configs.values(),
ids=list(no_fidelity_algorithm_configs.keys()))
def test_simple(algorithm):
"""Test a simple usage scenario."""
monkeypatch.chdir(os.path.dirname(os.path.abspath(__file__)))
orion.core.cli.main(["hunt", "--config", config_file,
"./black_box.py", "-x~uniform(-50, 50)"])

with open(config_file, 'rb') as f:
config = yaml.safe_load(f)

storage = get_storage()
exp = list(storage.fetch_experiments({'name': config['name']}))
assert len(exp) == 1
exp = exp[0]
assert '_id' in exp
exp_id = exp['_id']
assert exp['name'] == config['name']
assert exp['pool_size'] == 1
assert exp['max_trials'] == 100
assert exp['algorithms'] == config['algorithms']
assert 'user' in exp['metadata']
assert 'datetime' in exp['metadata']
assert 'orion_version' in exp['metadata']
assert 'user_script' in exp['metadata']
assert exp['metadata']['user_args'] == ['./black_box.py', '-x~uniform(-50, 50)']

trials = storage.fetch_trials(uid=exp_id)
assert len(trials) <= config['max_trials']
max_trials = 100
exp = workon(rosenbrock, space, algorithms=algorithm, max_trials=max_trials)

assert exp.max_trials == max_trials
assert exp.configuration['algorithms'] == algorithm

trials = exp.fetch_trials()
assert len(trials) == max_trials
assert trials[-1].status == 'completed'

best_trial = next(iter(sorted(trials, key=lambda trial: trial.objective.value)))
assert best_trial.objective.name == 'example_objective'
assert abs(best_trial.objective.value - 23.4) < 1e-5
best_trial = sorted(trials, key=lambda trial: trial.objective.value)[0]
assert best_trial.objective.name == 'objective'
assert abs(best_trial.objective.value - 23.4) < 1e-2
assert len(best_trial.params) == 1
param = best_trial._params[0]
assert param.name == '/x'
assert param.name == 'x'
assert param.type == 'real'


@pytest.mark.usefixtures("clean_db")
@pytest.mark.usefixtures("null_db_instances")
@pytest.mark.parametrize('config_file', config_files)
def test_random_stop(monkeypatch, config_file):
"""Test a simple usage scenario."""
monkeypatch.chdir(os.path.dirname(os.path.abspath(__file__)))
orion.core.cli.main(["hunt", "--config", config_file,
"./black_box.py", "-x~uniform(-10, 5, discrete=True)"])

with open(config_file, 'rb') as f:
config = yaml.safe_load(f)

storage = get_storage()
exp = list(storage.fetch_experiments({'name': config['name']}))
assert len(exp) == 1
exp = exp[0]
assert '_id' in exp
exp_id = exp['_id']

trials = storage.fetch_trials(uid=exp_id)
assert len(trials) <= config['max_trials']
@pytest.mark.parametrize(
'algorithm',
no_fidelity_algorithm_configs.values(),
ids=list(no_fidelity_algorithm_configs.keys()))
def test_cardinality_stop(algorithm):
"""Test when algo needs to stop because all space is explored (dicrete space)."""
discrete_space = copy.deepcopy(space)
discrete_space['x'] = 'uniform(-10, 5, discrete=True)'
exp = workon(rosenbrock, discrete_space, algorithms=algorithm, max_trials=100)

trials = exp.fetch_trials()
assert len(trials) == 15
assert trials[-1].status == 'completed'


@pytest.mark.usefixtures("clean_db")
@pytest.mark.usefixtures("null_db_instances")
@pytest.mark.parametrize('config_file', fidelity_config_files)
def test_with_fidelity(database, monkeypatch, config_file):
@pytest.mark.parametrize(
'algorithm',
algorithm_configs.values(),
ids=list(algorithm_configs.keys()))
def test_with_fidelity(algorithm):
"""Test a scenario with fidelity."""
monkeypatch.chdir(os.path.dirname(os.path.abspath(__file__)))
orion.core.cli.main(["hunt", "--config", config_file,
"./black_box.py", "-x~uniform(-50, 50, precision=None)",
"--fidelity~fidelity(1,10,4)"])

with open(config_file, 'rb') as f:
config = yaml.safe_load(f)

storage = get_storage()
exp = list(storage.fetch_experiments({'name': config['name']}))
assert len(exp) == 1
exp = exp[0]
assert '_id' in exp
exp_id = exp['_id']
assert exp['name'] == config['name']
assert exp['pool_size'] == 1
assert exp['max_trials'] == 100
assert exp['algorithms'] == config['algorithms']
assert 'user' in exp['metadata']
assert 'datetime' in exp['metadata']
assert 'orion_version' in exp['metadata']
assert 'user_script' in exp['metadata']
assert exp['metadata']['user_args'] == ['./black_box.py', '-x~uniform(-50, 50, precision=None)',
'--fidelity~fidelity(1,10,4)']

trials = storage.fetch_trials(uid=exp_id)
assert len(trials) <= config['max_trials']
exp = workon(rosenbrock, space_with_fidelity, algorithms=algorithm, max_trials=100)

assert exp.configuration['algorithms'] == algorithm

trials = exp.fetch_trials()
assert len(trials) <= 100
assert trials[-1].status == 'completed'

results = [trial.objective.value for trial in trials]
print(min(results))
print(max(results))
best_trial = next(iter(sorted(trials, key=lambda trial: trial.objective.value)))
assert best_trial.objective.name == 'example_objective'

assert best_trial.objective.name == 'objective'
assert abs(best_trial.objective.value - 23.4) < 1e-5
assert len(best_trial.params) == 2
fidelity = best_trial._params[0]
assert fidelity.name == '/fidelity'
assert fidelity.name == 'noise'
assert fidelity.type == 'fidelity'
assert fidelity.value == 10
param = best_trial._params[1]
assert param.name == '/x'
assert param.name == 'x'
assert param.type == 'real'
Loading