Commit e1212d7

Authored by skzhang1 and “skzhang1”
support string alg in tune (microsoft#1093)

* support string alg in tune
* add test, enforce string feasible, support lexico in set_search_priorities in CFO
* fix bug
* fix bug
* fix bug
* fix bug
* fix bugs
* fix yiran

---------

Co-authored-by: “skzhang1” <“[email protected]”>

1 parent 3f5b6bb commit e1212d7

File tree

3 files changed: +148 −21 lines

* flaml/tune/tune.py (+52 −18)
* setup.py (+2 −2)
* test/tune/test_tune.py (+94 −1)
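
What this enables, in user terms: `tune.run` now accepts the name of a search algorithm as a plain string. A minimal sketch of the new call style, mirroring `test_passing_search_alg` added below (the objective and search space are the toy ones from the tests):

```python
from flaml import tune


def easy_objective(config):
    # Toy objective from the tests: lower mean_loss is better.
    width, height, step = config["width"], config["height"], config["steps"]
    return {"mean_loss": (0.1 + width * step / 100) ** (-1) + height * 0.1}


# search_alg may now be "BlendSearch", "CFO", "CFOCat", or "RandomSearch"
# instead of a Searcher instance; tune.run resolves the string internally.
# Per the diff below, an explicit "BlendSearch" raises a ValueError asking
# for `pip install flaml[blendsearch]` when optuna is missing.
tune.run(
    easy_objective,
    search_alg="BlendSearch",
    metric="mean_loss",
    mode="min",
    num_samples=10,
    config={
        "steps": 100,
        "width": tune.uniform(0, 20),
        "height": tune.uniform(-100, 100),
    },
)
```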

flaml/tune/tune.py (+52 −18)
@@ -344,7 +344,7 @@ def easy_objective(config):
         # do cleanup operation here
         return
     ```
-        search_alg: An instance of BlendSearch as the search algorithm
+        search_alg: An instance/string of the search algorithm
            to be used. The same instance can be used for iterative tuning.
            e.g.,
 
@@ -481,12 +481,25 @@ def easy_objective(config):
     else:
         logger.setLevel(logging.CRITICAL)
 
-    from .searcher.blendsearch import BlendSearch, CFO
+    from .searcher.blendsearch import BlendSearch, CFO, RandomSearch
 
     if lexico_objectives is not None:
-        logger.warning("If lexico_objectives is not None, search_alg is forced to be CFO")
-        search_alg = None
-    if search_alg is None:
+        if "modes" not in lexico_objectives.keys():
+            lexico_objectives["modes"] = ["min"] * len(lexico_objectives["metrics"])
+        for t_metric, t_mode in zip(lexico_objectives["metrics"], lexico_objectives["modes"]):
+            if t_metric not in lexico_objectives["tolerances"].keys():
+                lexico_objectives["tolerances"][t_metric] = 0
+            if t_metric not in lexico_objectives["targets"].keys():
+                lexico_objectives["targets"][t_metric] = -float("inf") if t_mode == "min" else float("inf")
+    if search_alg is None or isinstance(search_alg, str):
+        if isinstance(search_alg, str):
+            assert search_alg in [
+                "BlendSearch",
+                "CFO",
+                "CFOCat",
+                "RandomSearch",
+            ], f"search_alg={search_alg} is not recognized. 'BlendSearch', 'CFO', 'CFOcat' and 'RandomSearch' are supported."
+
         flaml_scheduler_resource_attr = (
             flaml_scheduler_min_resource
         ) = flaml_scheduler_max_resource = flaml_scheduler_reduction_factor = None
@@ -500,20 +513,30 @@ def easy_objective(config):
             flaml_scheduler_max_resource = max_resource
             flaml_scheduler_reduction_factor = reduction_factor
         scheduler = None
-        if lexico_objectives is None:
-            try:
-                import optuna as _
+        if lexico_objectives:
+            # TODO: Modify after supporting BlendSearch in lexicographic optimization
+            SearchAlgorithm = CFO
+            logger.info(
+                f"Using search algorithm {SearchAlgorithm.__name__} for lexicographic optimization. Note that when providing other search algorithms, we use CFO instead temporarily."
+            )
+            metric = lexico_objectives["metrics"][0] or DEFAULT_METRIC
+        else:
+            if not search_alg or search_alg == "BlendSearch":
+                try:
+                    import optuna as _
 
-                SearchAlgorithm = BlendSearch
+                    SearchAlgorithm = BlendSearch
+                    logger.info("Using search algorithm {}.".format(SearchAlgorithm.__name__))
+                except ImportError:
+                    if search_alg == "BlendSearch":
+                        raise ValueError("To use BlendSearch, run: pip install flaml[blendsearch]")
+                    else:
+                        SearchAlgorithm = CFO
+                        logger.warning("Using CFO for search. To use BlendSearch, run: pip install flaml[blendsearch]")
+            else:
+                SearchAlgorithm = locals()[search_alg]
                 logger.info("Using search algorithm {}.".format(SearchAlgorithm.__name__))
-        except ImportError:
-            SearchAlgorithm = CFO
-            logger.warning("Using CFO for search. To use BlendSearch, run: pip install flaml[blendsearch]")
             metric = metric or DEFAULT_METRIC
-        else:
-            SearchAlgorithm = CFO
-            logger.info("Using search algorithm {}.".format(SearchAlgorithm.__name__))
-            metric = lexico_objectives["metrics"][0] or DEFAULT_METRIC
         search_alg = SearchAlgorithm(
             metric=metric,
             mode=mode,
@@ -535,8 +558,12 @@ def easy_objective(config):
         )
     else:
         if metric is None or mode is None:
-            metric = metric or search_alg.metric or DEFAULT_METRIC
-            mode = mode or search_alg.mode
+            if lexico_objectives:
+                metric = lexico_objectives["metrics"][0] or metric or search_alg.metric or DEFAULT_METRIC
+                mode = lexico_objectives["modes"][0] or mode or search_alg.mode
+            else:
+                metric = metric or search_alg.metric or DEFAULT_METRIC
+                mode = mode or search_alg.mode
    if ray_available and use_ray:
        if ray_version.startswith("1."):
            from ray.tune.suggest import ConcurrencyLimiter
@@ -555,6 +582,13 @@ def easy_objective(config):
         ):
             search_alg.use_incumbent_result_in_evaluation = use_incumbent_result_in_evaluation
         searcher = search_alg.searcher if isinstance(search_alg, ConcurrencyLimiter) else search_alg
+        if lexico_objectives:
+            # TODO: Modify after supporting BlendSearch in lexicographic optimization
+            assert search_alg.__class__.__name__ in [
+                "CFO",
+            ], "If lexico_objectives is not None, the search_alg must be CFO for now."
+            search_alg.lexico_objective = lexico_objectives
+
         if isinstance(searcher, BlendSearch):
             setting = {}
             if time_budget_s:
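
Note the normalization step at the top of this diff: when `lexico_objectives` is only partially specified, missing `"modes"` default to `"min"` for every metric, and missing per-metric tolerances/targets get neutral values. A standalone sketch of that defaulting logic (the metric names mirror the new tests; the partially filled dict is illustrative, not from the commit):

```python
# Partially specified lexicographic objectives, as a user might pass them.
lexico_objectives = {
    "metrics": ["brain", "currin"],
    "tolerances": {"brain": 10.0},  # no entry for "currin"
    "targets": {},                  # no entries at all
}

# Defaulting logic as added in the diff above.
if "modes" not in lexico_objectives:
    lexico_objectives["modes"] = ["min"] * len(lexico_objectives["metrics"])
for t_metric, t_mode in zip(lexico_objectives["metrics"], lexico_objectives["modes"]):
    if t_metric not in lexico_objectives["tolerances"]:
        lexico_objectives["tolerances"][t_metric] = 0
    if t_metric not in lexico_objectives["targets"]:
        lexico_objectives["targets"][t_metric] = -float("inf") if t_mode == "min" else float("inf")

# Result:
#   modes      == ["min", "min"]
#   tolerances == {"brain": 10.0, "currin": 0}
#   targets    == {"brain": -inf, "currin": -inf}  (unbounded minimization)
```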

setup.py (+2 −2)
@@ -85,7 +85,7 @@
             "tensorboardX==2.6",  # test_forecast_panel
             "requests<2.29.0",  # https://github.com/docker/docker-py/issues/3113
             "packaging",
-            "pydantic",
+            "pydantic==1.10.9",
             "sympy",
             "wolframalpha",
             "joblib<1.3.0",  # temp solution for joblib 1.3.0 issue, no need once https://github.com/joblib/joblib-spark/pull/48 is merged
@@ -137,7 +137,7 @@
         "benchmark": ["catboost>=0.26", "psutil==5.8.0", "xgboost==1.3.3", "pandas==1.1.4"],
         "openai": ["openai==0.27.8", "diskcache"],
         "autogen": ["openai==0.27.8", "diskcache", "docker"],
-        "mathchat": ["openai==0.27.8", "diskcache", "docker", "sympy", "pydantic", "wolframalpha"],
+        "mathchat": ["openai==0.27.8", "diskcache", "docker", "sympy", "pydantic==1.10.9", "wolframalpha"],
         "synapse": [
             "joblibspark>=0.5.0",
             "optuna==2.8.0",
test/tune/test_tune.py (+94 −1)
@@ -1,13 +1,14 @@
 """Require: pip install flaml[test,ray]
 """
-from flaml import BlendSearch
+from flaml import BlendSearch, CFO
 import time
 import os
 from sklearn.model_selection import train_test_split
 import sklearn.metrics
 import sklearn.datasets
 import xgboost as xgb
 import logging
+import math
 
 try:
     from ray.tune.integration.xgboost import TuneReportCheckpointCallback
@@ -20,6 +21,32 @@
 logger.setLevel(logging.INFO)
 
 
+def _BraninCurrin(config):
+    # Rescale the inputs for Branin
+    x_1 = 15 * config["x1"] - 5
+    x_2 = 15 * config["x2"]
+    # Branin function
+    t1 = x_2 - 5.1 / (4 * math.pi**2) * x_1**2 + 5 / math.pi * x_1 - 6
+    t2 = 10 * (1 - 1 / (8 * math.pi)) * math.cos(x_1)
+    brain_result = t1**2 + t2 + 10
+    # Currin function
+    xc_1 = config["x1"]
+    xc_2 = config["x2"]
+    factor1 = 1 - math.exp(-1 / (2 * xc_2))
+    numer = 2300 * pow(xc_1, 3) + 1900 * pow(xc_1, 2) + 2092 * xc_1 + 60
+    denom = 100 * pow(xc_1, 3) + 500 * pow(xc_1, 2) + 4 * xc_1 + 20
+    currin_result = factor1 * numer / denom
+    return {"brain": brain_result, "currin": currin_result}
+
+
+def _easy_objective(config):
+    # Hyperparameters
+    width, height, step = config["width"], config["height"], config["steps"]
+
+    # get_result
+    return {"mean_loss": (0.1 + width * step / 100) ** (-1) + height * 0.1}
+
+
 def test_nested_run():
     from flaml import AutoML, tune
 
@@ -352,6 +379,72 @@ def evaluate_config_empty(config):
     )
 
 
+def test_passing_search_alg():
+    from flaml import tune
+
+    # search_space
+    so_search_space = {
+        "steps": 100,
+        "width": tune.uniform(0, 20),
+        "height": tune.uniform(-100, 100),
+    }
+    mo_search_space = {
+        "x1": tune.uniform(lower=0.000001, upper=1.0),
+        "x2": tune.uniform(lower=0.000001, upper=1.0),
+    }
+
+    # lexicographic objectives
+    lexico_objectives = {}
+    lexico_objectives["metrics"] = ["brain", "currin"]
+    lexico_objectives["tolerances"] = {"brain": 10.0, "currin": 0.0}
+    lexico_objectives["targets"] = {"brain": 0.0, "currin": 0.0}
+    lexico_objectives["modes"] = ["min", "min"]
+
+    ## Passing search_alg through string
+    # Non lexico tune
+    tune.run(
+        _easy_objective,
+        search_alg="BlendSearch",
+        metric="mean_loss",
+        mode="min",
+        num_samples=10,
+        config=so_search_space,
+    )
+    # lexico tune
+    tune.run(
+        _BraninCurrin, search_alg="CFO", num_samples=10, config=mo_search_space, lexico_objectives=lexico_objectives
+    )
+    tune.run(
+        _BraninCurrin,
+        search_alg="BlendSearch",
+        num_samples=10,
+        config=mo_search_space,
+        lexico_objectives=lexico_objectives,
+    )
+
+    ## Passing search_alg through instance
+    so_bs = BlendSearch(time_budget_s=5, metric="mean_loss", mode="min")
+    # TODO: We will change CFO into blendsearch in the future
+    mo_bs = CFO(time_budget_s=5)
+    # Non lexico tune
+    tune.run(
+        _easy_objective,
+        search_alg=so_bs,
+        metric="mean_loss",
+        mode="min",
+        num_samples=10,
+        config=so_search_space,
+    )
+    # lexico tune
+    tune.run(
+        _BraninCurrin,
+        search_alg=mo_bs,
+        num_samples=10,
+        config=mo_search_space,
+        lexico_objectives=lexico_objectives,
+    )
+
+
 def test_xgboost_bs():
     _test_xgboost()
 