
Commit ac547ac

esantorella authored and facebook-github-bot committed
Run ModelLauncher with a BenchmarkRunner and BenchmarkMetric (facebook#2681)
Summary:
Pull Request resolved: facebook#2681

Similar to how Jennaton was migrated in D60996475, this change switches the ModelLauncher problem to use `BenchmarkRunner` and `BenchmarkMetric`.

Reviewed By: Balandat

Differential Revision: D61397457
1 parent 65369f1 commit ac547ac
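
To illustrate the wiring this migration standardizes on, the following is a minimal sketch of attaching a `BenchmarkMetric` to an `Objective`, using only constructor arguments that appear in the diff below. The import paths and the "branin" objective name are assumptions for illustration, not taken from this commit.

    # Minimal sketch; import paths assumed, not confirmed by this commit.
    from ax.benchmark.benchmark_metric import BenchmarkMetric
    from ax.core.objective import Objective

    # Objective read from outcome index 0 of the benchmark runner's output,
    # minimized because lower values are better for this test function.
    objective = Objective(
        metric=BenchmarkMetric(
            name="branin",  # hypothetical objective name
            lower_is_better=True,
            observe_noise_sd=False,
            outcome_index=0,
        ),
        minimize=True,
    )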

File tree

1 file changed (+53, -38 lines)


Diff for: ax/benchmark/benchmark_problem.py

+53, -38
@@ -93,6 +93,54 @@ class BenchmarkProblem(Base):
     is_noiseless: bool
 
 
+# TODO: Support constrained MOO problems.
+def get_soo_config_and_outcome_names(
+    *,
+    num_constraints: int,
+    lower_is_better: bool,
+    observe_noise_sd: bool,
+    objective_name: str,
+) -> tuple[OptimizationConfig, list[str]]:
+
+    objective = Objective(
+        metric=BenchmarkMetric(
+            name=objective_name,
+            lower_is_better=lower_is_better,
+            observe_noise_sd=observe_noise_sd,
+            outcome_index=0,
+        ),
+        minimize=lower_is_better,
+    )
+
+    outcome_names = [objective_name]
+    outcome_constraints = []
+
+    # NOTE: Currently we don't support the case where only some of the
+    # outcomes have noise levels observed.
+
+    for i in range(num_constraints):
+        outcome_name = f"constraint_slack_{i}"
+        outcome_constraints.append(
+            OutcomeConstraint(
+                metric=BenchmarkMetric(
+                    name=outcome_name,
+                    lower_is_better=False,  # positive slack = feasible
+                    observe_noise_sd=observe_noise_sd,
+                    outcome_index=i,
+                ),
+                op=ComparisonOp.GEQ,
+                bound=0.0,
+                relative=False,
+            )
+        )
+        outcome_names.append(outcome_name)
+
+    opt_config = OptimizationConfig(
+        objective=objective, outcome_constraints=outcome_constraints
+    )
+    return opt_config, outcome_names
+
+
 def create_single_objective_problem_from_botorch(
     test_problem_class: type[SyntheticTestFunction],
     test_problem_kwargs: dict[str, Any],
@@ -138,46 +186,13 @@ def create_single_objective_problem_from_botorch
         test_problem=test_problem, observe_noise_sd=observe_noise_sd, dim=dim
     )
 
-    # TODO: Support constrained MOO problems.
-
-    objective = Objective(
-        metric=BenchmarkMetric(
-            name=name,
-            lower_is_better=lower_is_better,
-            observe_noise_sd=observe_noise_sd,
-            outcome_index=0,
-        ),
-        minimize=lower_is_better,
+    optimization_config, outcome_names = get_soo_config_and_outcome_names(
+        num_constraints=test_problem.num_constraints if is_constrained else 0,
+        lower_is_better=lower_is_better,
+        observe_noise_sd=observe_noise_sd,
+        objective_name=name,
     )
 
-    outcome_names = [name]
-    outcome_constraints = []
-
-    # NOTE: Currently we don't support the case where only some of the
-    # outcomes have noise levels observed.
-
-    if is_constrained:
-        for i in range(test_problem.num_constraints):
-            outcome_name = f"constraint_slack_{i}"
-            outcome_constraints.append(
-                OutcomeConstraint(
-                    metric=BenchmarkMetric(
-                        name=outcome_name,
-                        lower_is_better=False,  # positive slack = feasible
-                        observe_noise_sd=observe_noise_sd,
-                        outcome_index=i,
-                    ),
-                    op=ComparisonOp.GEQ,
-                    bound=0.0,
-                    relative=False,
-                )
-            )
-            outcome_names.append(outcome_name)
-
-    optimization_config = OptimizationConfig(
-        objective=objective,
-        outcome_constraints=outcome_constraints,
-    )
     optimal_value = (
         test_problem.max_hv
         if isinstance(test_problem, MultiObjectiveTestProblem)

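For context on what the extracted helper returns, here is a hypothetical usage sketch based solely on `get_soo_config_and_outcome_names` as it appears in this diff; the import path and the "Hartmann_6" objective name are assumptions, not part of the commit.

    # Usage sketch; module path assumed from the file being edited here.
    from ax.benchmark.benchmark_problem import get_soo_config_and_outcome_names

    # A minimizing objective with two feasibility constraints; per the diff,
    # each constraint becomes an OutcomeConstraint requiring non-negative
    # slack (ComparisonOp.GEQ with bound=0.0).
    opt_config, outcome_names = get_soo_config_and_outcome_names(
        num_constraints=2,
        lower_is_better=True,
        observe_noise_sd=False,
        objective_name="Hartmann_6",  # hypothetical name
    )

    # One outcome per metric: the objective first, then one slack outcome
    # per constraint, matching the loop in the new helper.
    assert outcome_names == [
        "Hartmann_6",
        "constraint_slack_0",
        "constraint_slack_1",
    ]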