Skip to content

Commit

Permalink
Make all benchmark problems take an observe_noise_stds argument
Browse files Browse the repository at this point in the history
Summary:
Context:

One might expect `observe_noise_stds` to apply to either single-objective or multi-objective problems and `observe_noise_sd` to apply only to single-objective problems. In reality, problems that derive from BoTorch synthetic test functions take an argument `observe_noise_sd`, while those that derive from surrogates have `observe_noise_stds`. I find this needlessly confusing, especially given that we don't have any problems where some outputs have observed noise and others don't, and we might want to support that for either type of problem in the future.

This PR:

* Gives all problems the argument `observe_noise_stds` only
* Updates call sites, mainly for `SingleObjectiveBenchmarkProblem`.

Note: Similar to D60194654, breaks backward compatibility

Note: Docstrings are updated in the next diff.

Differential Revision: D60241406
  • Loading branch information
esantorella authored and facebook-github-bot committed Jul 25, 2024
1 parent 3a44dad commit 629ab43
Show file tree
Hide file tree
Showing 7 changed files with 20 additions and 26 deletions.
26 changes: 10 additions & 16 deletions ax/benchmark/benchmark_problem.py
Original file line number Diff line number Diff line change
Expand Up @@ -94,7 +94,7 @@ def __init__(
runner: Runner,
num_trials: int,
is_noiseless: bool = False,
observe_noise_sd: bool = False,
observe_noise_stds: Union[bool, Dict[str, bool]] = False,
has_ground_truth: bool = False,
tracking_metrics: Optional[List[BenchmarkMetricBase]] = None,
) -> None:
Expand All @@ -104,20 +104,14 @@ def __init__(
self._runner = runner
self.num_trials = num_trials
self.is_noiseless = is_noiseless
self.observe_noise_sd = observe_noise_sd
self.observe_noise_stds = observe_noise_stds
self.has_ground_truth = has_ground_truth
self.tracking_metrics: List[BenchmarkMetricBase] = tracking_metrics or []

@property
def runner(self) -> Runner:
return self._runner

@property
def observe_noise_stds(self) -> Union[bool, Dict[str, bool]]:
# TODO: Handle cases where some outcomes have noise levels observed
# and others do not.
return self.observe_noise_sd

@classmethod
def from_botorch(
cls,
Expand Down Expand Up @@ -216,7 +210,7 @@ def from_botorch(
outcome_names=outcome_names,
),
num_trials=num_trials,
observe_noise_sd=observe_noise_sd,
observe_noise_stds=observe_noise_sd,
is_noiseless=test_problem.noise_std in (None, 0.0),
has_ground_truth=True, # all synthetic problems have ground truth
)
Expand All @@ -232,7 +226,7 @@ def __repr__(self) -> str:
f"optimization_config={self.optimization_config}, "
f"num_trials={self.num_trials}, "
f"is_noiseless={self.is_noiseless}, "
f"observe_noise_sd={self.observe_noise_sd}, "
f"observe_noise_stds={self.observe_noise_stds}, "
f"has_ground_truth={self.has_ground_truth}, "
f"tracking_metrics={self.tracking_metrics})"
)
Expand All @@ -253,7 +247,7 @@ def __init__(
runner: Runner,
num_trials: int,
is_noiseless: bool = False,
observe_noise_sd: bool = False,
observe_noise_stds: Union[bool, Dict[str, bool]] = False,
has_ground_truth: bool = False,
tracking_metrics: Optional[List[BenchmarkMetricBase]] = None,
) -> None:
Expand All @@ -264,7 +258,7 @@ def __init__(
runner=runner,
num_trials=num_trials,
is_noiseless=is_noiseless,
observe_noise_sd=observe_noise_sd,
observe_noise_stds=observe_noise_stds,
has_ground_truth=has_ground_truth,
tracking_metrics=tracking_metrics,
)
Expand Down Expand Up @@ -307,7 +301,7 @@ def from_botorch_synthetic(
runner=problem.runner,
num_trials=num_trials,
is_noiseless=problem.is_noiseless,
observe_noise_sd=problem.observe_noise_sd,
observe_noise_stds=problem.observe_noise_stds,
has_ground_truth=problem.has_ground_truth,
optimal_value=test_problem.optimal_value,
)
Expand All @@ -332,7 +326,7 @@ def __init__(
runner: Runner,
num_trials: int,
is_noiseless: bool = False,
observe_noise_sd: bool = False,
observe_noise_stds: Union[bool, Dict[str, bool]] = False,
has_ground_truth: bool = False,
tracking_metrics: Optional[List[BenchmarkMetricBase]] = None,
) -> None:
Expand All @@ -345,7 +339,7 @@ def __init__(
runner=runner,
num_trials=num_trials,
is_noiseless=is_noiseless,
observe_noise_sd=observe_noise_sd,
observe_noise_stds=observe_noise_stds,
has_ground_truth=has_ground_truth,
tracking_metrics=tracking_metrics,
)
Expand Down Expand Up @@ -421,7 +415,7 @@ def from_botorch_multi_objective(
runner=problem.runner,
num_trials=num_trials,
is_noiseless=problem.is_noiseless,
observe_noise_sd=observe_noise_sd,
observe_noise_stds=observe_noise_sd,
has_ground_truth=problem.has_ground_truth,
optimal_value=test_problem.max_hv,
reference_point=test_problem._ref_point,
Expand Down
2 changes: 1 addition & 1 deletion ax/benchmark/problems/hpo/pytorch_cnn.py
Original file line number Diff line number Diff line change
Expand Up @@ -94,7 +94,7 @@ def from_datasets(
runner=runner,
num_trials=num_trials,
is_noiseless=False,
observe_noise_sd=False,
observe_noise_stds=False,
has_ground_truth=False,
)

Expand Down
2 changes: 1 addition & 1 deletion ax/benchmark/problems/hpo/torchvision.py
Original file line number Diff line number Diff line change
Expand Up @@ -86,7 +86,7 @@ def from_dataset_name(
runner=runner,
num_trials=num_trials,
is_noiseless=False,
observe_noise_sd=False,
observe_noise_stds=False,
has_ground_truth=False,
optimal_value=problem.optimal_value,
)
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -119,7 +119,7 @@ def _get_problem_from_common_inputs(
num_trials=num_trials,
optimal_value=optimal_value,
is_noiseless=True,
observe_noise_sd=observe_noise_sd,
observe_noise_stds=observe_noise_sd,
has_ground_truth=True,
)

Expand Down
2 changes: 1 addition & 1 deletion ax/benchmark/problems/synthetic/hss/jenatton.py
Original file line number Diff line number Diff line change
Expand Up @@ -72,7 +72,7 @@ def get_jenatton_benchmark_problem(
runner=SyntheticRunner(),
num_trials=num_trials,
is_noiseless=True,
observe_noise_sd=observe_noise_sd,
observe_noise_stds=observe_noise_sd,
has_ground_truth=True,
optimal_value=0.1,
)
4 changes: 2 additions & 2 deletions ax/benchmark/tests/test_benchmark_problem.py
Original file line number Diff line number Diff line change
Expand Up @@ -83,7 +83,7 @@ def test_single_objective_from_botorch(self) -> None:
"minimize=True), outcome_constraints=[]), "
"num_trials=1, "
"is_noiseless=True, "
"observe_noise_sd=False, "
"observe_noise_stds=False, "
"has_ground_truth=True, "
"tracking_metrics=[])"
)
Expand All @@ -103,7 +103,7 @@ def test_single_objective_from_botorch(self) -> None:
" >= 0.0)]), "
"num_trials=1, "
"is_noiseless=True, "
"observe_noise_sd=False, "
"observe_noise_stds=False, "
"has_ground_truth=True, "
"tracking_metrics=[])"
)
Expand Down
8 changes: 4 additions & 4 deletions ax/storage/json_store/encoders.py
Original file line number Diff line number Diff line change
Expand Up @@ -144,7 +144,7 @@ def benchmark_problem_to_dict(benchmark_problem: BenchmarkProblem) -> Dict[str,
"runner": benchmark_problem.runner,
"num_trials": benchmark_problem.num_trials,
"is_noiseless": benchmark_problem.is_noiseless,
"observe_noise_sd": benchmark_problem.observe_noise_sd,
"observe_noise_stds": benchmark_problem.observe_noise_stds,
"has_ground_truth": benchmark_problem.has_ground_truth,
"tracking_metrics": benchmark_problem.tracking_metrics,
}
Expand All @@ -162,7 +162,7 @@ def multi_objective_benchmark_problem_to_dict(
"runner": moo_benchmark_problem.runner,
"num_trials": moo_benchmark_problem.num_trials,
"is_noiseless": moo_benchmark_problem.is_noiseless,
"observe_noise_sd": moo_benchmark_problem.observe_noise_sd,
"observe_noise_stds": moo_benchmark_problem.observe_noise_stds,
"has_ground_truth": moo_benchmark_problem.has_ground_truth,
"tracking_metrics": moo_benchmark_problem.tracking_metrics,
"optimal_value": moo_benchmark_problem.optimal_value,
Expand All @@ -181,7 +181,7 @@ def single_objective_benchmark_problem_to_dict(
"runner": soo_benchmark_problem.runner,
"num_trials": soo_benchmark_problem.num_trials,
"is_noiseless": soo_benchmark_problem.is_noiseless,
"observe_noise_sd": soo_benchmark_problem.observe_noise_sd,
"observe_noise_stds": soo_benchmark_problem.observe_noise_stds,
"has_ground_truth": soo_benchmark_problem.has_ground_truth,
"tracking_metrics": soo_benchmark_problem.tracking_metrics,
"optimal_value": soo_benchmark_problem.optimal_value,
Expand Down Expand Up @@ -748,7 +748,7 @@ def pytorch_cnn_torchvision_benchmark_problem_to_dict(
"__type": problem.__class__.__name__,
"name": not_none(re.compile("(?<=::).*").search(problem.name)).group(),
"num_trials": problem.num_trials,
"observe_noise_sd": problem.observe_noise_sd,
"observe_noise_stds": problem.observe_noise_stds,
}


Expand Down

0 comments on commit 629ab43

Please sign in to comment.