diff --git a/autoPyTorch/api/base_task.py b/autoPyTorch/api/base_task.py
index 80d8bd51e..905d795fd 100644
--- a/autoPyTorch/api/base_task.py
+++ b/autoPyTorch/api/base_task.py
@@ -690,6 +690,7 @@ def _do_dummy_prediction(self) -> None:
             backend=self._backend,
             seed=self.seed,
             metric=self._metric,
+            multi_objectives=["cost"],
             logger_port=self._logger_port,
             cost_for_crash=get_cost_of_crash(self._metric),
             abort_on_first_run_crash=False,
@@ -773,6 +774,7 @@ def _do_traditional_prediction(self, time_left: int, func_eval_time_limit_secs:
             pynisher_context=self._multiprocessing_context,
             backend=self._backend,
             seed=self.seed,
+            multi_objectives=["cost"],
             metric=self._metric,
             logger_port=self._logger_port,
             cost_for_crash=get_cost_of_crash(self._metric),
@@ -1575,6 +1577,7 @@ def fit_pipeline(
             backend=self._backend,
             seed=self.seed,
             metric=metric,
+            multi_objectives=["cost"],
             logger_port=self._logger_port,
             cost_for_crash=get_cost_of_crash(metric),
             abort_on_first_run_crash=False,
diff --git a/autoPyTorch/evaluation/tae.py b/autoPyTorch/evaluation/tae.py
index 7ca895304..b109dbb1a 100644
--- a/autoPyTorch/evaluation/tae.py
+++ b/autoPyTorch/evaluation/tae.py
@@ -111,6 +111,7 @@ def __init__(
         cost_for_crash: float,
         abort_on_first_run_crash: bool,
         pynisher_context: str,
+        multi_objectives: List[str],
         pipeline_config: Optional[Dict[str, Any]] = None,
         initial_num_run: int = 1,
         stats: Optional[Stats] = None,
diff --git a/autoPyTorch/utils/single_thread_client.py b/autoPyTorch/utils/single_thread_client.py
index 9bb0fe3eb..30fd05b94 100644
--- a/autoPyTorch/utils/single_thread_client.py
+++ b/autoPyTorch/utils/single_thread_client.py
@@ -61,8 +61,24 @@ def submit(
         func: Callable,
         *args: List,
         priority: int = 0,
+        key: Any = None,
+        workers: Any = None,
+        resources: Any = None,
+        retries: Any = None,
+        fifo_timeout: Any = "100 ms",
+        allow_other_workers: Any = False,
+        actor: Any = False,
+        actors: Any = False,
+        pure: Any = None,
         **kwargs: Any,
     ) -> Any:
+        """
+        Note
+        ----
+        The keyword arguments caught in `dask.distributed.Client` need to
+        be specified here so they don't get passed in as ``**kwargs`` to the
+        ``func``.
+        """
         return DummyFuture(func(*args, **kwargs))

     def close(self) -> None:
diff --git a/test/test_evaluation/test_evaluation.py b/test/test_evaluation/test_evaluation.py
index 051a1c174..2cabb6a73 100644
--- a/test/test_evaluation/test_evaluation.py
+++ b/test/test_evaluation/test_evaluation.py
@@ -99,6 +99,7 @@ def test_eval_with_limits_holdout(self, pynisher_mock):
         config.config_id = 198
         ta = ExecuteTaFuncWithQueue(backend=BackendMock(), seed=1,
                                     stats=self.stats,
+                                    multi_objectives=["cost"],
                                     memory_limit=3072,
                                     metric=accuracy,
                                     cost_for_crash=get_cost_of_crash(accuracy),
@@ -120,6 +121,7 @@ def test_cutoff_lower_than_remaining_time(self, pynisher_mock):
         ta = ExecuteTaFuncWithQueue(backend=BackendMock(), seed=1,
                                     stats=self.stats,
                                     memory_limit=3072,
+                                    multi_objectives=["cost"],
                                     metric=accuracy,
                                     cost_for_crash=get_cost_of_crash(accuracy),
                                     abort_on_first_run_crash=False,
@@ -146,6 +148,7 @@ def test_eval_with_limits_holdout_fail_timeout(self, pynisher_mock):
         ta = ExecuteTaFuncWithQueue(backend=BackendMock(), seed=1,
                                     stats=self.stats,
                                     memory_limit=3072,
+                                    multi_objectives=["cost"],
                                     metric=accuracy,
                                     cost_for_crash=get_cost_of_crash(accuracy),
                                     abort_on_first_run_crash=False,
@@ -166,6 +169,7 @@ def test_zero_or_negative_cutoff(self, pynisher_mock):
         ta = ExecuteTaFuncWithQueue(backend=BackendMock(), seed=1,
                                     stats=self.stats,
                                     memory_limit=3072,
+                                    multi_objectives=["cost"],
                                     metric=accuracy,
                                     cost_for_crash=get_cost_of_crash(accuracy),
                                     abort_on_first_run_crash=False,
@@ -187,6 +191,7 @@ def test_eval_with_limits_holdout_fail_silent(self, pynisher_mock):
         ta = ExecuteTaFuncWithQueue(backend=BackendMock(), seed=1,
                                     stats=self.stats,
                                     memory_limit=3072,
+                                    multi_objectives=["cost"],
                                     metric=accuracy,
                                     cost_for_crash=get_cost_of_crash(accuracy),
                                     abort_on_first_run_crash=False,
@@ -228,6 +233,7 @@ def test_eval_with_limits_holdout_fail_memory_error(self, pynisher_mock):
         ta = ExecuteTaFuncWithQueue(backend=BackendMock(), seed=1,
                                     stats=self.stats,
                                     memory_limit=3072,
+                                    multi_objectives=["cost"],
                                     metric=accuracy,
                                     cost_for_crash=get_cost_of_crash(accuracy),
                                     abort_on_first_run_crash=False,
@@ -266,6 +272,7 @@ def side_effect(**kwargs):
         ta = ExecuteTaFuncWithQueue(backend=BackendMock(), seed=1,
                                     stats=self.stats,
                                     memory_limit=3072,
+                                    multi_objectives=["cost"],
                                     metric=accuracy,
                                     cost_for_crash=get_cost_of_crash(accuracy),
                                     abort_on_first_run_crash=False,
@@ -289,6 +296,7 @@ def side_effect(**kwargs):
         ta = ExecuteTaFuncWithQueue(backend=BackendMock(), seed=1,
                                     stats=self.stats,
                                     memory_limit=3072,
+                                    multi_objectives=["cost"],
                                     metric=accuracy,
                                     cost_for_crash=get_cost_of_crash(accuracy),
                                     abort_on_first_run_crash=False,
@@ -316,6 +324,7 @@ def side_effect(*args, **kwargs):
         ta = ExecuteTaFuncWithQueue(backend=BackendMock(), seed=1,
                                     stats=self.stats,
                                     memory_limit=3072,
+                                    multi_objectives=["cost"],
                                     metric=accuracy,
                                     cost_for_crash=get_cost_of_crash(accuracy),
                                     abort_on_first_run_crash=False,
@@ -340,6 +349,7 @@ def test_exception_in_target_function(self, eval_holdout_mock):
         ta = ExecuteTaFuncWithQueue(backend=BackendMock(), seed=1,
                                     stats=self.stats,
                                     memory_limit=3072,
+                                    multi_objectives=["cost"],
                                     metric=accuracy,
                                     cost_for_crash=get_cost_of_crash(accuracy),
                                     abort_on_first_run_crash=False,
@@ -363,6 +373,7 @@ def test_silent_exception_in_target_function(self):
         ta = ExecuteTaFuncWithQueue(backend=BackendMock(), seed=1,
                                     stats=self.stats,
                                     memory_limit=3072,
+                                    multi_objectives=["cost"],
                                     metric=accuracy,
                                     cost_for_crash=get_cost_of_crash(accuracy),
                                     abort_on_first_run_crash=False,
@@ -401,6 +412,7 @@ def test_eval_with_simple_intensification(self):
         ta = ExecuteTaFuncWithQueue(backend=BackendMock(), seed=1,
                                     stats=self.stats,
                                     memory_limit=3072,
+                                    multi_objectives=["cost"],
                                     metric=accuracy,
                                     cost_for_crash=get_cost_of_crash(accuracy),
                                     abort_on_first_run_crash=False,
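
Why the extra keyword arguments on `SingleThreadClient.submit` matter: the class stands in for `dask.distributed.Client`, whose `submit` consumes scheduler-level options such as `key`, `workers`, `resources`, `retries`, `fifo_timeout`, `allow_other_workers`, `actor`, `actors`, and `pure`. If the stand-in does not name them in its signature, they fall through `**kwargs` into the user function. A minimal sketch of the failure mode and the fix (`submit_naive`, `submit_fixed`, and `target` are hypothetical names, not part of the patch):

```python
from typing import Any, Callable


def submit_naive(func: Callable, *args: Any, **kwargs: Any) -> Any:
    # Anything not named in the signature lands in **kwargs and is
    # forwarded straight to ``func``.
    return func(*args, **kwargs)


def submit_fixed(func: Callable, *args: Any,
                 pure: Any = None, **kwargs: Any) -> Any:
    # ``pure`` is absorbed here, mirroring what the patch does for the
    # dask scheduler options, so ``func`` only sees its own kwargs.
    return func(*args, **kwargs)


def target(x: int) -> int:
    return x * 2


try:
    # dask callers pass options like ``pure``; the naive stand-in
    # forwards it and the target function rejects it.
    submit_naive(target, 21, pure=None)
except TypeError as err:
    print(err)  # target() got an unexpected keyword argument 'pure'

print(submit_fixed(target, 21, pure=None))  # 42
```

The defaults chosen in the patch (e.g. `fifo_timeout="100 ms"`) match dask's own `Client.submit` defaults, so call sites behave identically whether they hit the real client or the single-threaded stand-in.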