diff --git a/neural_compressor/contrib/strategy/sigopt.py b/neural_compressor/contrib/strategy/sigopt.py
index 54593fb2e32..19b3ae1ed3e 100644
--- a/neural_compressor/contrib/strategy/sigopt.py
+++ b/neural_compressor/contrib/strategy/sigopt.py
@@ -17,12 +17,14 @@
 import copy
 from neural_compressor.utils import logger
+from neural_compressor.utils.utility import LazyImport
 from neural_compressor.strategy.strategy import strategy_registry, TuneStrategy
-from sigopt import Connection
 from collections import OrderedDict
 from neural_compressor.strategy.st_utils.tuning_sampler import OpWiseTuningSampler
 from neural_compressor.strategy.st_utils.tuning_structs import OpTuningConfig
 
+sigopt = LazyImport('sigopt')
+
 
 @strategy_registry
 class SigOptTuneStrategy(TuneStrategy):
     """The tuning strategy using SigOpt HPO search in tuning space.
@@ -80,7 +82,15 @@ def __init__(self, model, conf, q_dataloader, q_func=None,
             eval_func,
             dicts,
             q_hooks)
-
+        # Initialize the SigOpt tuning strategy if the user specified to use it.
+        strategy_name = conf.usr_cfg.tuning.strategy.name
+        if strategy_name.lower() == "sigopt":
+            try:
+                import sigopt
+            except ImportError:
+                raise ImportError(f"Please install sigopt for using {strategy_name} strategy.")
+            else:
+                pass
         # SigOpt init
         client_token = conf.usr_cfg.tuning.strategy.sigopt_api_token
         self.project_id = conf.usr_cfg.tuning.strategy.sigopt_project_id
@@ -107,7 +117,7 @@ def __init__(self, model, conf, q_dataloader, q_func=None,
         else:
             logger.info("Experiment name is {}.".format(self.experiment_name))
 
-        self.conn = Connection(client_token)
+        self.conn = sigopt.Connection(client_token)
         self.experiment = None
 
     def params_to_tune_configs(self, params):
diff --git a/neural_compressor/contrib/strategy/tpe.py b/neural_compressor/contrib/strategy/tpe.py
index 9baf2911904..39362f1749b 100644
--- a/neural_compressor/contrib/strategy/tpe.py
+++ b/neural_compressor/contrib/strategy/tpe.py
@@ -20,14 +20,14 @@
 from pathlib import Path
 from functools import partial
 import numpy as np
-import hyperopt as hpo
-from hyperopt import fmin, hp, STATUS_OK, Trials
 from neural_compressor.utils import logger
+from neural_compressor.utils.utility import LazyImport
 from neural_compressor.strategy.strategy import strategy_registry, TuneStrategy
 from collections import OrderedDict
 from neural_compressor.strategy.st_utils.tuning_sampler import OpWiseTuningSampler
 from neural_compressor.strategy.st_utils.tuning_structs import OpTuningConfig
 
+hyperopt = LazyImport('hyperopt')
 
 try:
     import pandas as pd
@@ -85,10 +85,19 @@ def __init__(self, model, conf, q_dataloader, q_func=None,
                  eval_dataloader=None, eval_func=None, dicts=None, q_hooks=None):
         assert conf.usr_cfg.quantization.approach == 'post_training_static_quant', \
                "TPE strategy is only for post training static quantization!"
+        # Initialize the TPE tuning strategy if the user specified to use it.
+        strategy_name = conf.usr_cfg.tuning.strategy.name
+        if strategy_name.lower() == "tpe":
+            try:
+                import hyperopt
+            except ImportError:
+                raise ImportError(f"Please install hyperopt for using {strategy_name} strategy.")
+            else:
+                pass
         self.hpopt_search_space = None
         self.warm_start = False
         self.cfg_evaluated = False
-        self.hpopt_trials = Trials()
+        self.hpopt_trials = hyperopt.Trials()
         self.max_trials = conf.usr_cfg.tuning.exit_policy.get('max_trials', 200)
         self.loss_function_config = {
             'acc_th': conf.usr_cfg.tuning.accuracy_criterion.relative if \
@@ -140,7 +149,7 @@ def __getstate__(self):
     def _configure_hpopt_search_space_and_params(self, search_space):
         self.hpopt_search_space = {}
         for param, configs in search_space.items():
-            self.hpopt_search_space[(param)] = hp.choice((param[0]), configs)
+            self.hpopt_search_space[(param)] = hyperopt.hp.choice((param[0]), configs)
         # Find minimum number of choices for params with more than one choice
         multichoice_params = [len(configs) for param, configs in search_space.items()
                               if len(configs) > 1]
@@ -149,7 +158,7 @@ def _configure_hpopt_search_space_and_params(self, search_space):
         min_param_size = min(multichoice_params) if len(multichoice_params) > 0 else 1
         self.tpe_params['n_EI_candidates'] = min_param_size
         self.tpe_params['prior_weight'] = 1 / min_param_size
-        self._algo = partial(hpo.tpe.suggest,
+        self._algo = partial(hyperopt.tpe.suggest,
                              n_startup_jobs=self.tpe_params['n_initial_point'],
                              gamma=self.tpe_params['gamma'],
                              n_EI_candidates=self.tpe_params['n_EI_candidates'],
@@ -225,12 +234,12 @@ def initial_op_quant_mode(items_lst, target_quant_mode, op_item_dtype_dict):
                 self._configure_hpopt_search_space_and_params(first_run_cfg)
                 # Run first iteration with best result from history
                 trials_count = len(self.hpopt_trials.trials) + 1
-                fmin(partial(self.object_evaluation, model=self.model),
-                     space=self.hpopt_search_space,
-                     algo=self._algo,
-                     max_evals=trials_count,
-                     trials=self.hpopt_trials,
-                     show_progressbar=False)
+                hyperopt.fmin(partial(self.object_evaluation, model=self.model),
+                              space=self.hpopt_search_space,
+                              algo=self._algo,
+                              max_evals=trials_count,
+                              trials=self.hpopt_trials,
+                              show_progressbar=False)
                 if pd is not None:
                     self._save_trials(trials_file)
                     self._update_best_result(best_result_file)
@@ -266,12 +275,12 @@ def initial_op_quant_mode(items_lst, target_quant_mode, op_item_dtype_dict):
             self.cfg_evaluated = False
             logger.debug("Trial iteration start: {} / {}.".format(
                 trials_count, self.max_trials))
-            fmin(partial(self.object_evaluation, model=self.model),
-                 space=self.hpopt_search_space,
-                 algo=self._algo,
-                 max_evals=trials_count,
-                 trials=self.hpopt_trials,
-                 show_progressbar=False)
+            hyperopt.fmin(partial(self.object_evaluation, model=self.model),
+                          space=self.hpopt_search_space,
+                          algo=self._algo,
+                          max_evals=trials_count,
+                          trials=self.hpopt_trials,
+                          show_progressbar=False)
             trials_count += 1
             if pd is not None:
                 self._save_trials(trials_file)
@@ -349,7 +358,7 @@ def _compute_metrics(self, tune_cfg, acc, lat):
                 'acc_loss': acc_diff,
                 'lat_diff': lat_diff,
                 'quantization_ratio': quantization_ratio,
-                'status': STATUS_OK}
+                'status': hyperopt.STATUS_OK}
 
     def _calculate_acc_lat_diff(self, acc, lat):
         int8_acc = acc
diff --git a/requirements.txt b/requirements.txt
index 6da20f57fee..01f0fce2449 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -7,7 +7,6 @@ scikit-image
 matplotlib
 schema
 py-cpuinfo
-hyperopt
 contextlib2
 requests
 Flask
@@ -20,7 +19,6 @@ Pillow
 pycocotools-windows; sys_platform != 'linux'
 pycocotools; sys_platform == 'linux'
 opencv-python
-sigopt
 prettytable
 cryptography
 sqlalchemy==1.4.27
diff --git a/setup.py b/setup.py
index 7dd91a69efb..caa4ca09e85 100644
--- a/setup.py
+++ b/setup.py
@@ -36,8 +36,8 @@
 # define install requirements
 install_requires_list = [
-    'numpy', 'pyyaml', 'scikit-learn', 'schema', 'py-cpuinfo', 'hyperopt', 'pandas', 'pycocotools',
-    'opencv-python', 'requests', 'psutil', 'Pillow', 'sigopt', 'prettytable', 'cryptography', 'Cython',
+    'numpy', 'pyyaml', 'scikit-learn', 'schema', 'py-cpuinfo', 'pandas', 'pycocotools',
+    'opencv-python', 'requests', 'psutil', 'Pillow', 'prettytable', 'cryptography', 'Cython',
     'deprecated']
 ux_install_requires_list = [
     'Flask-Cors', 'Flask-SocketIO', 'Flask', 'gevent-websocket', 'gevent','sqlalchemy==1.4.27',
     'alembic==1.7.7']
diff --git a/test/requirements.txt b/test/requirements.txt
index c570fff1dec..30712c4bafb 100644
--- a/test/requirements.txt
+++ b/test/requirements.txt
@@ -14,6 +14,7 @@ transformers<=4.12.3; python_version < '3.10'
 transformers==4.16.0; python_version == '3.10'
 tensorflow_model_optimization
 sigopt
+hyperopt
 horovod
 tensorflow-addons
 onnxruntime-extensions; python_version < '3.10'
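
The refactor above relies on LazyImport from neural_compressor.utils.utility: sigopt.py and tpe.py keep module-level names such as sigopt and hyperopt, but the optional packages are only resolved when an attribute like hyperopt.Trials or sigopt.Connection is first accessed. The following is a minimal sketch of that lazy-import idea, not the library's actual implementation; the importlib-based resolution and the exact class layout are assumptions made for illustration.

import importlib

class LazyImport:
    """Defer importing a module until one of its attributes is first used (illustrative sketch)."""

    def __init__(self, module_name):
        self.module_name = module_name
        self._module = None          # real module is filled in lazily

    def __getattr__(self, name):
        # Called only for attributes not found on the wrapper itself,
        # e.g. hyperopt.Trials or sigopt.Connection in the diff above.
        if self._module is None:
            self._module = importlib.import_module(self.module_name)
        return getattr(self._module, name)

hyperopt = LazyImport('hyperopt')    # importing this module stays cheap
# trials = hyperopt.Trials()         # 'hyperopt' would be imported only here

With this pattern, the explicit try/except import added in each strategy's __init__ is what surfaces a clear "Please install ..." error up front when the corresponding strategy is selected, rather than a late ImportError the first time hyperopt.fmin or sigopt.Connection is reached.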