Skip to content
This repository has been archived by the owner on Nov 17, 2023. It is now read-only.

Commit

Permalink
change nd -> np in imagenet_gen_qsym_onednn.py (#20399)
Browse files Browse the repository at this point in the history
also a few tiny formatting fixes
  • Loading branch information
sfraczek authored Oct 8, 2021
1 parent 4d48b06 commit 36cb619
Show file tree
Hide file tree
Showing 2 changed files with 12 additions and 7 deletions.
10 changes: 6 additions & 4 deletions example/quantization/imagenet_gen_qsym_onednn.py
Original file line number Diff line number Diff line change
Expand Up @@ -39,6 +39,7 @@ def download_calib_dataset(dataset_url, calib_dataset, logger=None):
logger.info('Downloading calibration dataset from %s to %s' % (dataset_url, calib_dataset))
mx.test_utils.download(dataset_url, calib_dataset)


def get_from_gluon(model_name, classes=1000, logger=None):
dir_path = os.path.dirname(os.path.realpath(__file__))
model_path = os.path.join(dir_path, 'model')
Expand All @@ -48,12 +49,14 @@ def get_from_gluon(model_name, classes=1000, logger=None):
prefix = os.path.join(model_path, model_name)
return net, prefix


def regex_find_excluded_symbols(patterns_dict, model_name):
    """Return the exclusion list whose regex key matches *model_name*.

    Scans *patterns_dict* in insertion order and returns the value paired
    with the first key that matches *model_name* as a regular expression
    (via ``re.search``); returns ``None`` when no key matches.
    """
    hits = (excluded for pattern, excluded in patterns_dict.items()
            if re.search(pattern, model_name) is not None)
    return next(hits, None)


def get_exclude_symbols(model_name, exclude_first_conv):
"""Grouped supported models at the time of commit:
- alexnet
Expand Down Expand Up @@ -95,6 +98,7 @@ def get_exclude_symbols(model_name, exclude_first_conv):
excluded_sym_names += excluded_first_conv_sym_names
return excluded_sym_names


if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Generate a calibrated quantized model from a FP32 model with Intel oneDNN support')
parser.add_argument('--model', type=str, default='resnet50_v1',
Expand All @@ -116,7 +120,7 @@ def get_exclude_symbols(model_name, exclude_first_conv):
help='number of batches for calibration')
parser.add_argument('--exclude-first-conv', action='store_true', default=False,
help='excluding quantizing the first conv layer since the'
' input data may have negative value which doesn\'t support at moment' )
' input data may have negative value which doesn\'t support at moment')
parser.add_argument('--shuffle-dataset', action='store_true',
help='shuffle the calibration dataset')
parser.add_argument('--calib-mode', type=str, default='entropy',
Expand Down Expand Up @@ -170,8 +174,7 @@ def get_exclude_symbols(model_name, exclude_first_conv):
dir_path = os.path.dirname(os.path.realpath(__file__))
dir_path = os.path.join(dir_path, 'model')
if not os.path.exists(dir_path):
os.mkdir(dir_path) # without try catch block as we expect to finish
# script if it fail
os.mkdir(dir_path) # without try catch block as we expect to finish script if it fail

# download model
if not args.no_pretrained:
Expand All @@ -191,7 +194,6 @@ def get_exclude_symbols(model_name, exclude_first_conv):
epoch = args.epoch
net = gluon.SymbolBlock.imports("{}-symbol.json".format(prefix), ['data'], "{}-0000.params".format(prefix))


# get batch size
batch_size = args.batch_size
if logger:
Expand Down
9 changes: 6 additions & 3 deletions python/mxnet/contrib/quantization.py
Original file line number Diff line number Diff line change
Expand Up @@ -32,6 +32,7 @@
from ..context import cpu, Context
from ..util import is_np_array


def _quantize_params(qsym, params, min_max_dict):
"""Given a quantized symbol and a dict of params that have not been quantized,
generate quantized params. Currently only supports quantizing the arg_params
Expand Down Expand Up @@ -86,6 +87,7 @@ def _quantize_params(qsym, params, min_max_dict):
quantized_params[name] = array_cls.array([min_max_dict[output][1]])
return quantized_params


def _quantize_symbol(sym, ctx, excluded_symbols=None, excluded_operators=None,
offline_params=None, quantized_dtype='int8', quantize_mode='smart',
quantize_granularity='tensor-wise'):
Expand Down Expand Up @@ -291,6 +293,7 @@ def get_optimal_thresholds(hist_dict, quantized_dtype, num_quantized_bins=255, l
logger.debug(f"layer={name}, min_val={min_val}, max_val={max_val}, th={th}, divergence={divergence}")
return th_dict


class _LayerOutputMinMaxCollector(CalibrationCollector):
"""Saves layer output min and max values in a dict with layer names as keys.
The collected min and max values will be directly used as thresholds for quantization.
Expand Down Expand Up @@ -319,11 +322,12 @@ def collect(self, name, op_name, arr):
self.logger.debug("Collecting layer %s min_range=%f, max_range=%f"
% (name, min_range, max_range))


def _calibrate_quantized_sym(qsym, min_max_dict):
"""Given a dictionary containing the thresholds for quantizing the layers,
set the thresholds into the quantized symbol as the params of requantize operators.
"""
if min_max_dict is None or len(min_max_dict) == 0:
if min_max_dict is None or len(min_max_dict) == 0:
return qsym
num_layer_outputs = len(min_max_dict)
layer_output_names = []
Expand Down Expand Up @@ -363,8 +367,6 @@ def _collect_layer_statistics(sym_block, data, collector, num_inputs, num_calib_
return num_batches




def _generate_list_of_data_desc(data_shapes, data_types):
""""Convert list ot tuples to list of DataDesc."""
if isinstance(data_shapes, list):
Expand Down Expand Up @@ -527,6 +529,7 @@ def quantize_model(sym, arg_params, aux_params, data_names=('data',),

return qsym, qarg_params, aux_params


def quantize_model_mkldnn(sym, arg_params, aux_params, data_names=('data',),
ctx=cpu(), excluded_sym_names=None, excluded_op_names=None,
calib_mode='entropy', calib_data=None, num_calib_batches=None,
Expand Down

0 comments on commit 36cb619

Please sign in to comment.