Release used ndarrays in calibration for saving memory usage
reminisce committed Jan 16, 2018
1 parent 2862776 commit c1fe124
Showing 1 changed file with 7 additions and 4 deletions.
python/mxnet/quantization.py (11 changes: 7 additions & 4 deletions)
@@ -273,13 +273,16 @@ def _get_optimal_thresholds(nd_dict, num_bins=8001, num_quantized_bins=255, logg
         logger.info('Calculating optimal thresholds for quantization using KL divergence'
                     ' with num_bins=%d and num_quantized_bins=%d' % (num_bins, num_quantized_bins))
     th_dict = {}
-    for k, v in nd_dict.items():
-        min_val, max_val, min_divergence, opt_th = _get_optimal_threshold(v, num_bins=num_bins,
+    layer_names = nd_dict.keys()
+    for name in layer_names:
+        assert name in nd_dict
+        min_val, max_val, min_divergence, opt_th = _get_optimal_threshold(nd_dict[name], num_bins=num_bins,
                                                                           num_quantized_bins=num_quantized_bins)
-        th_dict[k] = (-opt_th, opt_th)
+        del nd_dict[name]  # release the memory of ndarray
+        th_dict[name] = (-opt_th, opt_th)
         if logger is not None:
             logger.info('layer=%s, min_val=%f, max_val=%f, min_divergence=%f, optimal_threshold=%f'
-                        % (k, min_val, max_val, min_divergence, opt_th))
+                        % (name, min_val, max_val, min_divergence, opt_th))
     return th_dict
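
The idea of the change is to loop over a snapshot of the dictionary's keys and delete each entry as soon as its threshold has been computed, so the large calibration ndarray becomes unreferenced and its memory can be reclaimed before the next layer is processed. Below is a minimal, self-contained sketch of that pattern, not the MXNet source: it uses plain NumPy arrays in place of NDArrays, and compute_threshold() is a hypothetical stand-in for _get_optimal_threshold(). Note that under Python 3 the keys should be materialized with list() before the loop, since deleting from a dict while iterating over a live keys() view raises a RuntimeError.

import numpy as np

def compute_threshold(arr):
    # Hypothetical stand-in for _get_optimal_threshold(): a symmetric
    # threshold taken from the largest absolute value in the array.
    return float(np.abs(arr).max())

def calibrate(nd_dict):
    th_dict = {}
    # Snapshot the keys so entries can be deleted inside the loop
    # (deleting while iterating over nd_dict.keys() directly would
    # raise a RuntimeError under Python 3).
    for name in list(nd_dict.keys()):
        opt_th = compute_threshold(nd_dict[name])
        del nd_dict[name]  # drop the reference so the array can be freed
        th_dict[name] = (-opt_th, opt_th)
    return th_dict

if __name__ == '__main__':
    data = {'conv0_output': np.random.randn(4, 8),
            'fc1_output': np.random.randn(4, 16)}
    print(calibrate(data))
    print(data)  # the input dict is now empty: {}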


