
Commit ad36eb0

Fix lint

Change-Id: Ia38369d31c33d0f76a671275910729dfce693950

ZhennanQin committed Aug 21, 2019
1 parent 5aa8a26
Showing 6 changed files with 44 additions and 24 deletions.
12 changes: 12 additions & 0 deletions include/mxnet/c_api_error.h
@@ -32,11 +32,23 @@
 * The finally clause contains procedure to cleanup states when an error happens.
 */
#define MX_API_BEGIN() \
  try { \
  on_enter_api(__FUNCTION__);
#define MX_API_END() \
  } \
  catch (const std::exception &_except_) { \
    on_exit_api(); \
    return MXAPIHandleException(_except_); \
  } \
  on_exit_api(); \
  return 0;  // NOLINT(*)
#define MX_API_END_HANDLE_ERROR(Finalize) \
  } \
  catch (const std::exception &_except_) { \
    Finalize; \
    on_exit_api(); \
    return MXAPIHandleException(_except_); \
  } \
  on_exit_api(); \
  return 0;  // NOLINT(*)
/*!
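For context, these macros bracket every C API entry point so that C++ exceptions never escape across the C boundary. A minimal usage sketch (MXDummyAPICall is a hypothetical name for illustration, not a real MXNet function):

int MXDummyAPICall(int* out) {
  MX_API_BEGIN();  // expands to `try {` plus on_enter_api(__FUNCTION__)
  *out = 42;       // anything thrown here is caught by the macro below
  MX_API_END();    // catches, calls on_exit_api(), returns nonzero on error, 0 on success
}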
19 changes: 11 additions & 8 deletions python/mxnet/contrib/quantization.py
@@ -23,7 +23,6 @@
 except ImportError:
     stats = None
 
-import sys
 import ctypes
 import logging
 import os
@@ -154,6 +153,8 @@ def _quantize_symbol(sym, ctx, excluded_symbols=None, excluded_operators=None,
     return Symbol(out), calib_layer
 
 def combine_histogram(old_hist, arr, new_min, new_max, new_th):
+    """ Collect layer histogram for arr and combine it with old histogram.
+    """
     (old_hist, old_hist_edges, old_min, old_max, old_th) = old_hist
     if new_th <= old_th:
         hist, _ = np.histogram(arr, bins=len(old_hist), range=(-old_th, old_th))
@@ -282,7 +283,7 @@ def _collect_layer_output_min_max(mod, data, quantized_dtype, include_layer=None,
     return collector.min_max_dict, num_examples
 
 
-def _collect_layer_histogram(mod, data, quantized_dtype, include_layer=None,
+def _collect_layer_histogram(mod, data, include_layer=None,
                              max_num_examples=None, logger=None):
     """Collect layer outputs and save them in a dictionary mapped by layer names."""
     collector = _LayerHistogramCollector(include_layer=include_layer, logger=logger)
@@ -492,11 +493,13 @@ def quantize_model(sym, arg_params, aux_params,
         The maximum number of examples that user would like to use for calibration. If not provided,
         the whole calibration dataset will be used.
     quantized_dtype : str
-        The quantized destination type for input data. Currently support 'int8'
-        , 'uint8' and 'auto'. 'auto' means automatically select output type according to calibration result.
+        The quantized destination type for input data. Currently support 'int8', 'uint8' and 'auto'.
+        'auto' means automatically select output type according to calibration result.
         Default value is 'int8'.
     quantize_mode : str
-        The mode that quantization pass to apply. Support 'full' and 'smart'. 'full' means quantize all operator if possible. 'smart' means quantization pass will smartly choice which operator should be quantized.
+        The mode that quantization pass to apply. Support 'full' and 'smart'.
+        'full' means quantize all operator if possible.
+        'smart' means quantization pass will smartly choice which operator should be quantized.
     logger : Object
         A logging object for printing information during the process of quantization.
@@ -548,7 +551,7 @@ def quantize_model(sym, arg_params, aux_params,
         mod.bind(for_training=False, data_shapes=calib_data.provide_data)
         mod.set_params(arg_params, aux_params)
         if calib_mode == 'entropy':
-            hist_dict, num_examples = _collect_layer_histogram(mod, calib_data, quantized_dtype,
+            hist_dict, num_examples = _collect_layer_histogram(mod, calib_data,
                                                                include_layer=calib_layer,
                                                                max_num_examples=num_calib_examples,
                                                                logger=logger)
@@ -681,8 +684,8 @@ def quantize_graph(sym, arg_params, aux_params, ctx=cpu(),
             logger.info(
                 'Create a layer output collector for entropy calibration.')
         elif calib_mode == 'naive':
-            collector = _LayerOutputMinMaxCollector(
-                include_layer=calib_layer, logger=logger)
+            collector = _LayerOutputMinMaxCollector(quantized_dtype=quantized_dtype,
+                                                    include_layer=calib_layer, logger=logger)
             logger.info(
                 'Create a layer output minmax collector for naive calibration')
         else:
2 changes: 1 addition & 1 deletion src/c_api/c_api_symbolic.cc
@@ -918,7 +918,7 @@ int MXQuantizeSymbol(SymbolHandle sym_handle,
   g.attrs["target_ctx"] = std::make_shared<nnvm::any>(target_dev);
   g.attrs["quantize_mode"] = std::make_shared<nnvm::any>(std::move(quantized_mode));
   g = ApplyPass(std::move(g), "QuantizeGraph");
-  const auto& calib_nodes =g.GetAttr<std::vector<std::string>>("calib_nodes");
+  const auto& calib_nodes = g.GetAttr<std::vector<std::string>>("calib_nodes");
   MXAPIThreadLocalEntry<> *ret = MXAPIThreadLocalStore<>::Get();
   ret->ret_vec_str = std::move(calib_nodes);
   *out_num_calib_names = ret->ret_vec_str.size();
7 changes: 5 additions & 2 deletions src/operator/nn/mkldnn/mkldnn_flatten-inl.h
@@ -18,11 +18,13 @@
  */
 
 /*!
- * \file mkldnn_flatten.cc
+ * \file mkldnn_flatten-inl.h
  * \brief Implement flatten operator by using mkldnn reorder primitive
  * \author Wuxun Zhang
  */
 
+#ifndef MXNET_OPERATOR_NN_MKLDNN_MKLDNN_FLATTEN_INL_H_
+#define MXNET_OPERATOR_NN_MKLDNN_MKLDNN_FLATTEN_INL_H_
 #if MXNET_USE_MKLDNN == 1
 
 #include "mkldnn_reshape-inl.h"
@@ -42,4 +44,5 @@ void MKLDNNFlattenForward(const nnvm::NodeAttrs &attrs, const OpContext &ctx, co
 }  // namespace op
 }  // namespace mxnet
 
-#endif
+#endif  // MXNET_USE_MKLDNN == 1
+#endif  // MXNET_OPERATOR_NN_MKLDNN_MKLDNN_FLATTEN_INL_H_
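The two #endif comments spell out the nesting that the cpplint header-guard rule expects: the path-derived include guard is the outermost conditional, and the MKLDNN feature gate sits inside it, so the header stays include-once even when MXNET_USE_MKLDNN is 0. Schematically:

#ifndef MXNET_OPERATOR_NN_MKLDNN_MKLDNN_FLATTEN_INL_H_  // include guard, outermost
#define MXNET_OPERATOR_NN_MKLDNN_MKLDNN_FLATTEN_INL_H_
#if MXNET_USE_MKLDNN == 1                               // feature gate, nested
// ... MKLDNN-only declarations ...
#endif  // MXNET_USE_MKLDNN == 1
#endif  // MXNET_OPERATOR_NN_MKLDNN_MKLDNN_FLATTEN_INL_H_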
5 changes: 3 additions & 2 deletions src/operator/quantization/calibrate.cc
@@ -61,7 +61,8 @@ std::vector<float> SmoothDistribution(const std::vector<float>& p, const float e
   }
   return ret;
 }
-static float ComputeEntropy(std::vector<float>& p, std::vector<float>& q) {
+
+static float ComputeEntropy(std::vector<float>* p, std::vector<float>* q) {
   CHECK_EQ(p.size(), q.size());
   float p_sum = std::accumulate(p.begin(), p.end(), 0.f);
   float q_sum = std::accumulate(q.begin(), q.end(), 0.f);
@@ -150,7 +151,7 @@ void CalibrateComputeCPU(const nnvm::NodeAttrs& attrs, const OpContext& ctx,
     if (!q.size()) {
       divergence[i - num_half_quantized_bins] = std::numeric_limits<float>::infinity();
     } else {
-      divergence[i - num_half_quantized_bins] = ComputeEntropy(p, q);
+      divergence[i - num_half_quantized_bins] = ComputeEntropy(&p, &q);
     }
   }
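For reference, ComputeEntropy scores a candidate clipping threshold by the relative entropy (KL divergence) between the clipped histogram p and its quantized reconstruction q. A self-contained sketch of that computation, independent of the MXNet sources (KLDivergence and its signature are assumptions for illustration):

#include <cassert>
#include <cmath>
#include <cstddef>
#include <vector>

// Minimal sketch: D(p || q) after normalizing both histograms to
// probability distributions. Mirrors the role of ComputeEntropy above,
// but is not the MXNet implementation.
static float KLDivergence(const std::vector<float>& p, const std::vector<float>& q) {
  assert(p.size() == q.size());
  float p_sum = 0.f, q_sum = 0.f;
  for (std::size_t i = 0; i < p.size(); ++i) {
    p_sum += p[i];
    q_sum += q[i];
  }
  float divergence = 0.f;
  for (std::size_t i = 0; i < p.size(); ++i) {
    if (p[i] > 0.f && q[i] > 0.f) {
      const float pi = p[i] / p_sum;
      const float qi = q[i] / q_sum;
      divergence += pi * std::log(pi / qi);  // bin i's contribution
    }
  }
  return divergence;
}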
23 changes: 12 additions & 11 deletions src/operator/quantization/quantize_graph_pass.cc
@@ -55,9 +55,9 @@ static inline size_t GetNumOutputs(NodePtr node) {
 }
 
 static inline std::string GetOutputName(const NodeEntry& e) {
-    nnvm::Symbol sym;
-    sym.outputs.push_back(e);
-    return sym.ListOutputNames()[0];
+  nnvm::Symbol sym;
+  sym.outputs.push_back(e);
+  return sym.ListOutputNames()[0];
 }
 
 NodePtr CreateNode(std::string op_name, std::string node_name) {
@@ -174,8 +174,7 @@ inline QuantizeType NeedQuantize(NodePtr node,
   }
   if (quantizable_map.count(op)) {
     return quantizable_map[op](node->attrs);
-  }
-  else {
+  } else {
     return QuantizeType::kSupport;
   }
 }
@@ -190,7 +189,7 @@
 };
 
 static void MarkQuantizedNodes(const Graph& src,
-                               std::unordered_map<NodePtr, NodePtr>& quantized_node_map) {
+                               std::unordered_map<NodePtr, NodePtr>* quantized_node_map) {
   const auto excluded_nodes = src.GetAttr<std::unordered_set<std::string>>("excluded_nodes");
   const auto excluded_ops = src.GetAttr<std::unordered_set<std::string>>("excluded_ops");
   const auto quantize_mode = src.GetAttr<std::string>("quantize_mode");
@@ -273,7 +272,7 @@ Graph QuantizeGraph(Graph &&src) {
   const auto quantized_dtype = src.GetAttr<std::string>("quantized_dtype");
 
   std::unordered_map<NodePtr, NodePtr> quantized_node_map;
-  MarkQuantizedNodes(src, quantized_node_map);
+  MarkQuantizedNodes(src, &quantized_node_map);
 
   // mirror_map stores the mapping from the currently visited graph to the newly created quantized
   // graph. Key is the currently visited graph's node pointer, and value is a copied node of the key
@@ -471,8 +470,10 @@
   Graph ret;
   ret.outputs = std::move(outputs);
 
-  static const auto& need_calib_input_map = Op::GetAttr<mxnet::FNeedCalibrateInput>("FNeedCalibrateInput");
-  static const auto& need_calib_output_map = Op::GetAttr<mxnet::FNeedCalibrateOutput>("FNeedCalibrateOutput");
+  static const auto& need_calib_input_map =
+      Op::GetAttr<mxnet::FNeedCalibrateInput>("FNeedCalibrateInput");
+  static const auto& need_calib_output_map =
+      Op::GetAttr<mxnet::FNeedCalibrateOutput>("FNeedCalibrateOutput");
   std::vector<std::string> calib_nodes;
   DFSVisit(ret.outputs, [&](const NodePtr& node) {
     if (need_calib_input_map.count(node->op())) {
@@ -499,7 +500,8 @@
       const auto calib_idx = need_calib_output_map[node->op()](node->attrs);
       for (const auto& idx : calib_idx) {
         if (reverse_mirror_map.count(node)) {
-          calib_nodes.push_back(GetOutputName({reverse_mirror_map[node], static_cast<uint32_t>(idx), 0}));
+          calib_nodes.push_back(
+              GetOutputName({reverse_mirror_map[node], static_cast<uint32_t>(idx), 0}));
         } else {
           calib_nodes.push_back(GetOutputName({node, static_cast<uint32_t>(idx), 0}));
         }
@@ -514,7 +516,6 @@ static inline void SetCalibTableForEntry(
     const NodeEntry& e, const NodePtr& node,
     const std::unordered_map<std::string, std::pair<float, float>>& calib_table) {
   std::string out_data_name = GetOutputName(e);
-  ;
   const std::string prefix = "quantized_";
   if (e.node->attrs.name.rfind(prefix, 0) == 0) {
     out_data_name = out_data_name.substr(prefix.size());
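Several hunks above (ComputeEntropy, MarkQuantizedNodes) apply the same cpplint rule: a parameter the callee mutates is passed as a pointer rather than a non-const reference, so the & at the call site marks the output argument. A before/after sketch with hypothetical names, not MXNet code:

#include <vector>

// Lint-flagged style: the call site gives no hint that `out` is modified.
//   void FillSquares(int n, std::vector<int>& out);

// Style the Google C++ guide (and cpplint) prefers:
void FillSquares(int n, std::vector<int>* out) {
  out->clear();
  for (int i = 0; i < n; ++i) {
    out->push_back(i * i);  // mutate through the pointer
  }
}

int main() {
  std::vector<int> squares;
  FillSquares(4, &squares);  // `&squares` visibly flags the output argument
  return 0;
}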
