This repository has been archived by the owner on Nov 17, 2023. It is now read-only.

Enable clang-tidy performance rules #19226

Merged · 3 commits · Oct 5, 2020
10 changes: 9 additions & 1 deletion .clang-tidy
@@ -39,12 +39,20 @@ WarningsAsErrors: >
modernize-use-emplace, modernize-use-equals-default, modernize-use-equals-delete,
modernize-use-noexcept, modernize-use-nullptr, modernize-use-override,
modernize-use-transparent-functors, modernize-use-using,
-performance-unnecessary-copy-initialization, performance-move-const-arg
+performance-faster-string-find, performance-implicit-conversion-in-loop,
+performance-inefficient-algorithm, performance-inefficient-string-concatenation,
+performance-trivially-destructible, performance-inefficient-vector-operation,
+performance-move-const-arg, performance-move-constructor-init,
+performance-noexcept-move-constructor, performance-no-automatic-move,
+performance-unnecessary-copy-initialization, performance-type-promotion-in-math-fn

# modernize checks not enforced:
# modernize-use-auto
# modernize-avoid-bind

+# performance checks not enforced due to segmentation fault
+# performance-for-range-copy

# Todo: define a better regex match that includes most project headers, but excludes third party
# code.
HeaderFilterRegex: '^src/.*'
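
For context, a minimal sketch of the kind of code the newly enabled performance checks flag; the function and variable names here are invented for illustration and do not come from this PR:

    #include <string>
    #include <vector>

    void sketch(const std::vector<std::string>& names) {
      // performance-inefficient-vector-operation: reserve before a push_back loop
      std::vector<size_t> lengths;
      lengths.reserve(names.size());
      // a by-value loop variable over std::string would copy once per iteration
      for (const auto& name : names) {
        lengths.push_back(name.size());
      }
      // performance-faster-string-find: pass a char instead of a one-char string
      std::string csv = "a,b,c";
      size_t comma = csv.find(',');  // flagged if written as csv.find(",")
      (void)comma;
    }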
2 changes: 1 addition & 1 deletion example/extensions/lib_custom_op/transposerowsp_lib.cc
@@ -60,7 +60,7 @@ void transpose(MXTensor& src, MXTensor& dst, const OpResource& res) {
res.alloc_sparse(B, 0, mp.size());
float *Bval = (float*) (B->data);
int didx = 0, iidx = 0;
-for(auto i : mp) {
+for(const auto& i : mp) {
B->indices[iidx++] = i.first;
for(auto j : i.second) {
Bval[didx++] = j;
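
The const reference above pays off because each element of mp is a key/value pair whose value is itself a container, so a by-value loop variable would copy the whole payload on every iteration. A sketch, assuming mp is a std::map<int, std::vector<float>> (the real type is declared earlier in the original file):

    #include <map>
    #include <vector>

    void walk(const std::map<int, std::vector<float>>& mp) {
      for (const auto& i : mp) {    // const& binds to each pair without copying
        int row = i.first;          // key: row index
        for (float j : i.second) {  // float by value is fine: trivially copyable
          (void)row; (void)j;
        }
      }
    }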
1 change: 1 addition & 0 deletions src/api/operator/numpy/np_delete_op.cc
@@ -87,6 +87,7 @@ MXNET_REGISTER_API("_npi.delete")
}
}
std::vector<NDArray*> inputs;
+inputs.reserve(num_inputs);
for (int i = 0; i < num_inputs; ++i) {
inputs.push_back(args[i].operator mxnet::NDArray*());
}
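
This reserve-before-push_back pattern recurs throughout the PR and is what performance-inefficient-vector-operation asks for: without the reserve, a geometrically growing vector reallocates O(log n) times and moves every element on each reallocation. A self-contained sketch:

    #include <vector>

    int main() {
      const int num_inputs = 1024;
      std::vector<int> inputs;
      inputs.reserve(num_inputs);  // one allocation up front...
      for (int i = 0; i < num_inputs; ++i) {
        inputs.push_back(i);       // ...so no reallocation inside the loop
      }
      return 0;
    }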
3 changes: 3 additions & 0 deletions src/api/operator/numpy/np_insert_op.cc
@@ -61,6 +61,7 @@ MXNET_REGISTER_API("_npi.insert_scalar")
attrs.op = op;
SetAttrDict<op::NumpyInsertParam>(&attrs);
std::vector<NDArray*> inputs;
+inputs.reserve(num_inputs);
for (int i = 0; i < num_inputs; ++i) {
inputs.push_back(args[i].operator mxnet::NDArray*());
}
@@ -109,6 +110,7 @@ MXNET_REGISTER_API("_npi.insert_slice")
attrs.op = op;
SetAttrDict<op::NumpyInsertParam>(&attrs);
std::vector<NDArray*> inputs;
+inputs.reserve(num_inputs);
for (int i = 0; i < num_inputs; ++i) {
inputs.push_back(args[i].operator mxnet::NDArray*());
}
@@ -145,6 +147,7 @@ MXNET_REGISTER_API("_npi.insert_tensor")
attrs.op = op;
SetAttrDict<op::NumpyInsertParam>(&attrs);
std::vector<NDArray*> inputs;
+inputs.reserve(num_inputs);
for (int i = 0; i < num_inputs; ++i) {
inputs.push_back(args[i].operator mxnet::NDArray*());
}
3 changes: 3 additions & 0 deletions src/api/operator/numpy/np_matrix_op.cc
@@ -155,6 +155,7 @@ MXNET_REGISTER_API("_npi.concatenate")
SetAttrDict<op::NumpyConcatenateParam>(&attrs);
int num_inputs = arg_size - 2;
std::vector<NDArray*> inputs;
+inputs.reserve(num_inputs);
for (int i = 0; i < num_inputs; ++i) {
inputs.push_back(args[i].operator mxnet::NDArray*());
}
@@ -303,6 +304,7 @@ MXNET_REGISTER_API("_npi.column_stack")
SetAttrDict<op::NumpyColumnStackParam>(&attrs);
int num_outputs = 0;
std::vector<NDArray*> inputs;
+inputs.reserve(param.num_args);
for (int i = 0; i < param.num_args; ++i) {
inputs.push_back(args[i].operator mxnet::NDArray*());
}
@@ -323,6 +325,7 @@ MXNET_REGISTER_API("_npi.hstack")
SetAttrDict<op::ConcatParam>(&attrs);
int num_outputs = 0;
std::vector<NDArray*> inputs;
+inputs.reserve(param.num_args);
for (int i = 0; i < param.num_args; ++i) {
inputs.push_back(args[i].operator mxnet::NDArray*());
}
1 change: 1 addition & 0 deletions src/api/operator/random/np_uniform_op.cc
@@ -60,6 +60,7 @@ MXNET_REGISTER_API("_npi.uniform")
num_inputs = 2;
}
}
+inputs.reserve(num_inputs);
for (int i = 0; i < num_inputs; ++i) {
inputs.push_back(args[i].operator mxnet::NDArray*());
}
8 changes: 4 additions & 4 deletions src/c_api/c_api.cc
@@ -982,7 +982,7 @@ void registerOperators(void *lib, int verbose, mxnet::ext::msgSize_t msgSize,
// InferSType is defined in customized lib.
// convert attributes to vector of char*
std::vector<const char*> attr_keys, attr_vals;
-for (auto kv : attrs.dict) {
+for (const auto& kv : attrs.dict) {
attr_keys.push_back(kv.first.c_str());
attr_vals.push_back(kv.second.c_str());
}
@@ -1047,7 +1047,7 @@ void registerOperators(void *lib, int verbose, mxnet::ext::msgSize_t msgSize,
p->attrs.name = n->attrs.name + "_backward";
// copy attributes and subgraphs
p->attrs.dict = n->attrs.dict;
-for (auto s : n->attrs.subgraphs)
+for (const auto& s : n->attrs.subgraphs)
p->attrs.subgraphs.push_back(s);
// set control dependency and attr parser
p->control_deps.emplace_back(n);
@@ -1101,8 +1101,8 @@ void registerOperators(void *lib, int verbose, mxnet::ext::msgSize_t msgSize,

// determine amount of memory needed to store all the input shapes
size_t buff_size = 0;
-for (size_t i = 0; i < in_shapes.size(); ++i)
-  buff_size += in_shapes[i].ndim();
+for (const auto & in_shape : in_shapes)
+  buff_size += in_shape.ndim();

// copy input shapes to raw memory layout
std::vector<uint32_t> inbuff(buff_size);
1 change: 1 addition & 0 deletions src/c_api/c_api_ndarray.cc
@@ -168,6 +168,7 @@ int MXCreateCachedOp(SymbolHandle handle,
nnvm::Symbol* sym = static_cast<nnvm::Symbol*>(handle);
API_BEGIN();
std::vector<std::pair<std::string, std::string> > flags;
+flags.reserve(num_flags);
for (int i = 0; i < num_flags; ++i) {
flags.emplace_back(keys[i], vals[i]);
}
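
The loop body here already uses emplace_back, which constructs each std::pair in place from the two C strings rather than building a temporary and moving it; the added reserve removes the remaining reallocation cost. A sketch with invented parameter names:

    #include <string>
    #include <utility>
    #include <vector>

    void collect_flags(const char** keys, const char** vals, int num_flags) {
      std::vector<std::pair<std::string, std::string>> flags;
      flags.reserve(num_flags);
      for (int i = 0; i < num_flags; ++i) {
        flags.emplace_back(keys[i], vals[i]);  // pair constructed in place
      }
    }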
1 change: 1 addition & 0 deletions src/imperative/eliminate_common_expr_pass.cc
@@ -50,6 +50,7 @@ using NodeInput = std::pair<const Node*, uint32_t>;
*/
std::vector<NodeInput> ConvertInputs(const std::vector<nnvm::NodeEntry>& inputs) {
std::vector<NodeInput> ret;
+ret.reserve(inputs.size());
for (const auto& entry : inputs) {
ret.emplace_back(entry.node.get(), entry.index);
}
2 changes: 1 addition & 1 deletion src/io/dataset.cc
@@ -439,7 +439,7 @@ class GroupDataset final : public Dataset {
CHECK_LT(idx, size_)
<< "GetItem index: " << idx << " out of bound: " << size_;
ret->clear();
-for (auto child : childs_) {
+for (const auto& child : childs_) {
std::vector<NDArray> temp_ret;
if (!child->GetItem(idx, &temp_ret)) return false;
ret->insert(ret->end(), temp_ret.begin(), temp_ret.end());
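
This fix is cheaper than it looks if childs_ holds smart-pointer handles: copying a shared_ptr by value bumps an atomic reference count on every iteration, while a const reference binds for free. A sketch under that assumption (the actual element type is defined in this file):

    #include <memory>
    #include <vector>

    struct Dataset;  // stand-in for the real interface

    void visit(const std::vector<std::shared_ptr<Dataset>>& childs) {
      for (const auto& child : childs) {  // no ref-count traffic per iteration
        (void)child;
      }
    }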
14 changes: 9 additions & 5 deletions src/lib_api.cc
@@ -250,8 +250,8 @@ std::string mxnet::ext::getShapeAt(const std::string& shape, unsigned index) {
int idx = 1; // start at 1 to skip the first square bracket [
// find the beginning of the output shape for the particular output index
for (unsigned x=0; x < index; x++)
idx = shape.find("[", idx+1);
int stop = shape.find("]", idx); // find stop index for this output shape
idx = shape.find('[', idx+1);
int stop = shape.find(']', idx); // find stop index for this output shape
// add this shape to the list
return shape.substr(idx, stop-idx+1);
}
@@ -260,9 +260,9 @@ std::string mxnet::ext::getDtypeAt(const std::string& dtype, unsigned index) {
// find the beginning of the output dtype for the particular output index
int idx = 0;
for (unsigned x=0; x < index; x++)
idx = dtype.find(",", idx+1);
int stop = dtype.find(",", idx+1); // find stop index for this output dtype
if (stop == -1) stop = dtype.find("]", idx+1);
idx = dtype.find(',', idx+1);
int stop = dtype.find(',', idx+1); // find stop index for this output dtype
if (stop == -1) stop = dtype.find(']', idx+1);
return dtype.substr(idx+1, stop-idx-1);
}

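These single-quote changes are exactly what performance-faster-string-find wants: find("[") selects the const char* overload, a general substring search, while find('[') selects the char overload, a plain single-character scan. Both overloads are standard and return the same position:

    #include <string>

    int main() {
      std::string shape = "[2,3][4,5]";
      size_t a = shape.find('[', 1);  // char overload: simple scan
      size_t b = shape.find("[", 1);  // const char* overload: substring search
      return a == b ? 0 : 1;          // same answer, different cost
    }
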
@@ -1467,6 +1467,7 @@ MX_INT_RET _partCallReviewSubgraph(mxnet::ext::reviewSubgraph_t reviewSubgraph,
std::unordered_map<std::string, mxnet::ext::MXTensor> args;
for (int i = 0; i < num_args; i++) {
std::vector<int64_t> shapes;
+shapes.reserve(arg_dims[i]);
for (int j = 0; j < arg_dims[i]; j++)
shapes.push_back(arg_shapes[i][j]);

@@ -1478,6 +1479,7 @@
std::unordered_map<std::string, mxnet::ext::MXTensor> aux;
for (int i = 0; i < num_aux; i++) {
std::vector<int64_t> shapes;
+shapes.reserve(aux_dims[i]);
for (int j = 0; j < aux_dims[i]; j++)
shapes.push_back(aux_shapes[i][j]);

@@ -1553,6 +1555,7 @@ MX_INT_RET _passCallGraphPass(mxnet::ext::graphPass_t graphPass, const char *jso
std::unordered_map<std::string, mxnet::ext::MXTensor> args;
for (int i = 0; i < num_args; i++) {
std::vector<int64_t> shapes;
+shapes.reserve(arg_dims[i]);
for (int j = 0; j < arg_dims[i]; j++)
shapes.push_back(arg_shapes[i][j]);

@@ -1565,6 +1568,7 @@
std::unordered_map<std::string, mxnet::ext::MXTensor> aux;
for (int i = 0; i < num_aux; i++) {
std::vector<int64_t> shapes;
+shapes.reserve(aux_dims[i]);
for (int j = 0; j < aux_dims[i]; j++)
shapes.push_back(aux_shapes[i][j]);

1 change: 1 addition & 0 deletions src/nnvm/tvm_bridge.cc
@@ -148,6 +148,7 @@ void WrapAsyncCall(TVMArgs wrap_args, TVMRetValue* wrap_rv) {

// sorted position of constant arguments
std::vector<int> const_loc;
+const_loc.reserve(num_const);
for (int i = 0; i < num_const; ++i) {
const_loc.push_back(wrap_args[i + 3].operator int());
}
14 changes: 10 additions & 4 deletions src/operator/bilinear_sampler.cc
@@ -24,6 +24,8 @@
* \author Xu Dong
*/

+#include <cmath>

#include "./bilinear_sampler-inl.h"

namespace mshadow {
@@ -48,8 +50,10 @@ inline void BilinearSamplerForward(const Tensor<cpu, 4, DType> &output,
index_t grid_index = n * o_h * o_w * 2 + h * o_w + w;
DType y_real = (*(grid + grid_index + o_h * o_w) + 1) * (i_h - 1) / 2;
DType x_real = (*(grid + grid_index) + 1) * (i_w - 1) / 2;
-int top_left_y = static_cast<int>(floor(y_real));
-int top_left_x = static_cast<int>(floor(x_real));
+// NOLINTNEXTLINE
+int top_left_y = static_cast<int>(std::floor(y_real));
+// NOLINTNEXTLINE
+int top_left_x = static_cast<int>(std::floor(x_real));
DType top_left_y_w = 1.0 - (y_real - top_left_y);
DType top_left_x_w = 1.0 - (x_real - top_left_x);
int data_index = n * i_c * i_h * i_w + c * i_h * i_w +
@@ -100,8 +104,10 @@ inline void BilinearSamplerBackward(const Tensor<cpu, 4, DType> &gdata,
index_t grid_src_index = n * o_h * o_w * 2 + h * o_w + w;
DType y_real = (*(grid_src + grid_src_index + o_h * o_w) + 1) * (i_h - 1) / 2;
DType x_real = (*(grid_src + grid_src_index) + 1) * (i_w - 1) / 2;
-int top_left_y = static_cast<int>(floor(y_real));
-int top_left_x = static_cast<int>(floor(x_real));
+// NOLINTNEXTLINE
+int top_left_y = static_cast<int>(std::floor(y_real));
+// NOLINTNEXTLINE
+int top_left_x = static_cast<int>(std::floor(x_real));
DType top_left_y_w = 1.0 - (y_real - top_left_y);
DType top_left_x_w = 1.0 - (x_real - top_left_x);
for (index_t c = 0; c < static_cast<index_t>(o_c); ++c) {
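
A bare // NOLINTNEXTLINE, as added above, silences every clang-tidy diagnostic on the following line. The marker also accepts a parenthesized list of checks to narrow the suppression, which keeps unrelated diagnostics alive; the check name below is illustrative only:

    #include <cmath>

    // NOLINTNEXTLINE(cppcoreguidelines-narrowing-conversions)
    int truncate(double y_real) { return static_cast<int>(std::floor(y_real)); }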
2 changes: 2 additions & 0 deletions src/operator/custom/ndarray_op.cc
@@ -80,6 +80,7 @@ void NDArrayOp<xpu>::Forward(const OpContext &ctx,
ndvar.resize(std::unique(ndvar.begin(), ndvar.end()) - ndvar.begin());

std::vector<NDArray> ndcpy;
+ndcpy.reserve(ptrs.size());
for (auto& i : ptrs) {
ndcpy.push_back(*reinterpret_cast<NDArray*>(i));
}
@@ -129,6 +130,7 @@ void NDArrayOp<xpu>::Backward(const OpContext &ctx,
}

std::vector<NDArray> ndcpy;
+ndcpy.reserve(ptrs.size());
for (auto& i : ptrs) {
ndcpy.push_back(*reinterpret_cast<NDArray*>(i));
}
1 change: 1 addition & 0 deletions src/operator/nn/concat.cc
@@ -326,6 +326,7 @@ DMLC_REGISTER_PARAMETER(ConcatParam);
[](const NodeAttrs& attrs) { \
const ConcatParam& params = nnvm::get<ConcatParam>(attrs.parsed); \
std::vector<std::string> ret; \
+ret.reserve(params.num_args); \
for (int i = 0; i < params.num_args; ++i) { \
ret.push_back(std::string("arg") + std::to_string(i)); \
} \
1 change: 1 addition & 0 deletions src/operator/nn/upsampling.cc
@@ -77,6 +77,7 @@ static bool UpSamplingShape(const nnvm::NodeAttrs& attrs,
static inline std::vector<std::string> ListArguments(const UpSamplingParam& param) {
if (param.sample_type == up_enum::kNearest) {
std::vector<std::string> ret;
+ret.reserve(param.num_args);
for (int i = 0; i < param.num_args; ++i) {
ret.push_back(std::string("arg") + std::to_string(i));
}
1 change: 1 addition & 0 deletions src/operator/numpy/np_einsum_op.cc
@@ -342,6 +342,7 @@ NNVM_REGISTER_OP(_npi_einsum)
[](const nnvm::NodeAttrs& attrs) {
int num_args = dmlc::get<NumpyEinsumParam>(attrs.parsed).num_args;
std::vector<std::string> ret;
+ret.reserve(num_args);
for (int i = 0; i < num_args; i++) {
ret.push_back(std::string("arg") + std::to_string(i));
}
1 change: 1 addition & 0 deletions src/operator/numpy/np_init_op.cc
@@ -161,6 +161,7 @@ NNVM_REGISTER_OP(_npi_atleast_##N##d) \
[](const nnvm::NodeAttrs& attrs) { \
int num_args = nnvm::get<AtleastNDParam>(attrs.parsed).num_args; \
std::vector<std::string> ret; \
+ret.reserve(num_args); \
for (int i = 0; i < num_args; i++) { \
ret.push_back(std::string("ary") + std::to_string(i)); \
} \
5 changes: 5 additions & 0 deletions src/operator/numpy/np_matrix_op.cc
@@ -690,6 +690,7 @@ NNVM_REGISTER_OP(_npi_concatenate)
[](const NodeAttrs& attrs) {
const NumpyConcatenateParam& params = nnvm::get<NumpyConcatenateParam>(attrs.parsed);
std::vector<std::string> ret;
+ret.reserve(params.num_args);
for (int i = 0; i < params.num_args; ++i) {
ret.push_back(std::string("data") + std::to_string(i));
}
@@ -878,6 +879,7 @@ NNVM_REGISTER_OP(_npi_column_stack)
[](const nnvm::NodeAttrs& attrs) {
int num_args = dmlc::get<NumpyColumnStackParam>(attrs.parsed).num_args;
std::vector<std::string> ret;
+ret.reserve(num_args);
for (int i = 0; i < num_args; ++i) {
ret.push_back(std::string("arg") + std::to_string(i));
}
@@ -1020,6 +1022,7 @@ NNVM_REGISTER_OP(_npi_vstack)
[](const nnvm::NodeAttrs& attrs) {
int num_args = dmlc::get<NumpyVstackParam>(attrs.parsed).num_args;
std::vector<std::string> ret;
+ret.reserve(num_args);
for (int i = 0; i < num_args; i++) {
ret.push_back(std::string("arg") + std::to_string(i));
}
@@ -1055,6 +1058,7 @@ NNVM_REGISTER_OP(_npi_hstack)
[](const NodeAttrs& attrs) {
const ConcatParam& params = nnvm::get<ConcatParam>(attrs.parsed);
std::vector<std::string> ret;
+ret.reserve(params.num_args);
for (int i = 0; i < params.num_args; ++i) {
ret.push_back(std::string("data") + std::to_string(i));
}
@@ -1093,6 +1097,7 @@ NNVM_REGISTER_OP(_npi_dstack)
[](const NodeAttrs& attrs) {
const ConcatParam& params = nnvm::get<ConcatParam>(attrs.parsed);
std::vector<std::string> ret;
+ret.reserve(params.num_args);
for (int i = 0; i < params.num_args; ++i) {
ret.push_back(std::string("data") + std::to_string(i));
}
2 changes: 2 additions & 0 deletions src/operator/optimizer_op.cc
@@ -389,6 +389,7 @@ Where the parameter ``momentum`` is the decay rate of momentum estimates at each
[](const nnvm::NodeAttrs& attrs) {
std::vector<uint32_t> ret;
const MultiSGDMomParam& param = dmlc::get<MultiSGDMomParam>(attrs.parsed);
+ret.reserve(param.num_weights);
for (int i = 0; i < param.num_weights; ++i) {
ret.push_back(i * 3 + 2);
}
@@ -432,6 +433,7 @@ It updates the weights using::
[](const nnvm::NodeAttrs& attrs) {
std::vector<uint32_t> ret;
const MultiSGDParam& param = dmlc::get<MultiSGDParam>(attrs.parsed);
+ret.reserve(param.num_weights);
for (int i = 0; i < param.num_weights; ++i) {
ret.push_back(i * 3 + 2);
}
1 change: 1 addition & 0 deletions src/operator/quantization/quantized_concat.cc
@@ -115,6 +115,7 @@ If any input holds int8, then the output will be int8. Otherwise output will be
.set_attr<nnvm::FListInputNames>("FListInputNames", [](const NodeAttrs& attrs) {
const ConcatParam& params = nnvm::get<ConcatParam>(attrs.parsed);
std::vector<std::string> ret;
+ret.reserve(params.num_args);
for (int i = 0; i < params.num_args; ++i) {
ret.push_back(std::string("arg") + std::to_string(i));
}
1 change: 1 addition & 0 deletions src/operator/subgraph/build_subgraph.cc
@@ -682,6 +682,7 @@ void AdjustSubgraphNode(nnvm::Graph* g,
const SubgraphSelectorV2Ptr& subgraph_selector,
const size_t subgraph_id) {
std::vector<nnvm::Node*> node_list;
+node_list.reserve(subgraph_nodes.size());
for (auto node : subgraph_nodes) {
node_list.push_back(node->node);
}
2 changes: 2 additions & 0 deletions src/operator/tensor/amp_cast.cc
@@ -189,6 +189,7 @@ It casts only between low precision float/FP32 and does not do anything for othe
int num_args =
dmlc::get<AMPMultiCastParam>(attrs.parsed).num_outputs;
std::vector<std::pair<int, int>> ret;
+ret.reserve(num_args);
for (int i = 0; i < num_args; ++i) {
ret.emplace_back(i, i);
}
@@ -234,6 +235,7 @@ NNVM_REGISTER_OP(_backward_amp_multicast)
[](const NodeAttrs& attrs){
int num_args = dmlc::get<AMPMultiCastParam>(attrs.parsed).num_outputs;
std::vector<std::pair<int, int>> ret;
+ret.reserve(num_args);
for (int i = 0; i < num_args; ++i) {
ret.emplace_back(i, i);
}
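
Of the newly enforced checks that this diff does not exercise, performance-noexcept-move-constructor deserves a note: for copyable element types, std::vector only uses the move constructor during reallocation if it is noexcept, and otherwise falls back to copying every element. An illustrative type, not taken from this codebase:

    #include <utility>
    #include <vector>

    struct Buffer {
      std::vector<char> data;
      Buffer() = default;
      // without noexcept, std::vector<Buffer> would copy on reallocation
      Buffer(Buffer&& other) noexcept : data(std::move(other.data)) {}
      Buffer& operator=(Buffer&& other) noexcept {
        data = std::move(other.data);
        return *this;
      }
    };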