Skip to content
This repository has been archived by the owner on Nov 17, 2023. It is now read-only.

[MXNET-860] Reduce redundant copies, check for regressions with clang-tidy #12355

Merged
merged 2 commits into from
Sep 18, 2018
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion .clang-tidy
Original file line number Diff line number Diff line change
Expand Up @@ -54,7 +54,7 @@ Checks: >

# In order to trigger an error, you must have a rule defined both in checks and in this section.
WarningsAsErrors: >
cppcoreguidelines-no-malloc
cppcoreguidelines-no-malloc, performance-unnecessary-copy-initialization

# TODO: define a better regex match that includes most project headers, but excludes third-party
# code.
Expand Down
4 changes: 2 additions & 2 deletions src/ndarray/ndarray.cc
Original file line number Diff line number Diff line change
Expand Up @@ -1031,7 +1031,7 @@ inline void CopyFromToRspImpl(const NDArray& from, const NDArray& to, RunContext
op::FillZerosRspImpl(s, to);
return;
}
auto aux_shape = from.aux_shape(rowsparse::kIdx);
const auto& aux_shape = from.aux_shape(rowsparse::kIdx);
to.CheckAndAlloc({aux_shape});
TBlob val = to.data();
TBlob idx = to.aux_data(rowsparse::kIdx);
Expand Down Expand Up @@ -1122,7 +1122,7 @@ void CopyFromToImpl(const NDArray& from, const NDArray& to,
if (from_stype == to_stype) {
casted_nd = from; // same stype, no need to cast from
} else { // different stypes on different ctx needs a temporary casted_nd
TShape shape = from.shape();
const TShape& shape = from.shape();
if (to_stype == kDefaultStorage) {
casted_nd = NDArray(shape, from_ctx);
} else {
Expand Down
6 changes: 3 additions & 3 deletions src/operator/contrib/roi_align.cc
Original file line number Diff line number Diff line change
Expand Up @@ -422,7 +422,7 @@ void ROIAlignForwardCompute(const nnvm::NodeAttrs& attrs,
CHECK_EQ(out_data.size(), expected_out);
CHECK_EQ(out_data[roialign::kOut].shape_[0], in_data[roialign::kBox].shape_[0]);

const ROIAlignParam param = nnvm::get<ROIAlignParam>(attrs.parsed);
const ROIAlignParam& param = nnvm::get<ROIAlignParam>(attrs.parsed);

const int count = out_data[roialign::kOut].Size();
// const int num_rois = in_data[roialign::kBox].size(0);
Expand Down Expand Up @@ -466,7 +466,7 @@ void ROIAlignBackwardCompute(const nnvm::NodeAttrs& attrs,
CHECK_NE(req[1], kWriteInplace) <<
"ROIAlign: Backward doesn't support kWriteInplace.";

const ROIAlignParam param = nnvm::get<ROIAlignParam>(attrs.parsed);
const ROIAlignParam& param = nnvm::get<ROIAlignParam>(attrs.parsed);

const int count = out_grad[0].Size();
const int num_rois = in_data[0].size(0);
Expand Down Expand Up @@ -534,7 +534,7 @@ He, Kaiming, et al. "Mask R-CNN." ICCV, 2017
.set_attr<nnvm::FInferShape>("FInferShape", [](const nnvm::NodeAttrs& attrs,
std::vector<TShape> *in_shape, std::vector<TShape> *out_shape){
using namespace mshadow;
const ROIAlignParam param = nnvm::get<ROIAlignParam>(attrs.parsed);
const ROIAlignParam& param = nnvm::get<ROIAlignParam>(attrs.parsed);
CHECK_EQ(in_shape->size(), 2) << "Input:[data, rois]";
// data: [batch_size, c, h, w]
TShape dshape = in_shape->at(roialign::kData);
Expand Down