Update clang-tidy integration (apache#18815)
Run clang-tidy via cmake only on the code managed by mxnet (not on 3rdparty dependencies), update to clang-tidy-10, and run clang-tidy-10 -fix to fix all warnings that are enforced on CI.

Developers can run clang-tidy by passing -DCMAKE_CXX_CLANG_TIDY="clang-tidy-10" to cmake, or by running python ci/build.py -R --platform ubuntu_cpu /work/runtime_functions.sh build_ubuntu_cpu_clang_tidy.
leezu committed Jul 29, 2020
1 parent b685fad commit 6bbd531
Showing 100 changed files with 479 additions and 462 deletions.
52 changes: 20 additions & 32 deletions .clang-tidy
@@ -17,45 +17,33 @@

# The checks defined here will be run and will display by default as warnings.
Checks: >
-*, cppcoreguidelines-c-copy-assignment-signature,
cppcoreguidelines-interfaces-global-init, cppcoreguidelines-no-malloc,
cppcoreguidelines-pro-bounds-constant-array-index, cppcoreguidelines-pro-type-const-cast,
cppcoreguidelines-pro-type-cstyle-cast, cppcoreguidelines-pro-type-member-init,
cppcoreguidelines-pro-type-static-cast-downcast, cppcoreguidelines-pro-type-union-access,
cppcoreguidelines-pro-type-vararg, cppcoreguidelines-slicing,
cppcoreguidelines-special-member-functions, clang-analyzer-security.FloatLoopCounter,
clang-analyzer-security.insecureAPI.*, clang-analyzer-core.CallAndMessage,
clang-analyzer-core.DivideZero, clang-analyzer-core.DynamicTypePropagation,
clang-analyzer-core.NonNullParamChecker, clang-analyzer-core.NullDereference,
clang-analyzer-core.StackAddressEscape, clang-analyzer-core.UndefinedBinaryOperatorResult,
clang-analyzer-core.VLASize, clang-analyzer-core.builtin.BuiltinFunctions,
clang-analyzer-core.builtin.NoReturnFunctions, clang-analyzer-core.uninitialized.ArraySubscript,
clang-analyzer-core.uninitialized.Assign, clang-analyzer-core.uninitialized.Branch,
clang-analyzer-core.uninitialized.CapturedBlockVariable,
clang-analyzer-core.uninitialized.UndefReturn, clang-analyzer-cplusplus.NewDelete,
clang-analyzer-cplusplus.NewDeleteLeaks, clang-analyzer-cplusplus.SelfAssignment,
clang-analyzer-deadcode.DeadStores, modernize-avoid-bind, modernize-deprecated-headers,
modernize-loop-convert, modernize-make-shared, modernize-pass-by-value,
-*, cppcoreguidelines-*, clang-analyzer-*, modernize-*,
performance-faster-string-find, performance-for-range-copy,
performance-implicit-conversion-in-loop, performance-inefficient-algorithm,
performance-inefficient-string-concatenation, performance-trivially-destructible,
performance-inefficient-vector-operation, performance-move-const-arg,
performance-move-constructor-init, performance-noexcept-move-constructor,
performance-no-automatic-move, performance-unnecessary-copy-initialization,
performance-type-promotion-in-math-fn
# performance checks not enabled due to segmentation fault in clang-tidy v8+:
# performance-unnecessary-value-param

# In order to trigger an error, you must have a rule defined both in checks and in this section.
WarningsAsErrors: >
cppcoreguidelines-no-malloc, modernize-deprecated-headers,
modernize-loop-convert, modernize-make-shared, modernize-pass-by-value, modernize-make-unique,
modernize-raw-string-literal, modernize-redundant-void-arg, modernize-replace-auto-ptr,
modernize-replace-random-shuffle, modernize-return-braced-init-list, modernize-shrink-to-fit,
modernize-unary-static-assert, modernize-use-bool-literals, modernize-use-default-member-init,
modernize-use-emplace, modernize-use-equals-default, modernize-use-equals-delete,
modernize-use-noexcept, modernize-use-nullptr, modernize-use-override,
modernize-use-transparent-functors, modernize-use-using, performance-*
modernize-use-transparent-functors, modernize-use-using,
performance-unnecessary-copy-initialization, performance-move-const-arg
# cppcoreguidelines checks not enabled:
# cppcoreguidelines-pro-bounds-pointer-arithmetic
# cppcoreguidelines-pro-bounds-array-to-pointer-decay
# cppcoreguidelines-pro-type-reinterpret-cast

# modernize checks not enabled:
# modernize checks not enforced:
# modernize-use-auto
# modernize-make-unique (C++14 and newer only)

# In order to trigger an error, you must have a rule defined both in checks and in this section.
WarningsAsErrors: >
cppcoreguidelines-no-malloc, modernize-use-nullptr, performance-unnecessary-copy-initialization,
modernize-use-emplace, performance-move-const-arg
# modernize-avoid-bind

# Todo: define a better regex match that includes most project headers, but excludes third party
# code.
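A note on how this configuration behaves (the snippet below is hypothetical, not code from this repository): a check listed only under Checks produces a warning, while a check also listed under WarningsAsErrors fails the build.

    // Hypothetical snippet illustrating Checks vs. WarningsAsErrors above.
    #include <cstdlib>

    struct Base {
      virtual void f() {}
      virtual ~Base() = default;
    };
    struct Derived : Base {
      virtual void f() {}  // modernize-use-override: warning only (not in WarningsAsErrors)
    };

    int main() {
      void* p = malloc(16);  // cppcoreguidelines-no-malloc: promoted to an error
      free(p);               // by WarningsAsErrors, so this fails the CI build
      return 0;
    }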
13 changes: 12 additions & 1 deletion CMakeLists.txt
@@ -275,6 +275,7 @@ if(USE_MKLDNN)
include_directories(${PROJECT_BINARY_DIR}/3rdparty/mkldnn/include)
add_definitions(-DMXNET_USE_MKLDNN=1)
list(APPEND mxnet_LINKER_LIBS dnnl)
set_target_properties(dnnl PROPERTIES CXX_CLANG_TIDY "") # don't lint 3rdparty dependency
endif()

# Allow Cuda compiles outside of src tree to find things in 'src' and 'include'
@@ -405,6 +406,7 @@ if(USE_OPENMP)
AND NOT CMAKE_CROSSCOMPILING)
load_omp()
list(APPEND mxnet_LINKER_LIBS omp)
set_target_properties(omp PROPERTIES CXX_CLANG_TIDY "") # don't lint 3rdparty dependency
if(UNIX)
list(APPEND mxnet_LINKER_LIBS pthread)
endif()
@@ -462,6 +464,8 @@ set(GTEST_MAIN_LIBRARY gtest_main)
set(GTEST_LIBRARY gtest)

add_subdirectory(${GTEST_ROOT})
set_target_properties(gtest PROPERTIES CXX_CLANG_TIDY "") # don't lint 3rdparty dependency
set_target_properties(gtest_main PROPERTIES CXX_CLANG_TIDY "") # don't lint 3rdparty dependency
find_package(GTest REQUIRED)

# cudnn detection
@@ -478,6 +482,7 @@ endif()

if(EXISTS ${CMAKE_CURRENT_SOURCE_DIR}/3rdparty/dmlc-core/cmake)
add_subdirectory("3rdparty/dmlc-core")
set_target_properties(dmlc PROPERTIES CXX_CLANG_TIDY "") # don't lint 3rdparty dependency
endif()

FILE(GLOB_RECURSE SOURCE "src/*.cc" "src/*.h" "include/*.h")
@@ -492,7 +497,9 @@ FILE(GLOB_RECURSE NNVMSOURCE
3rdparty/tvm/nnvm/src/core/*.h
3rdparty/tvm/nnvm/src/pass/*.h
3rdparty/tvm/nnvm/include/*.h)
list(APPEND SOURCE ${NNVMSOURCE})
add_library(nnvm OBJECT ${NNVMSOURCE})
set_target_properties(nnvm PROPERTIES CXX_CLANG_TIDY "") # don't lint 3rdparty dependency
list(APPEND SOURCE $<TARGET_OBJECTS:nnvm>)

# add source group
FILE(GLOB_RECURSE GROUP_SOURCE "src/*.cc" "3rdparty/tvm/nnvm/*.cc" "plugin/*.cc")
@@ -743,6 +750,7 @@ if(USE_DIST_KVSTORE)
add_subdirectory("3rdparty/ps-lite")
add_definitions(-DMXNET_USE_DIST_KVSTORE)
list(APPEND mxnet_LINKER_LIBS pslite)
set_target_properties(pslite PROPERTIES CXX_CLANG_TIDY "") # don't lint 3rdparty dependency
endif()

if(USE_MKLDNN)
@@ -757,6 +765,9 @@ function(BuildTVMOP)
# scope the variables in BuildTVM.cmake to avoid conflict
include(cmake/BuildTVM.cmake)
add_subdirectory("3rdparty/tvm")
set_target_properties(tvm PROPERTIES CXX_CLANG_TIDY "") # don't lint 3rdparty dependency
set_target_properties(tvm_topi PROPERTIES CXX_CLANG_TIDY "") # don't lint 3rdparty dependency
set_target_properties(tvm_runtime PROPERTIES CXX_CLANG_TIDY "") # don't lint 3rdparty dependency
endfunction()

if(USE_TVM_OP)
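All of the set_target_properties calls above rely on the same CMake mechanism: the CXX_CLANG_TIDY target property is initialized from the CMAKE_CXX_CLANG_TIDY variable for every target created after it is set, and clearing the property per target opts that target back out. A minimal sketch with hypothetical target and path names:

    # Minimal sketch (hypothetical names): lint first-party code, skip 3rdparty.
    cmake_minimum_required(VERSION 3.6)        # CXX_CLANG_TIDY requires CMake >= 3.6
    project(demo CXX)
    set(CMAKE_CXX_CLANG_TIDY "clang-tidy-10")  # default for all targets defined below
    add_library(firstparty src/a.cc)           # every TU is compiled through clang-tidy
    add_subdirectory(3rdparty/somedep)         # defines target 'somedep'
    set_target_properties(somedep PROPERTIES CXX_CLANG_TIDY "")  # don't lint 3rdparty dependency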
2 changes: 1 addition & 1 deletion ci/docker/Dockerfile.build.ubuntu
@@ -53,9 +53,9 @@ RUN export DEBIAN_FRONTEND=noninteractive && \
protobuf-compiler \
libprotobuf-dev \
clang-6.0 \
clang-tidy-6.0 \
python-yaml \
clang-10 \
clang-tidy-10 \
g++ \
g++-8 \
intel-mkl-2020.0-088 \
7 changes: 2 additions & 5 deletions ci/docker/runtime_functions.sh
@@ -462,21 +462,18 @@ build_ubuntu_cpu_clang100() {
build_ubuntu_cpu_clang_tidy() {
set -ex
cd /work/build
export CLANG_TIDY=/usr/lib/llvm-6.0/share/clang/run-clang-tidy.py
# TODO(leezu) USE_OPENMP=OFF 3rdparty/dmlc-core/CMakeLists.txt:79 broken?
CXX=clang++-6.0 CC=clang-6.0 cmake \
CXX=clang++-10 CC=clang-10 cmake \
-DUSE_MKL_IF_AVAILABLE=OFF \
-DUSE_MKLDNN=OFF \
-DUSE_CUDA=OFF \
-DUSE_OPENMP=OFF \
-DCMAKE_BUILD_TYPE=Debug \
-DUSE_DIST_KVSTORE=ON \
-DUSE_CPP_PACKAGE=ON \
-DCMAKE_EXPORT_COMPILE_COMMANDS=ON \
-DCMAKE_CXX_CLANG_TIDY=clang-tidy-10 \
-G Ninja /work/mxnet
ninja
cd /work/mxnet
$CLANG_TIDY -p /work/build -j $(nproc) -clang-tidy-binary clang-tidy-6.0 /work/mxnet/src
}

build_ubuntu_cpu_clang6_mkldnn() {
11 changes: 6 additions & 5 deletions example/extensions/lib_custom_op/gemm_lib.cc
@@ -24,6 +24,7 @@
*/

#include <iostream>
#include <utility>
#include "lib_api.h"

// main matrix multiplication routine
@@ -179,23 +180,23 @@ REGISTER_OP(my_gemm)
class MyStatefulGemm : public CustomStatefulOp {
public:
explicit MyStatefulGemm(int count,
const std::unordered_map<std::string, std::string>& attrs)
: count(count), attrs_(attrs) {}
std::unordered_map<std::string, std::string> attrs)
: count(count), attrs_(std::move(attrs)) {}

MXReturnValue Forward(std::vector<MXTensor>* inputs,
std::vector<MXTensor>* outputs,
const OpResource& op_res) {
const OpResource& op_res) override {
std::cout << "Info: keyword + number of forward: " << ++count << std::endl;
return forward(attrs_, inputs, outputs, op_res);
}

MXReturnValue Backward(std::vector<MXTensor>* inputs,
std::vector<MXTensor>* outputs,
const OpResource& op_res) {
const OpResource& op_res) override {
return backward(attrs_, inputs, outputs, op_res);
}

~MyStatefulGemm() {}
~MyStatefulGemm() = default;

private:
int count;
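The constructor change above is the modernize-pass-by-value rewrite: the attrs map is now taken by value and moved into the member, so rvalue arguments cost one move instead of a mandatory copy (lvalues still cost one copy plus one move). A minimal sketch of the same pattern with a hypothetical class:

    // Minimal sketch of modernize-pass-by-value (hypothetical class).
    #include <string>
    #include <utility>

    class Tag {
     public:
      explicit Tag(std::string name) : name_(std::move(name)) {}  // was: const std::string&
     private:
      std::string name_;
    };

    int main() {
      Tag t(std::string("gemm0"));  // the temporary is moved into name_, not copied
      return 0;
    }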
9 changes: 5 additions & 4 deletions example/extensions/lib_custom_op/transposecsr_lib.cc
@@ -24,6 +24,7 @@
*/

#include <iostream>
#include <utility>
#include "lib_api.h"

void transpose(MXTensor& src, MXTensor& dst, const OpResource& res) {
@@ -151,19 +152,19 @@ REGISTER_OP(my_transposecsr)
class MyStatefulTransposeCSR : public CustomStatefulOp {
public:
explicit MyStatefulTransposeCSR(int count,
const std::unordered_map<std::string, std::string>& attrs)
: count(count), attrs_(attrs) {}
std::unordered_map<std::string, std::string> attrs)
: count(count), attrs_(std::move(attrs)) {}

MXReturnValue Forward(std::vector<MXTensor>* inputs,
std::vector<MXTensor>* outputs,
const OpResource& op_res) {
const OpResource& op_res) override {
std::cout << "Info: keyword + number of forward: " << ++count << std::endl;
return forward(attrs_, inputs, outputs, op_res);
}

MXReturnValue Backward(std::vector<MXTensor>* inputs,
std::vector<MXTensor>* outputs,
const OpResource& op_res) {
const OpResource& op_res) override {
return backward(attrs_, inputs, outputs, op_res);
}

9 changes: 5 additions & 4 deletions example/extensions/lib_custom_op/transposerowsp_lib.cc
@@ -24,6 +24,7 @@
*/

#include <iostream>
#include <utility>
#include "lib_api.h"

void transpose(MXTensor& src, MXTensor& dst, const OpResource& res) {
@@ -153,19 +154,19 @@ REGISTER_OP(my_transposerowsp)
class MyStatefulTransposeRowSP : public CustomStatefulOp {
public:
explicit MyStatefulTransposeRowSP(int count,
const std::unordered_map<std::string, std::string>& attrs)
: count(count), attrs_(attrs) {}
std::unordered_map<std::string, std::string> attrs)
: count(count), attrs_(std::move(attrs)) {}

MXReturnValue Forward(std::vector<MXTensor>* inputs,
std::vector<MXTensor>* outputs,
const OpResource& op_res) {
const OpResource& op_res) override {
std::cout << "Info: keyword + number of forward: " << ++count << std::endl;
return forward(attrs_, inputs, outputs, op_res);
}

MXReturnValue Backward(std::vector<MXTensor>* inputs,
std::vector<MXTensor>* outputs,
const OpResource& op_res) {
const OpResource& op_res) override {
return backward(attrs_, inputs, outputs, op_res);
}

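As in gemm_lib.cc above, Forward and Backward gain override (modernize-use-override), so the compiler now rejects any signature drift between base and derived class. A minimal sketch with hypothetical classes:

    // Minimal sketch of modernize-use-override (hypothetical classes).
    struct Op {
      virtual int Forward(int x) { return x; }
      virtual ~Op() = default;
    };
    struct MyOp : Op {
      int Forward(int x) override { return x + 1; }  // `override` makes the compiler verify
      // this really overrides Op::Forward; a mismatched signature now fails to compile
    };

    int main() {
      MyOp op;
      return op.Forward(1) == 2 ? 0 : 1;
    }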
5 changes: 2 additions & 3 deletions example/extensions/lib_pass/pass_lib.cc
@@ -23,7 +23,7 @@
* \brief subgraph operator implementation library file
*/

#include <math.h>
#include <cmath>
#include <iostream>
#include <algorithm>
#include "lib_api.h"
@@ -67,8 +67,7 @@ MXReturnValue jsonPass(const std::string& in_graph, const std::string** out_graph,
JsonVal nodes = json_val.map[JsonVal("nodes")];

// loop over nodes
for(int i=0; i<nodes.list.size(); i++) {
JsonVal node = nodes.list[i];
for(auto node : nodes.list) {
// get the op name
std::string op = node.map[JsonVal("op")].str;
// get node ID inputs to op
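The loop rewrite above is modernize-loop-convert: an index loop whose body only reads the element becomes a range-based for. The automated fix keeps the original copy semantics (each JsonVal is still copied per iteration); switching to const auto& is a manual follow-up when the copy is unwanted, as in this sketch with hypothetical data:

    // Minimal sketch of modernize-loop-convert (hypothetical data).
    #include <iostream>
    #include <string>
    #include <vector>

    int main() {
      std::vector<std::string> ops{"log", "exp"};
      // before: for (int i = 0; i < ops.size(); i++) { std::string op = ops[i]; ... }
      for (const auto& op : ops) {  // range-for; const ref also avoids the per-iteration copy
        std::cout << op << '\n';
      }
      return 0;
    }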
28 changes: 14 additions & 14 deletions example/extensions/lib_subgraph/subgraph_lib.cc
@@ -23,9 +23,10 @@
* \brief subgraph operator implementation library file
*/

#include <math.h>
#include <cmath>
#include <iostream>
#include <algorithm>
#include <utility>
#include "lib_api.h"

/* function to execute log operator on floats */
@@ -69,8 +70,7 @@ MXReturnValue myExecutor(std::vector<MXTensor>* inputs,
std::vector<void*> to_free;

// loop over nodes
for(int i=0; i<nodes.list.size(); i++) {
JsonVal node = nodes.list[i];
for(auto node : nodes.list) {
// get the op name
std::string op = node.map[JsonVal("op")].str;
// get node ID inputs to op
@@ -84,7 +84,7 @@ MXReturnValue myExecutor(std::vector<MXTensor>* inputs,
// get input tensor based on node ID inputs from data storage
MXTensor &input = data[node_inputs.list[0].list[0].num];
// create temporary storage
MXTensor tmp(malloc(input.size()*4), input.shape, input.dtype, 0, MXContext::CPU(0), kDefaultStorage);
MXTensor tmp(malloc(input.size()*4), input.shape, input.dtype, 0, MXContext::CPU(0), kDefaultStorage); // NOLINT
// save allocated ptr to free later
to_free.push_back(tmp.data_ptr);
// execute log operator
@@ -95,7 +95,7 @@ MXReturnValue myExecutor(std::vector<MXTensor>* inputs,
// get input tensor based on node ID inputs from data storage
MXTensor &input = data[node_inputs.list[0].list[0].num];
// create temporary storage
MXTensor tmp(malloc(input.size()*4), input.shape, input.dtype, 0, MXContext::CPU(0), kDefaultStorage);
MXTensor tmp(malloc(input.size()*4), input.shape, input.dtype, 0, MXContext::CPU(0), kDefaultStorage); // NOLINT
// save allocated ptr to free later
to_free.push_back(tmp.data_ptr);
// execute exp operator
@@ -106,7 +106,7 @@ MXReturnValue myExecutor(std::vector<MXTensor>* inputs,
std::cout << "Error! Unsupported op '" << op << "' found in myExecutor";
// free allocated temporary storage
for (void* ptr : to_free)
free(ptr);
free(ptr); // NOLINT
return MX_FAIL;
}
}
@@ -129,25 +129,25 @@ MXReturnValue myExecutor(std::vector<MXTensor>* inputs,

// free allocated temporary storage
for (void* ptr : to_free) {
free(ptr);
free(ptr); // NOLINT
}

return MX_SUCCESS;
}

class MyStatefulOp : public CustomStatefulOp {
public:
explicit MyStatefulOp(const std::string& sym,
explicit MyStatefulOp(std::string sym,
const std::unordered_map<std::string, std::string>& attrs)
: subgraph_sym(sym), attrs_(attrs) {
: subgraph_sym(std::move(sym)), attrs_(attrs) {
for (auto kv : attrs) {
std::cout << "subgraphOp attributes: " << kv.first << " ==> " << kv.second << std::endl;
}
}

MXReturnValue Forward(std::vector<MXTensor>* inputs,
std::vector<MXTensor>* outputs,
const OpResource& op_res) {
const OpResource& op_res) override {
return myExecutor(inputs, outputs, subgraph_sym);
}

@@ -299,20 +299,20 @@ class MySelector : public CustomOpSelector {
}
return false;
}
virtual bool Select(int nodeID) {
bool Select(int nodeID) override {
return chooseNode(nodeID);
}
virtual bool SelectInput(int nodeID, int input_nodeID) {
bool SelectInput(int nodeID, int input_nodeID) override {
return chooseNode(input_nodeID);
}
virtual bool SelectOutput(int nodeID, int output_nodeID) {
bool SelectOutput(int nodeID, int output_nodeID) override {
return chooseNode(output_nodeID);
}
virtual void Filter(std::vector<int>& candidates,
std::vector<int>& keep) {
keep.insert(keep.end(), candidates.begin(), candidates.end());
}
virtual void Reset() {}
void Reset() override {}
private:
std::string graph_json;
JsonVal nodes;
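The // NOLINT markers added above suppress clang-tidy for a single line, keeping the deliberate malloc/free pairs in this example library from tripping the cppcoreguidelines-no-malloc error enforced on CI. A bare NOLINT silences every check on the line; naming the check narrows the suppression, as in this minimal sketch:

    // Minimal sketch of inline suppression (hypothetical allocation).
    #include <cstdlib>

    int main() {
      void* buf = malloc(16);  // NOLINT(cppcoreguidelines-no-malloc) -- raw buffer for C interop
      free(buf);               // NOLINT(cppcoreguidelines-no-malloc)
      return 0;
    }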
2 changes: 1 addition & 1 deletion src/api/operator/numpy/linalg/np_norm.cc
@@ -44,7 +44,7 @@ MXNET_REGISTER_API("_npi.norm")
param.flag = args[4].operator int();

attrs.op = op;
attrs.parsed = std::move(param);
attrs.parsed = param;
SetAttrDict<op::NumpyNormParam>(&attrs);

// inputs
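The std::move dropped above is the performance-move-const-arg fix: std::move applied to a const object, or to a trivially copyable one, compiles to a copy anyway, so the cast only obscures intent (clang-tidy determined the move had no effect for param here). A minimal illustration with hypothetical types:

    // Minimal sketch of performance-move-const-arg (hypothetical types).
    #include <string>
    #include <utility>

    struct Params { int flag; };  // trivially copyable

    int main() {
      Params p{1};
      Params q = std::move(p);       // flagged: moving a trivially copyable type copies anyway
      const std::string s = "norm";
      std::string t = std::move(s);  // flagged: a const object cannot be moved from
      return q.flag + static_cast<int>(t.size()) > 0 ? 0 : 1;
    }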