From 6bbd53107aa16fc41e8d462cf5dc46fb70d592df Mon Sep 17 00:00:00 2001
From: Leonard Lausen
Date: Wed, 29 Jul 2020 20:31:19 +0000
Subject: [PATCH] Update clang-tidy integration (#18815)

Run clang-tidy via cmake only on the code managed by mxnet (and not on
3rdparty dependencies), update to clang-tidy-10, and run clang-tidy-10 -fix
to fix all the warnings that are enforced on CI. Developers can run
clang-tidy by passing -DCMAKE_CXX_CLANG_TIDY="clang-tidy-10" to cmake, or by
running the python ci/build.py -R --platform ubuntu_cpu
/work/runtime_functions.sh build_ubuntu_cpu_clang_tidy script.
---
 .clang-tidy                                   | 52 +++++++------------
 CMakeLists.txt                                | 13 ++++-
 ci/docker/Dockerfile.build.ubuntu             |  2 +-
 ci/docker/runtime_functions.sh                |  7 +--
 example/extensions/lib_custom_op/gemm_lib.cc  | 11 ++--
 .../lib_custom_op/transposecsr_lib.cc         |  9 ++--
 .../lib_custom_op/transposerowsp_lib.cc       |  9 ++--
 example/extensions/lib_pass/pass_lib.cc       |  5 +-
 .../extensions/lib_subgraph/subgraph_lib.cc   | 28 +++++-----
 src/api/operator/numpy/linalg/np_norm.cc      |  2 +-
 .../numpy/np_broadcast_reduce_op_boolean.cc   |  4 +-
 .../numpy/np_broadcast_reduce_op_index.cc     |  4 +-
 .../numpy/np_broadcast_reduce_op_value.cc     | 16 +++---
 src/api/operator/numpy/np_cumsum.cc           |  2 +-
 src/api/operator/numpy/np_delete_op.cc        |  2 +-
 src/api/operator/numpy/np_ediff1d_op.cc       |  2 +-
 src/api/operator/numpy/np_einsum_op.cc        |  2 +-
 .../numpy/np_elemwise_unary_op_basic.cc       |  2 +-
 src/api/operator/numpy/np_fill_diagonal_op.cc |  2 +-
 src/api/operator/numpy/np_histogram_op.cc     |  2 +-
 src/api/operator/numpy/np_init_op.cc          | 20 +++----
 src/api/operator/numpy/np_insert_op.cc        |  6 +--
 src/api/operator/numpy/np_interp_op.cc        |  4 +-
 src/api/operator/numpy/np_matrix_op.cc        | 26 +++++-----
 src/api/operator/numpy/np_moments_op.cc       |  6 +--
 src/api/operator/numpy/np_nan_to_num_op.cc    |  2 +-
 src/api/operator/numpy/np_pad_op.cc           |  2 +-
 src/api/operator/numpy/np_percentile_op.cc    |  4 +-
 src/api/operator/numpy/np_repeat_op.cc        |  2 +-
 src/api/operator/numpy/np_tensordot_op.cc     |  2 +-
 src/api/operator/numpy/np_unique_op.cc        |  2 +-
 src/api/operator/numpy/np_window_op.cc        |  2 +-
 src/api/operator/numpy/random/np_choice_op.cc |  2 +-
 .../numpy/random/np_exponential_op.cc         |  2 +-
 .../operator/numpy/random/np_laplace_op.cc    |  2 +-
 .../numpy/random/np_location_scale_op.cc      |  4 +-
 src/api/operator/numpy/random/np_pareto_op.cc |  2 +-
 src/api/operator/numpy/random/np_power_op.cc  |  2 +-
 .../operator/numpy/random/np_rayleigh_op.cc   |  2 +-
 .../operator/numpy/random/np_weibull_op.cc    |  2 +-
 src/api/operator/random/np_gamma_op.cc        |  2 +-
 src/api/operator/random/np_normal_op.cc       |  2 +-
 src/api/operator/random/np_uniform_op.cc      |  2 +-
 src/api/operator/tensor/matrix_op.cc          |  2 +-
 src/c_api/c_api.cc                            | 20 +++----
 src/c_api/c_api_profile.cc                    |  2 +-
 src/c_api/c_api_symbolic.cc                   | 22 ++++----
 src/engine/naive_engine.cc                    | 18 ++++---
 src/engine/threaded_engine_perdevice.cc       | 28 +++++-----
 src/engine/threaded_engine_pooled.cc          | 13 ++---
 src/imperative/attach_op_execs_pass.cc        | 29 ++++++-----
 src/imperative/cached_op.cc                   |  6 +--
 src/imperative/cached_op_threadsafe.cc        |  2 +-
 src/imperative/eliminate_common_expr_pass.cc  |  8 +--
 src/imperative/imperative.cc                  |  8 +--
 src/initialize.cc                             |  5 +-
 src/io/batchify.cc                            | 16 +++---
 src/io/dataloader.cc                          | 16 +++---
 src/io/dataset.cc                             | 46 ++++++++--------
 src/io/image_aug_default.cc                   |  2 +-
 src/io/image_det_aug_default.cc               |  2 +-
 src/io/iter_csv.cc                            | 30 +++++------
 src/io/iter_image_det_recordio.cc             | 19 +++----
 src/io/iter_image_recordio.cc                 | 19 +++----
 src/io/iter_image_recordio_2.cc               | 46 ++++++++--------
 src/io/iter_libsvm.cc                         | 16 +++---
 src/io/iter_mnist.cc                          | 22 ++++----
 src/io/iter_sampler.cc                        | 23 ++++----
 src/kvstore/kvstore.cc                        |  3 +-
 src/nnvm/gradient.cc                          |  9 ++--
 src/nnvm/graph_editor.cc                      |  4 +-
 src/nnvm/low_precision_pass.cc                | 15 +++---
 src/nnvm/tvm_bridge.cc                        |  3 +-
 src/operator/contrib/boolean_mask.cc          |  4 +-
 src/operator/contrib/dgl_graph.cc             | 49 ++++++++---------
 src/operator/contrib/multi_proposal.cc        |  8 +--
 src/operator/contrib/proposal.cc              |  8 +--
 src/operator/contrib/rroi_align.cc            |  2 +-
 src/operator/control_flow.cc                  | 12 +++--
 src/operator/leaky_relu.cc                    |  2 +-
 src/operator/numpy/np_einsum_op.cc            |  4 +-
 src/operator/numpy/np_indexing_op.cc          |  4 +-
 src/operator/numpy/np_polynomial_op.cc        |  2 +-
 src/operator/operator_tune.cc                 |  2 +-
 .../quantization/quantize_graph_pass.cc       |  7 ++-
 .../quantization/quantized_elemwise_mul.cc    |  4 +-
 .../quantization/quantized_fully_connected.cc |  2 +-
 .../subgraph/default_subgraph_property.cc     | 16 +++---
 .../subgraph/default_subgraph_property_v2.cc  |  4 +-
 src/profiler/aggregate_stats.cc               |  4 +-
 src/profiler/profiler.cc                      |  6 +--
 src/resource.cc                               | 20 +++----
 src/runtime/registry.cc                       |  2 +-
 src/storage/storage.cc                        |  2 +-
 tests/cpp/engine/thread_local_test.cc         |  2 +-
 tests/cpp/engine/threaded_engine_test.cc      |  8 +--
 tests/cpp/operator/batchnorm_test.cc          | 21 ++++----
 .../operator/runner/core_op_runner_test.cc    | 10 ++--
 tests/cpp/storage/storage_test.cc             |  2 +-
 tools/im2rec.cc                               |  2 +-
 100 files changed, 479 insertions(+), 462 deletions(-)

diff --git a/.clang-tidy b/.clang-tidy
index 25fbff9f1a4d..b0f28e29b7c3 100644
--- a/.clang-tidy
+++ b/.clang-tidy
@@ -17,45 +17,33 @@
 # The checks defined here will be run and will display by default as warnings.
 Checks: >
-    -*, cppcoreguidelines-c-copy-assignment-signature,
-    cppcoreguidelines-interfaces-global-init, cppcoreguidelines-no-malloc,
-    cppcoreguidelines-pro-bounds-constant-array-index, cppcoreguidelines-pro-type-const-cast,
-    cppcoreguidelines-pro-type-cstyle-cast, cppcoreguidelines-pro-type-member-init,
-    cppcoreguidelines-pro-type-static-cast-downcast, cppcoreguidelines-pro-type-union-access,
-    cppcoreguidelines-pro-type-vararg, cppcoreguidelines-slicing,
-    cppcoreguidelines-special-member-functions, clang-analyzer-security.FloatLoopCounter,
-    clang-analyzer-security.insecureAPI.*, clang-analyzer-core.CallAndMessage,
-    clang-analyzer-core.DivideZero, clang-analyzer-core.DynamicTypePropagation,
-    clang-analyzer-core.NonNullParamChecker, clang-analyzer-core.NullDereference,
-    clang-analyzer-core.StackAddressEscape, clang-analyzer-core.UndefinedBinaryOperatorResult,
-    clang-analyzer-core.VLASize, clang-analyzer-core.builtin.BuiltinFunctions,
-    clang-analyzer-core.builtin.NoReturnFunctions, clang-analyzer-core.uninitialized.ArraySubscript,
-    clang-analyzer-core.uninitialized.Assign, clang-analyzer-core.uninitialized.Branch,
-    clang-analyzer-core.uninitialized.CapturedBlockVariable,
-    clang-analyzer-core.uninitialized.UndefReturn, clang-analyzer-cplusplus.NewDelete,
-    clang-analyzer-cplusplus.NewDeleteLeaks, clang-analyzer-cplusplus.SelfAssignment,
-    clang-analyzer-deadcode.DeadStores, modernize-avoid-bind, modernize-deprecated-headers,
-    modernize-loop-convert, modernize-make-shared, modernize-pass-by-value,
+    -*, cppcoreguidelines-*, clang-analyzer-*, modernize-*,
+    performance-faster-string-find, performance-for-range-copy,
+    performance-implicit-conversion-in-loop, performance-inefficient-algorithm,
+    performance-inefficient-string-concatenation, performance-trivially-destructible,
+    performance-inefficient-vector-operation, performance-move-const-arg,
+    performance-move-constructor-init, performance-noexcept-move-constructor,
+    performance-no-automatic-move, performance-unnecessary-copy-initialization,
+    performance-type-promotion-in-math-fn
+
+# performance checks not enabled due to segmentation fault in clang-tidy v8+:
+# performance-unnecessary-value-param
+
+# In order to trigger an error, you must have a rule defined both in checks and in this section.
+WarningsAsErrors: >
+    cppcoreguidelines-no-malloc, modernize-deprecated-headers,
+    modernize-loop-convert, modernize-make-shared, modernize-pass-by-value,
     modernize-make-unique, modernize-raw-string-literal, modernize-redundant-void-arg,
     modernize-replace-auto-ptr, modernize-replace-random-shuffle,
     modernize-return-braced-init-list, modernize-shrink-to-fit, modernize-unary-static-assert,
     modernize-use-bool-literals, modernize-use-default-member-init, modernize-use-emplace,
     modernize-use-equals-default, modernize-use-equals-delete, modernize-use-noexcept,
     modernize-use-nullptr, modernize-use-override,
-    modernize-use-transparent-functors, modernize-use-using, performance-*
+    modernize-use-transparent-functors, modernize-use-using,
+    performance-unnecessary-copy-initialization, performance-move-const-arg
 
-# cppcoreguidelines checks not enabled:
-# cppcoreguidelines-pro-bounds-pointer-arithmetic
-# cppcoreguidelines-pro-bounds-array-to-pointer-decay
-# cppcoreguidelines-pro-type-reinterpret-cast
-
-# modernize checks not enabled:
+# modernize checks not enforced:
 # modernize-use-auto
-# modernize-make-unique (C++14 and newer only)
-
-# In order to trigger an error, you must have a rule defined both in checks and in this section.
-WarningsAsErrors: >
-    cppcoreguidelines-no-malloc, modernize-use-nullptr, performance-unnecessary-copy-initialization,
-    modernize-use-emplace, performance-move-const-arg
 
 # Todo: define a better regex match that includes most project headers, but excludes third party
 # code.
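For reference, the two developer workflows named in the commit message look like this when
invoked by hand. This is a minimal sketch, assuming an Ubuntu host with the clang-tidy-10
package installed and a fresh build directory; /path/to/mxnet is a placeholder for your
source checkout, and only the CMAKE_CXX_CLANG_TIDY flag is prescribed by this patch (the
remaining cmake options follow the usual build instructions, see the CI script below):

    # Option 1: let cmake run clang-tidy-10 over every mxnet C++ file as it compiles
    cmake -DCMAKE_CXX_CLANG_TIDY="clang-tidy-10" -G Ninja /path/to/mxnet && ninja

    # Option 2: reproduce the CI job inside the CI Docker container
    python ci/build.py -R --platform ubuntu_cpu /work/runtime_functions.sh build_ubuntu_cpu_clang_tidy

Note that, per the comment in .clang-tidy above, a check only fails the build when it is
listed under both Checks and WarningsAsErrors; everything else surfaces as a warning.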
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 162eeeade385..5f1a5106a95d 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -275,6 +275,7 @@ if(USE_MKLDNN)
   include_directories(${PROJECT_BINARY_DIR}/3rdparty/mkldnn/include)
   add_definitions(-DMXNET_USE_MKLDNN=1)
   list(APPEND mxnet_LINKER_LIBS dnnl)
+  set_target_properties(dnnl PROPERTIES CXX_CLANG_TIDY "")  # don't lint 3rdparty dependency
 endif()
 
 # Allow Cuda compiles outside of src tree to find things in 'src' and 'include'
@@ -405,6 +406,7 @@ if(USE_OPENMP)
      AND NOT CMAKE_CROSSCOMPILING)
     load_omp()
     list(APPEND mxnet_LINKER_LIBS omp)
+    set_target_properties(omp PROPERTIES CXX_CLANG_TIDY "")  # don't lint 3rdparty dependency
     if(UNIX)
       list(APPEND mxnet_LINKER_LIBS pthread)
     endif()
@@ -462,6 +464,8 @@ set(GTEST_MAIN_LIBRARY gtest_main)
 set(GTEST_LIBRARY gtest)
 
 add_subdirectory(${GTEST_ROOT})
+set_target_properties(gtest PROPERTIES CXX_CLANG_TIDY "")  # don't lint 3rdparty dependency
+set_target_properties(gtest_main PROPERTIES CXX_CLANG_TIDY "")  # don't lint 3rdparty dependency
 find_package(GTest REQUIRED)
 
 # cudnn detection
@@ -478,6 +482,7 @@ endif()
 
 if(EXISTS ${CMAKE_CURRENT_SOURCE_DIR}/3rdparty/dmlc-core/cmake)
   add_subdirectory("3rdparty/dmlc-core")
+  set_target_properties(dmlc PROPERTIES CXX_CLANG_TIDY "")  # don't lint 3rdparty dependency
 endif()
 
 FILE(GLOB_RECURSE SOURCE "src/*.cc" "src/*.h" "include/*.h")
@@ -492,7 +497,9 @@ FILE(GLOB_RECURSE NNVMSOURCE
   3rdparty/tvm/nnvm/src/core/*.h
   3rdparty/tvm/nnvm/src/pass/*.h
   3rdparty/tvm/nnvm/include/*.h)
-list(APPEND SOURCE ${NNVMSOURCE})
+add_library(nnvm OBJECT ${NNVMSOURCE})
+set_target_properties(nnvm PROPERTIES CXX_CLANG_TIDY "")  # don't lint 3rdparty dependency
+list(APPEND SOURCE $<TARGET_OBJECTS:nnvm>)
 
 # add source group
 FILE(GLOB_RECURSE GROUP_SOURCE "src/*.cc" "3rdparty/tvm/nnvm/*.cc" "plugin/*.cc")
@@ -743,6 +750,7 @@ if(USE_DIST_KVSTORE)
   add_subdirectory("3rdparty/ps-lite")
   add_definitions(-DMXNET_USE_DIST_KVSTORE)
   list(APPEND mxnet_LINKER_LIBS pslite)
+  set_target_properties(pslite PROPERTIES CXX_CLANG_TIDY "")  # don't lint 3rdparty dependency
 endif()
 
 if(USE_MKLDNN)
@@ -757,6 +765,9 @@ function(BuildTVMOP)
   # scope the variables in BuildTVM.cmake to avoid conflict
   include(cmake/BuildTVM.cmake)
   add_subdirectory("3rdparty/tvm")
+  set_target_properties(tvm PROPERTIES CXX_CLANG_TIDY "")  # don't lint 3rdparty dependency
+  set_target_properties(tvm_topi PROPERTIES CXX_CLANG_TIDY "")  # don't lint 3rdparty dependency
+  set_target_properties(tvm_runtime PROPERTIES CXX_CLANG_TIDY "")  # don't lint 3rdparty dependency
 endfunction()
 
 if(USE_TVM_OP)
diff --git a/ci/docker/Dockerfile.build.ubuntu b/ci/docker/Dockerfile.build.ubuntu
index c9ec3f5a04fc..8398dc9bee54 100644
--- a/ci/docker/Dockerfile.build.ubuntu
+++ b/ci/docker/Dockerfile.build.ubuntu
@@ -53,9 +53,9 @@ RUN export DEBIAN_FRONTEND=noninteractive && \
     protobuf-compiler \
     libprotobuf-dev \
     clang-6.0 \
-    clang-tidy-6.0 \
     python-yaml \
     clang-10 \
+    clang-tidy-10 \
     g++ \
     g++-8 \
     intel-mkl-2020.0-088 \
diff --git a/ci/docker/runtime_functions.sh b/ci/docker/runtime_functions.sh
index e175d33e11f8..76b723c5d919 100755
--- a/ci/docker/runtime_functions.sh
+++ b/ci/docker/runtime_functions.sh
@@ -462,9 +462,8 @@ build_ubuntu_cpu_clang100() {
 build_ubuntu_cpu_clang_tidy() {
     set -ex
     cd /work/build
-    export CLANG_TIDY=/usr/lib/llvm-6.0/share/clang/run-clang-tidy.py
     # TODO(leezu) USE_OPENMP=OFF 3rdparty/dmlc-core/CMakeLists.txt:79 broken?
-    CXX=clang++-6.0 CC=clang-6.0 cmake \
+    CXX=clang++-10 CC=clang-10 cmake \
        -DUSE_MKL_IF_AVAILABLE=OFF \
        -DUSE_MKLDNN=OFF \
        -DUSE_CUDA=OFF \
@@ -472,11 +471,9 @@ build_ubuntu_cpu_clang_tidy() {
        -DCMAKE_BUILD_TYPE=Debug \
        -DUSE_DIST_KVSTORE=ON \
        -DUSE_CPP_PACKAGE=ON \
-       -DCMAKE_EXPORT_COMPILE_COMMANDS=ON \
+       -DCMAKE_CXX_CLANG_TIDY=clang-tidy-10 \
        -G Ninja /work/mxnet
     ninja
-    cd /work/mxnet
-    $CLANG_TIDY -p /work/build -j $(nproc) -clang-tidy-binary clang-tidy-6.0 /work/mxnet/src
 }
 
 build_ubuntu_cpu_clang6_mkldnn() {
diff --git a/example/extensions/lib_custom_op/gemm_lib.cc b/example/extensions/lib_custom_op/gemm_lib.cc
index 4f8dabadc6a1..764ac49d9942 100644
--- a/example/extensions/lib_custom_op/gemm_lib.cc
+++ b/example/extensions/lib_custom_op/gemm_lib.cc
@@ -24,6 +24,7 @@
  */
 
 #include <iostream>
+#include <utility>
 #include "lib_api.h"
 
 // main matrix multiplication routine
@@ -179,23 +180,23 @@ REGISTER_OP(my_gemm)
 class MyStatefulGemm : public CustomStatefulOp {
  public:
   explicit MyStatefulGemm(int count,
-                          const std::unordered_map<std::string, std::string>& attrs)
-    : count(count), attrs_(attrs) {}
+                          std::unordered_map<std::string, std::string> attrs)
+    : count(count), attrs_(std::move(attrs)) {}
 
   MXReturnValue Forward(std::vector<MXTensor>* inputs,
                         std::vector<MXTensor>* outputs,
-                        const OpResource& op_res) {
+                        const OpResource& op_res) override {
     std::cout << "Info: keyword + number of forward: " << ++count << std::endl;
     return forward(attrs_, inputs, outputs, op_res);
   }
 
   MXReturnValue Backward(std::vector<MXTensor>* inputs,
                          std::vector<MXTensor>* outputs,
-                         const OpResource& op_res) {
+                         const OpResource& op_res) override {
     return backward(attrs_, inputs, outputs, op_res);
   }
 
-  ~MyStatefulGemm() {}
+  ~MyStatefulGemm() = default;
 
  private:
   int count;
diff --git a/example/extensions/lib_custom_op/transposecsr_lib.cc b/example/extensions/lib_custom_op/transposecsr_lib.cc
index 224cd6aa81b6..fc80751e47be 100644
--- a/example/extensions/lib_custom_op/transposecsr_lib.cc
+++ b/example/extensions/lib_custom_op/transposecsr_lib.cc
@@ -24,6 +24,7 @@
  */
 
 #include <iostream>
+#include <utility>
 #include "lib_api.h"
 
 void transpose(MXTensor& src, MXTensor& dst, const OpResource& res) {
@@ -151,19 +152,19 @@ REGISTER_OP(my_transposecsr)
 class MyStatefulTransposeCSR : public CustomStatefulOp {
  public:
   explicit MyStatefulTransposeCSR(int count,
-                                  const std::unordered_map<std::string, std::string>& attrs)
-    : count(count), attrs_(attrs) {}
+                                  std::unordered_map<std::string, std::string> attrs)
+    : count(count), attrs_(std::move(attrs)) {}
 
   MXReturnValue Forward(std::vector<MXTensor>* inputs,
                         std::vector<MXTensor>* outputs,
-                        const OpResource& op_res) {
+                        const OpResource& op_res) override {
     std::cout << "Info: keyword + number of forward: " << ++count << std::endl;
     return forward(attrs_, inputs, outputs, op_res);
   }
 
   MXReturnValue Backward(std::vector<MXTensor>* inputs,
                          std::vector<MXTensor>* outputs,
-                         const OpResource& op_res) {
+                         const OpResource& op_res) override {
     return backward(attrs_, inputs, outputs, op_res);
   }
diff --git a/example/extensions/lib_custom_op/transposerowsp_lib.cc b/example/extensions/lib_custom_op/transposerowsp_lib.cc
index 46d3c4d41a4c..5b6f0f394dc9 100644
--- a/example/extensions/lib_custom_op/transposerowsp_lib.cc
+++ b/example/extensions/lib_custom_op/transposerowsp_lib.cc
@@ -24,6 +24,7 @@
  */
 
 #include <iostream>
+#include <utility>
 #include "lib_api.h"
 
 void transpose(MXTensor& src, MXTensor& dst, const OpResource& res) {
@@ -153,19 +154,19 @@ REGISTER_OP(my_transposerowsp)
 class MyStatefulTransposeRowSP : public CustomStatefulOp {
  public:
   explicit MyStatefulTransposeRowSP(int count,
-                                    const std::unordered_map<std::string, std::string>& attrs)
-    : count(count), attrs_(attrs) {}
+                                    std::unordered_map<std::string, std::string> attrs)
+    : count(count), attrs_(std::move(attrs)) {}
   MXReturnValue Forward(std::vector<MXTensor>* inputs,
                         std::vector<MXTensor>* outputs,
-                        const OpResource& op_res) {
+                        const OpResource& op_res) override {
     std::cout << "Info: keyword + number of forward: " << ++count << std::endl;
     return forward(attrs_, inputs, outputs, op_res);
   }
 
   MXReturnValue Backward(std::vector<MXTensor>* inputs,
                          std::vector<MXTensor>* outputs,
-                         const OpResource& op_res) {
+                         const OpResource& op_res) override {
     return backward(attrs_, inputs, outputs, op_res);
   }
diff --git a/example/extensions/lib_pass/pass_lib.cc b/example/extensions/lib_pass/pass_lib.cc
index bbdcd73a7a0b..ca77b59bfa06 100644
--- a/example/extensions/lib_pass/pass_lib.cc
+++ b/example/extensions/lib_pass/pass_lib.cc
@@ -23,7 +23,7 @@
  * \brief subgraph operator implementation library file
  */
 
-#include
+#include
 #include
 #include "lib_api.h"
@@ -67,8 +67,7 @@ MXReturnValue jsonPass(const std::string& in_graph, const std::string** out_grap
   JsonVal nodes = json_val.map[JsonVal("nodes")];
 
   // loop over nodes
-  for(int i=0; i<nodes.list.size(); i++) {
-    JsonVal node = nodes.list[i];
+  for (auto &node : nodes.list) {
diff --git a/example/extensions/lib_subgraph/subgraph_lib.cc b/example/extensions/lib_subgraph/subgraph_lib.cc
--- a/example/extensions/lib_subgraph/subgraph_lib.cc
+++ b/example/extensions/lib_subgraph/subgraph_lib.cc
@@ ... @@
+#include
 #include
 #include
+#include
 #include "lib_api.h"
 
 /* function to execute log operator on floats */
@@ -69,8 +70,7 @@ MXReturnValue myExecutor(std::vector<MXTensor>* inputs,
   std::vector<void*> to_free;
 
   // loop over nodes
-  for(int i=0; i<nodes.list.size(); i++) {
-    JsonVal node = nodes.list[i];
+  for (auto &node : nodes.list) {
@@ ... @@ MXReturnValue myExecutor(std::vector<MXTensor>* inputs,
       // get input tensor based on node ID inputs from data storage
       MXTensor &input = data[node_inputs.list[0].list[0].num];
       // create temporary storage
-      MXTensor tmp(malloc(input.size()*4), input.shape, input.dtype, 0, MXContext::CPU(0), kDefaultStorage);
+      MXTensor tmp(malloc(input.size()*4), input.shape, input.dtype, 0, MXContext::CPU(0), kDefaultStorage);  // NOLINT
       // save allocated ptr to free later
       to_free.push_back(tmp.data_ptr);
       // execute log operator
@@ -95,7 +95,7 @@ MXReturnValue myExecutor(std::vector<MXTensor>* inputs,
       // get input tensor based on node ID inputs from data storage
       MXTensor &input = data[node_inputs.list[0].list[0].num];
       // create temporary storage
-      MXTensor tmp(malloc(input.size()*4), input.shape, input.dtype, 0, MXContext::CPU(0), kDefaultStorage);
+      MXTensor tmp(malloc(input.size()*4), input.shape, input.dtype, 0, MXContext::CPU(0), kDefaultStorage);  // NOLINT
       // save allocated ptr to free later
       to_free.push_back(tmp.data_ptr);
       // execute exp operator
@@ -106,7 +106,7 @@ MXReturnValue myExecutor(std::vector<MXTensor>* inputs,
       std::cout << "Error! Unsupported op '" << op << "' found in myExecutor";
       // free allocated temporary storage
       for (void* ptr : to_free)
-        free(ptr);
+        free(ptr);  // NOLINT
       return MX_FAIL;
     }
   }
@@ -129,7 +129,7 @@ MXReturnValue myExecutor(std::vector<MXTensor>* inputs,
 
   // free allocated temporary storage
   for (void* ptr : to_free) {
-    free(ptr);
+    free(ptr);  // NOLINT
   }
 
   return MX_SUCCESS;
@@ -137,9 +137,9 @@ MXReturnValue myExecutor(std::vector<MXTensor>* inputs,
 
 class MyStatefulOp : public CustomStatefulOp {
  public:
-  explicit MyStatefulOp(const std::string& sym,
+  explicit MyStatefulOp(std::string sym,
                         const std::unordered_map<std::string, std::string>& attrs)
-    : subgraph_sym(sym), attrs_(attrs) {
+    : subgraph_sym(std::move(sym)), attrs_(attrs) {
     for (auto kv : attrs) {
       std::cout << "subgraphOp attributes: " << kv.first << " ==> " << kv.second << std::endl;
     }
@@ -147,7 +147,7 @@ class MyStatefulOp : public CustomStatefulOp {
 
   MXReturnValue Forward(std::vector<MXTensor>* inputs,
                         std::vector<MXTensor>* outputs,
-                        const OpResource& op_res) {
+                        const OpResource& op_res) override {
     return myExecutor(inputs, outputs, subgraph_sym);
   }
 
@@ -299,20 +299,20 @@ class MySelector : public CustomOpSelector {
     }
     return false;
   }
-  virtual bool Select(int nodeID) {
+  bool Select(int nodeID) override {
     return chooseNode(nodeID);
   }
-  virtual bool SelectInput(int nodeID, int input_nodeID) {
+  bool SelectInput(int nodeID, int input_nodeID) override {
    return chooseNode(input_nodeID);
  }
-  virtual bool SelectOutput(int nodeID, int output_nodeID) {
+  bool SelectOutput(int nodeID, int output_nodeID) override {
     return chooseNode(output_nodeID);
   }
   virtual void Filter(std::vector<int>& candidates,
                       std::vector<int>& keep) {
     keep.insert(keep.end(), candidates.begin(), candidates.end());
   }
-  virtual void Reset() {}
+  void Reset() override {}
 
  private:
   std::string graph_json;
   JsonVal nodes;
diff --git a/src/api/operator/numpy/linalg/np_norm.cc b/src/api/operator/numpy/linalg/np_norm.cc
index 708b08d9e6bd..1928321ad206 100644
--- a/src/api/operator/numpy/linalg/np_norm.cc
+++ b/src/api/operator/numpy/linalg/np_norm.cc
@@ -44,7 +44,7 @@ MXNET_REGISTER_API("_npi.norm")
   param.flag = args[4].operator int();
 
   attrs.op = op;
-  attrs.parsed = std::move(param);
+  attrs.parsed = param;
   SetAttrDict(&attrs);
 
   // inputs
diff --git a/src/api/operator/numpy/np_broadcast_reduce_op_boolean.cc b/src/api/operator/numpy/np_broadcast_reduce_op_boolean.cc
index dea510a41608..c3e186195dca 100644
--- a/src/api/operator/numpy/np_broadcast_reduce_op_boolean.cc
+++ b/src/api/operator/numpy/np_broadcast_reduce_op_boolean.cc
@@ -48,7 +48,7 @@ MXNET_REGISTER_API("_npi.all")
   param.keepdims = args[2].operator bool();
   NDArray* inputs[] = {args[0].operator mxnet::NDArray*()};
   int num_inputs = 1;
-  attrs.parsed = std::move(param);
+  attrs.parsed = param;
   attrs.op = op;
   SetAttrDict(&attrs);
   auto ndoutputs = Invoke(op, &attrs, num_inputs, inputs, &num_outputs, outputs);
@@ -79,7 +79,7 @@ MXNET_REGISTER_API("_npi.any")
   param.keepdims = args[2].operator bool();
   NDArray* inputs[] = {args[0].operator mxnet::NDArray*()};
   int num_inputs = 1;
-  attrs.parsed = std::move(param);
+  attrs.parsed = param;
   attrs.op = op;
   SetAttrDict(&attrs);
   auto ndoutputs = Invoke(op, &attrs, num_inputs, inputs, &num_outputs, outputs);
diff --git a/src/api/operator/numpy/np_broadcast_reduce_op_index.cc b/src/api/operator/numpy/np_broadcast_reduce_op_index.cc
index aa24246f693d..83e16999417b 100644
--- a/src/api/operator/numpy/np_broadcast_reduce_op_index.cc
+++ b/src/api/operator/numpy/np_broadcast_reduce_op_index.cc
@@ -44,7 +44,7 @@ MXNET_REGISTER_API("_npi.argmax")
   // param.keepdims
   param.keepdims = args[2].operator bool();
-  attrs.parsed = std::move(param);
+  attrs.parsed = param;
   attrs.op = op;
   SetAttrDict(&attrs);
   // inputs
@@ -77,7 +77,7 @@ MXNET_REGISTER_API("_npi.argmin")
   // param.keepdims
   param.keepdims = args[2].operator bool();
-  attrs.parsed = std::move(param);
+  attrs.parsed = param;
   attrs.op = op;
   SetAttrDict(&attrs);
   // inputs
diff --git a/src/api/operator/numpy/np_broadcast_reduce_op_value.cc b/src/api/operator/numpy/np_broadcast_reduce_op_value.cc
index a3a45d2d3777..0fe7fc441209 100644
--- a/src/api/operator/numpy/np_broadcast_reduce_op_value.cc
+++ b/src/api/operator/numpy/np_broadcast_reduce_op_value.cc
@@ -40,7 +40,7 @@ MXNET_REGISTER_API("_npi.broadcast_to")
   } else {
     param.shape = TShape(args[1].operator ObjectRef());
   }
-  attrs.parsed = std::move(param);
+  attrs.parsed = param;
   attrs.op = op;
   SetAttrDict(&attrs);
 
@@ -91,7 +91,7 @@ MXNET_REGISTER_API("_npi.sum")
     param.initial = args[4].operator double();
   }
 
-  attrs.parsed = std::move(param);
+  attrs.parsed = param;
 
   SetAttrDict(&attrs);
 
@@ -133,7 +133,7 @@ MXNET_REGISTER_API("_npi.mean")
     param.keepdims = args[3].operator bool();
   }
   param.initial = dmlc::optional();
-  attrs.parsed = std::move(param);
+  attrs.parsed = param;
   attrs.op = op;
   SetAttrDict(&attrs);
   int num_inputs = 1;
@@ -177,7 +177,7 @@ MXNET_REGISTER_API("_npi.prod")
   } else {
     param.initial = args[4].operator double();
   }
-  attrs.parsed = std::move(param);
+  attrs.parsed = param;
   attrs.op = op;
   SetAttrDict(&attrs);
   int num_inputs = 1;
@@ -213,7 +213,7 @@ MXNET_REGISTER_API("_npi.max")
   param.keepdims = args[2].operator bool();
   NDArray* inputs[] = {args[0].operator mxnet::NDArray*()};
   int num_inputs = 1;
-  attrs.parsed = std::move(param);
+  attrs.parsed = param;
   attrs.op = op;
   SetAttrDict(&attrs);
   auto ndoutputs = Invoke(op, &attrs, num_inputs, inputs, &num_outputs, outputs);
@@ -244,7 +244,7 @@ MXNET_REGISTER_API("_npi.min")
   param.keepdims = args[2].operator bool();
   NDArray* inputs[] = {args[0].operator mxnet::NDArray*()};
   int num_inputs = 1;
-  attrs.parsed = std::move(param);
+  attrs.parsed = param;
   attrs.op = op;
   SetAttrDict(&attrs);
   auto ndoutputs = Invoke(op, &attrs, num_inputs, inputs, &num_outputs, outputs);
@@ -275,7 +275,7 @@ MXNET_REGISTER_API("_npi.amax")
   param.keepdims = args[2].operator bool();
   NDArray* inputs[] = {args[0].operator mxnet::NDArray*()};
   int num_inputs = 1;
-  attrs.parsed = std::move(param);
+  attrs.parsed = param;
   attrs.op = op;
   SetAttrDict(&attrs);
   auto ndoutputs = Invoke(op, &attrs, num_inputs, inputs, &num_outputs, outputs);
@@ -306,7 +306,7 @@ MXNET_REGISTER_API("_npi.amin")
   param.keepdims = args[2].operator bool();
   NDArray* inputs[] = {args[0].operator mxnet::NDArray*()};
   int num_inputs = 1;
-  attrs.parsed = std::move(param);
+  attrs.parsed = param;
   attrs.op = op;
   SetAttrDict(&attrs);
   auto ndoutputs = Invoke(op, &attrs, num_inputs, inputs, &num_outputs, outputs);
diff --git a/src/api/operator/numpy/np_cumsum.cc b/src/api/operator/numpy/np_cumsum.cc
index d0b200c66fd4..a0f68cca1b6b 100644
--- a/src/api/operator/numpy/np_cumsum.cc
+++ b/src/api/operator/numpy/np_cumsum.cc
@@ -46,7 +46,7 @@ MXNET_REGISTER_API("_npi.cumsum")
   } else {
     param.dtype = String2MXNetTypeWithBool(args[2].operator std::string());
   }
-  attrs.parsed = std::move(param);
+  attrs.parsed = param;
   attrs.op = op;
   SetAttrDict(&attrs);
   // inputs
diff --git a/src/api/operator/numpy/np_delete_op.cc b/src/api/operator/numpy/np_delete_op.cc
index 925c8b568e28..f374b2318c9c 100644
--- a/src/api/operator/numpy/np_delete_op.cc
+++ b/src/api/operator/numpy/np_delete_op.cc
@@ -90,7 +90,7 @@ MXNET_REGISTER_API("_npi.delete")
   for (int i = 0; i < num_inputs; ++i) {
     inputs.push_back(args[i].operator mxnet::NDArray*());
   }
-  attrs.parsed = std::move(param);
+  attrs.parsed = param;
   attrs.op = op;
   SetAttrDict(&attrs);
   int num_outputs = 0;
diff --git a/src/api/operator/numpy/np_ediff1d_op.cc b/src/api/operator/numpy/np_ediff1d_op.cc
index df97fd8b68b9..64e15064889a 100644
--- a/src/api/operator/numpy/np_ediff1d_op.cc
+++ b/src/api/operator/numpy/np_ediff1d_op.cc
@@ -63,7 +63,7 @@ MXNET_REGISTER_API("_npi.ediff1d")
     num_inputs++;
   }
 
-  attrs.parsed = std::move(param);
+  attrs.parsed = param;
   attrs.op = op;
   SetAttrDict(&attrs);
 
diff --git a/src/api/operator/numpy/np_einsum_op.cc b/src/api/operator/numpy/np_einsum_op.cc
index a5b8339a619e..900739ac10ab 100644
--- a/src/api/operator/numpy/np_einsum_op.cc
+++ b/src/api/operator/numpy/np_einsum_op.cc
@@ -43,7 +43,7 @@ MXNET_REGISTER_API("_npi.einsum")
   // param.optimize
   param.optimize = args[args_size - 1].operator int();
 
-  attrs.parsed = std::move(param);
+  attrs.parsed = param;
   attrs.op = op;
   SetAttrDict(&attrs);
 
diff --git a/src/api/operator/numpy/np_elemwise_unary_op_basic.cc b/src/api/operator/numpy/np_elemwise_unary_op_basic.cc
index 720ab371fae4..9840fc4bd313 100644
--- a/src/api/operator/numpy/np_elemwise_unary_op_basic.cc
+++ b/src/api/operator/numpy/np_elemwise_unary_op_basic.cc
@@ -98,7 +98,7 @@ MXNET_REGISTER_API("_npi.around")
   nnvm::NodeAttrs attrs;
   op::AroundParam param;
   param.decimals = args[1].operator int64_t();
-  attrs.parsed = std::move(param);
+  attrs.parsed = param;
   attrs.op = op;
   SetAttrDict(&attrs);
   int num_inputs = 1;
diff --git a/src/api/operator/numpy/np_fill_diagonal_op.cc b/src/api/operator/numpy/np_fill_diagonal_op.cc
index 6f8959e9ff61..f087e7d2e608 100644
--- a/src/api/operator/numpy/np_fill_diagonal_op.cc
+++ b/src/api/operator/numpy/np_fill_diagonal_op.cc
@@ -44,7 +44,7 @@ MXNET_REGISTER_API("_npi.fill_diagonal")
   }
   param.wrap = args[2].operator bool();
 
-  attrs.parsed = std::move(param);
+  attrs.parsed = param;
   attrs.op = op;
   SetAttrDict(&attrs);
 
diff --git a/src/api/operator/numpy/np_histogram_op.cc b/src/api/operator/numpy/np_histogram_op.cc
index b517cce80803..fa911268e39b 100644
--- a/src/api/operator/numpy/np_histogram_op.cc
+++ b/src/api/operator/numpy/np_histogram_op.cc
@@ -49,7 +49,7 @@ MXNET_REGISTER_API("_npi.histogram")
     param.range = Obj2Tuple(args[3].operator ObjectRef());
   }
 
-  attrs.parsed = std::move(param);
+  attrs.parsed = param;
   attrs.op = op;
   SetAttrDict(&attrs);
 
diff --git a/src/api/operator/numpy/np_init_op.cc b/src/api/operator/numpy/np_init_op.cc
index 155035505d0f..b9ab8973d08e 100644
--- a/src/api/operator/numpy/np_init_op.cc
+++ b/src/api/operator/numpy/np_init_op.cc
@@ -47,7 +47,7 @@ MXNET_REGISTER_API("_npi.zeros")
   } else {
     param.dtype = String2MXNetTypeWithBool(args[1].operator std::string());
   }
-  attrs.parsed = std::move(param);
+  attrs.parsed = param;
   attrs.op = op;
   SetAttrDict(&attrs);
   if (args[2].type_code() != kNull) {
@@ -70,7 +70,7 @@ MXNET_REGISTER_API("_npi.full_like")
   } else {
     param.dtype = String2MXNetTypeWithBool(args[2].operator std::string());
   }
-  attrs.parsed = std::move(param);
+  attrs.parsed = param;
   attrs.op = op;
   if (args[3].type_code() != kNull) {
     attrs.dict["ctx"] = args[3].operator std::string();
@@ -107,7 +107,7 @@ MXNET_REGISTER_API("_npi.indices")
   } else {
     param.dtype = String2MXNetTypeWithBool(args[1].operator std::string());
   }
-  attrs.parsed = std::move(param);
+  attrs.parsed = param;
   attrs.op = op;
   SetAttrDict(&attrs);
   // param.ctx
@@ -223,7 +223,7 @@ MXNET_REGISTER_API("_npi.arange")
   } else {
     param.dtype = String2MXNetTypeWithBool(args[3].operator std::string());
   }
-  attrs.parsed = std::move(param);
+  attrs.parsed = param;
   attrs.op = op;
   SetAttrDict(&attrs);
   if (args[4].type_code() != kNull) {
@@ -252,7 +252,7 @@ MXNET_REGISTER_API("_npi.eye")
   } else {
     param.dtype = String2MXNetTypeWithBool(args[4].operator std::string());
   }
-  attrs.parsed = std::move(param);
+  attrs.parsed = param;
   attrs.op = op;
   SetAttrDict(&attrs);
   if (args[3].type_code() != kNull) {
@@ -282,7 +282,7 @@ MXNET_REGISTER_API("_npi.linspace")
   } else {
     param.dtype = String2MXNetTypeWithBool(args[5].operator std::string());
   }
-  attrs.parsed = std::move(param);
+  attrs.parsed = param;
   attrs.op = op;
   SetAttrDict(&attrs);
   if (args[4].type_code() != kNull) {
@@ -317,7 +317,7 @@ MXNET_REGISTER_API("_npi.logspace")
   } else {
     param.dtype = String2MXNetTypeWithBool(args[6].operator std::string());
   }
-  attrs.parsed = std::move(param);
+  attrs.parsed = param;
   attrs.op = op;
   SetAttrDict(&attrs);
   if (args[5].type_code() != kNull) {
@@ -344,7 +344,7 @@ MXNET_REGISTER_API("_npi.ones")
   } else {
     param.dtype = String2MXNetTypeWithBool(args[1].operator std::string());
   }
-  attrs.parsed = std::move(param);
+  attrs.parsed = param;
   attrs.op = op;
   if (args[2].type_code() != kNull) {
     attrs.dict["ctx"] = args[2].operator std::string();
@@ -372,7 +372,7 @@ MXNET_REGISTER_API("_npi.full")
     param.dtype = String2MXNetTypeWithBool(args[1].operator std::string());
   }
   param.value = args[2].operator double();
-  attrs.parsed = std::move(param);
+  attrs.parsed = param;
   attrs.op = op;
   if (args[3].type_code() != kNull) {
     attrs.dict["ctx"] = args[3].operator std::string();
@@ -401,7 +401,7 @@ MXNET_REGISTER_API("_npi.identity")
   } else {
     param.dtype = String2MXNetTypeWithBool(args[1].operator std::string());
   }
-  attrs.parsed = std::move(param);
+  attrs.parsed = param;
   attrs.op = op;
   if (args[2].type_code() != kNull) {
     attrs.dict["ctx"] = args[2].operator std::string();
diff --git a/src/api/operator/numpy/np_insert_op.cc b/src/api/operator/numpy/np_insert_op.cc
index 0de645e91913..7b2aeaa234f0 100644
--- a/src/api/operator/numpy/np_insert_op.cc
+++ b/src/api/operator/numpy/np_insert_op.cc
@@ -57,7 +57,7 @@ MXNET_REGISTER_API("_npi.insert_scalar")
   } else {
     param.axis = args[3].operator int();
   }
-  attrs.parsed = std::move(param);
+  attrs.parsed = param;
   attrs.op = op;
   SetAttrDict(&attrs);
   std::vector inputs;
@@ -105,7 +105,7 @@ MXNET_REGISTER_API("_npi.insert_slice")
   } else {
     param.axis = args[5].operator int();
   }
-  attrs.parsed = std::move(param);
+  attrs.parsed = param;
   attrs.op = op;
   SetAttrDict(&attrs);
   std::vector inputs;
@@ -141,7 +141,7 @@ MXNET_REGISTER_API("_npi.insert_tensor")
   } else {
     param.axis = args[3].operator int();
   }
-  attrs.parsed = std::move(param);
+  attrs.parsed = param;
   attrs.op = op;
   SetAttrDict(&attrs);
   std::vector inputs;
diff --git a/src/api/operator/numpy/np_interp_op.cc b/src/api/operator/numpy/np_interp_op.cc
index 7959383c9230..0b89373b1a88 100644
--- a/src/api/operator/numpy/np_interp_op.cc
+++ b/src/api/operator/numpy/np_interp_op.cc
@@ -53,7 +53,7 @@ MXNET_REGISTER_API("_npi.interp")
     param.x_scalar = args[2].operator double();
     param.x_is_scalar = true;
     attrs.op = op;
-    attrs.parsed = std::move(param);
+    attrs.parsed = param;
     SetAttrDict(&attrs);
     NDArray* inputs[] = {args[0].operator mxnet::NDArray*(), args[1].operator mxnet::NDArray*()};
     int num_inputs = 2;
@@ -64,7 +64,7 @@ MXNET_REGISTER_API("_npi.interp")
     param.x_scalar = 0.0;
     param.x_is_scalar = false;
     attrs.op = op;
-    attrs.parsed = std::move(param);
+    attrs.parsed = param;
     SetAttrDict(&attrs);
     NDArray* inputs[] = {args[0].operator mxnet::NDArray*(), args[1].operator mxnet::NDArray*(),
                          args[2].operator mxnet::NDArray*()};
diff --git a/src/api/operator/numpy/np_matrix_op.cc b/src/api/operator/numpy/np_matrix_op.cc
index 45146ae0ab66..7b53c580683a 100644
--- a/src/api/operator/numpy/np_matrix_op.cc
+++ b/src/api/operator/numpy/np_matrix_op.cc
@@ -44,7 +44,7 @@ MXNET_REGISTER_API("_npi.transpose")
   } else {
     param.axes = TShape(args[1].operator ObjectRef());
   }
-  attrs.parsed = std::move(param);
+  attrs.parsed = param;
   attrs.op = op;
   SetAttrDict(&attrs);
   NDArray* inputs[] = {args[0].operator mxnet::NDArray*()};
@@ -92,7 +92,7 @@ MXNET_REGISTER_API("_npi.stack")
 
   param.num_args = i;
   param.axis = args[i].operator int64_t();
-  attrs.parsed = std::move(param);
+  attrs.parsed = param;
   attrs.op = op;
   SetAttrDict(&attrs);
   NDArray* out = args[i+1].operator mxnet::NDArray*();
@@ -126,7 +126,7 @@ MXNET_REGISTER_API("_npi.flip")
   }
   NDArray* inputs[] = {args[0].operator mxnet::NDArray*()};
   int num_inputs = 1;
-  attrs.parsed = std::move(param);
+  attrs.parsed = param;
   attrs.op = op;
   SetAttrDict(&attrs);
   auto ndoutputs = Invoke(op, &attrs, num_inputs, inputs, &num_outputs, outputs);
@@ -150,7 +150,7 @@ MXNET_REGISTER_API("_npi.concatenate")
   } else {
     param.axis = args[arg_size - 2].operator int();
   }
-  attrs.parsed = std::move(param);
+  attrs.parsed = param;
   attrs.op = op;
   SetAttrDict(&attrs);
   int num_inputs = arg_size - 2;
@@ -222,7 +222,7 @@ MXNET_REGISTER_API("_npi.split")
     }
     param.sections = 0;
   }
-  attrs.parsed = std::move(param);
+  attrs.parsed = param;
   attrs.op = op;
   SetAttrDict(&attrs);
 
@@ -256,7 +256,7 @@ MXNET_REGISTER_API("_npi.roll")
   } else {
     param.axis = TShape(args[2].operator ObjectRef());
   }
-  attrs.parsed = std::move(param);
+  attrs.parsed = param;
   attrs.op = op;
   SetAttrDict(&attrs);
   NDArray* inputs[] = {args[0].operator mxnet::NDArray*()};
@@ -280,7 +280,7 @@ MXNET_REGISTER_API("_npi.rot90")
   } else {
     param.axes = TShape(args[2].operator ObjectRef());
   }
-  attrs.parsed = std::move(param);
+  attrs.parsed = param;
   attrs.op = op;
   SetAttrDict(&attrs);
   NDArray* inputs[] = {args[0].operator mxnet::NDArray*()};
@@ -351,7 +351,7 @@ MXNET_REGISTER_API("_npi.array_split")
     }
     param.sections = 0;
   }
-  attrs.parsed = std::move(param);
+  attrs.parsed = param;
   attrs.op = op;
   SetAttrDict(&attrs);
   NDArray* inputs[] = {args[0].operator mxnet::NDArray*()};
@@ -393,7 +393,7 @@ MXNET_REGISTER_API("_npi.dsplit")
     }
     param.sections = 0;
   }
-  attrs.parsed = std::move(param);
+  attrs.parsed = param;
   attrs.op = op;
   SetAttrDict(&attrs);
   int num_outputs = 0;
@@ -431,7 +431,7 @@ MXNET_REGISTER_API("_npi.hsplit")
     }
     param.sections = 0;
   }
-  attrs.parsed = std::move(param);
+  attrs.parsed = param;
   attrs.op = op;
   SetAttrDict(&attrs);
   int num_outputs = 0;
@@ -471,7 +471,7 @@ MXNET_REGISTER_API("_npi.vsplit")
     }
     param.sections = 0;
   }
-  attrs.parsed = std::move(param);
+  attrs.parsed = param;
   attrs.op = op;
   SetAttrDict(&attrs);
   int num_outputs = 0;
@@ -560,7 +560,7 @@ MXNET_REGISTER_API("_npi.diagflat")
   param.k = args[1].operator int();
   int num_inputs = 1;
   int num_outputs = 0;
-  attrs.parsed = std::move(param);
+  attrs.parsed = param;
   attrs.op = op;
   SetAttrDict(&attrs);
   NDArray* inputs[] = {args[0].operator mxnet::NDArray*()};
@@ -583,7 +583,7 @@ MXNET_REGISTER_API("_npi.squeeze")
   }
   int num_inputs = 1;
   int num_outputs = 0;
-  attrs.parsed = std::move(param);
+  attrs.parsed = param;
   attrs.op = op;
   SetAttrDict(&attrs);
   NDArray* inputs[] = {args[0].operator mxnet::NDArray*()};
diff --git a/src/api/operator/numpy/np_moments_op.cc b/src/api/operator/numpy/np_moments_op.cc
index e4e9238bb6c1..45dd45e8f4c9 100644
--- a/src/api/operator/numpy/np_moments_op.cc
+++ b/src/api/operator/numpy/np_moments_op.cc
@@ -65,7 +65,7 @@ MXNET_REGISTER_API("_npi.std")
     param.keepdims = args[4].operator bool();
   }
 
-  attrs.parsed = std::move(param);
+  attrs.parsed = param;
 
   SetAttrDict(&attrs);
 
@@ -120,7 +120,7 @@ MXNET_REGISTER_API("_npi.var")
     param.keepdims = args[4].operator bool();
   }
 
-  attrs.parsed = std::move(param);
+  attrs.parsed = param;
 
   SetAttrDict(&attrs);
 
@@ -168,7 +168,7 @@ MXNET_REGISTER_API("_npi.average")
       << "weighted cannot be None";
   param.weighted = args[4].operator bool();
 
-  attrs.parsed = std::move(param);
+  attrs.parsed = param;
 
   SetAttrDict(&attrs);
 
diff --git a/src/api/operator/numpy/np_nan_to_num_op.cc b/src/api/operator/numpy/np_nan_to_num_op.cc
index fadc4fe55dc7..65fd26e5432e 100644
--- a/src/api/operator/numpy/np_nan_to_num_op.cc
+++ b/src/api/operator/numpy/np_nan_to_num_op.cc
@@ -53,7 +53,7 @@ MXNET_REGISTER_API("_npi.nan_to_num")
     param.neginf = args[4].operator double();
   }
 
-  attrs.parsed = std::move(param);
+  attrs.parsed = param;
   attrs.op = op;
   SetAttrDict(&attrs);
 
diff --git a/src/api/operator/numpy/np_pad_op.cc b/src/api/operator/numpy/np_pad_op.cc
index 9c15ccc913b7..317076d23d48 100644
--- a/src/api/operator/numpy/np_pad_op.cc
+++ b/src/api/operator/numpy/np_pad_op.cc
@@ -73,7 +73,7 @@ MXNET_REGISTER_API("_npi.pad")
     param.reflect_type = args[4].operator std::string();
   }
   attrs.op = op;
-  attrs.parsed = std::move(param);
+  attrs.parsed = param;
   SetAttrDict(&attrs);
   int num_inputs = 1;
   int num_outputs = 0;
diff --git a/src/api/operator/numpy/np_percentile_op.cc b/src/api/operator/numpy/np_percentile_op.cc
index 634ee092c64d..196cca9baaf9 100644
--- a/src/api/operator/numpy/np_percentile_op.cc
+++ b/src/api/operator/numpy/np_percentile_op.cc
@@ -70,7 +70,7 @@ MXNET_REGISTER_API("_npi.percentile")
     param.q_scalar = args[1].operator double();
     NDArray* inputs[] = {args[0].operator mxnet::NDArray*()};
     int num_inputs = 1;
-    attrs.parsed = std::move(param);
+    attrs.parsed = param;
     attrs.op = op;
     SetAttrDict(&attrs);
     auto ndoutputs = Invoke(op, &attrs, num_inputs, inputs, &num_outputs, outputs);
@@ -83,7 +83,7 @@ MXNET_REGISTER_API("_npi.percentile")
     param.q_scalar = dmlc::nullopt;
     NDArray* inputs[] = {args[0].operator mxnet::NDArray*(), args[1].operator mxnet::NDArray*()};
     int num_inputs = 2;
-    attrs.parsed = std::move(param);
+    attrs.parsed = param;
     attrs.op = op;
     SetAttrDict(&attrs);
     auto ndoutputs = Invoke(op, &attrs, num_inputs, inputs, &num_outputs, outputs);
diff --git a/src/api/operator/numpy/np_repeat_op.cc b/src/api/operator/numpy/np_repeat_op.cc
index c79fb8bbe03c..c98a1711050a 100644
--- a/src/api/operator/numpy/np_repeat_op.cc
+++ b/src/api/operator/numpy/np_repeat_op.cc
@@ -41,7 +41,7 @@ MXNET_REGISTER_API("_npi.repeats")
   }
   int num_inputs = 1;
   int num_outputs = 0;
-  attrs.parsed = std::move(param);
+  attrs.parsed = param;
   attrs.op = op;
   SetAttrDict(&attrs);
   NDArray* inputs[] = {args[0].operator mxnet::NDArray*()};
diff --git a/src/api/operator/numpy/np_tensordot_op.cc b/src/api/operator/numpy/np_tensordot_op.cc
index 55c131468b12..0cc74d9355e1 100644
--- a/src/api/operator/numpy/np_tensordot_op.cc
+++ b/src/api/operator/numpy/np_tensordot_op.cc
@@ -62,7 +62,7 @@ inline static void _npi_tensordot(runtime::MXNetArgs args,
     param.b_axes_summed = Tuple(adt[1]);
   }
   attrs.op = op;
-  attrs.parsed = std::move(param);
+  attrs.parsed = param;
   SetAttrDict(&attrs);
   int num_outputs = 0;
   int num_inputs = 2;
diff --git a/src/api/operator/numpy/np_unique_op.cc b/src/api/operator/numpy/np_unique_op.cc
index 288260f5dfb2..a669025e108f 100644
--- a/src/api/operator/numpy/np_unique_op.cc
+++ b/src/api/operator/numpy/np_unique_op.cc
@@ -44,7 +44,7 @@ MXNET_REGISTER_API("_npi.unique")
   } else {
     param.axis = args[4].operator int();
   }
-  attrs.parsed = std::move(param);
+  attrs.parsed = param;
   attrs.op = op;
   SetAttrDict(&attrs);
   // inputs
diff --git a/src/api/operator/numpy/np_window_op.cc b/src/api/operator/numpy/np_window_op.cc
index 6b99c09cc75a..41c78cb16b6d 100644
--- a/src/api/operator/numpy/np_window_op.cc
+++ b/src/api/operator/numpy/np_window_op.cc
@@ -45,7 +45,7 @@ inline static void SetNumpyWindowsParam(runtime::MXNetArgs args,
   } else {
     param.dtype = String2MXNetTypeWithBool(args[1].operator std::string());
   }
-  attrs.parsed = std::move(param);
+  attrs.parsed = param;
   attrs.op = op;
   SetAttrDict(&attrs);
   if (args[2].type_code() != kNull) {
diff --git a/src/api/operator/numpy/random/np_choice_op.cc b/src/api/operator/numpy/random/np_choice_op.cc
index fe7b54d512c8..bc5ebbcffa58 100644
--- a/src/api/operator/numpy/random/np_choice_op.cc
+++ b/src/api/operator/numpy/random/np_choice_op.cc
@@ -70,7 +70,7 @@ MXNET_REGISTER_API("_npi.choice")
     num_inputs++;
   }
 
-  attrs.parsed = std::move(param);
+  attrs.parsed = param;
   attrs.op = op;
   if (args[4].type_code() != kNull) {
     attrs.dict["ctx"] = args[4].operator std::string();
diff --git a/src/api/operator/numpy/random/np_exponential_op.cc b/src/api/operator/numpy/random/np_exponential_op.cc
index fbb1644c6c5a..0c5b69417aff 100644
--- a/src/api/operator/numpy/random/np_exponential_op.cc
+++ b/src/api/operator/numpy/random/np_exponential_op.cc
@@ -58,7 +58,7 @@ MXNET_REGISTER_API("_npi.exponential")
     inputs[0] = args[0].operator mxnet::NDArray*();
     num_inputs = 1;
   }
-  attrs.parsed = std::move(param);
+  attrs.parsed = param;
   SetAttrDict(&attrs);
   auto ndoutputs = Invoke(op, &attrs, num_inputs, inputs, &num_outputs, outputs);
diff --git a/src/api/operator/numpy/random/np_laplace_op.cc b/src/api/operator/numpy/random/np_laplace_op.cc
index 57f770bfa376..a3ff63568828 100644
--- a/src/api/operator/numpy/random/np_laplace_op.cc
+++ b/src/api/operator/numpy/random/np_laplace_op.cc
@@ -73,7 +73,7 @@ MXNET_REGISTER_API("_npi.laplace")
   } else {
     param.dtype = String2MXNetTypeWithBool(args[3].operator std::string());
   }
-  attrs.parsed = std::move(param);
+  attrs.parsed = param;
   attrs.op = op;
   SetAttrDict(&attrs);
   if (args[4].type_code() != kNull) {
diff --git a/src/api/operator/numpy/random/np_location_scale_op.cc b/src/api/operator/numpy/random/np_location_scale_op.cc
index d163b0b5e014..ffcb41073400 100644
--- a/src/api/operator/numpy/random/np_location_scale_op.cc
+++ b/src/api/operator/numpy/random/np_location_scale_op.cc
@@ -82,7 +82,7 @@ MXNET_REGISTER_API("_npi.gumbel")
     }
     num_inputs = 1;
   }
-  attrs.parsed = std::move(param);
+  attrs.parsed = param;
   SetAttrDict(&attrs);
   auto ndoutputs = Invoke(op, &attrs, num_inputs, inputs.data(), &num_outputs, outputs);
@@ -137,7 +137,7 @@ MXNET_REGISTER_API("_npi.logistic")
     }
     num_inputs = 1;
   }
-  attrs.parsed = std::move(param);
+  attrs.parsed = param;
   SetAttrDict(&attrs);
   auto ndoutputs = Invoke(op, &attrs, num_inputs, inputs.data(), &num_outputs, outputs);
diff --git a/src/api/operator/numpy/random/np_pareto_op.cc b/src/api/operator/numpy/random/np_pareto_op.cc
index 92e3645b75bd..f18cdfdc2ecd 100644
--- a/src/api/operator/numpy/random/np_pareto_op.cc
+++ b/src/api/operator/numpy/random/np_pareto_op.cc
@@ -58,7 +58,7 @@ MXNET_REGISTER_API("_npi.pareto")
     inputs[0] = args[0].operator mxnet::NDArray*();
     num_inputs = 1;
   }
-  attrs.parsed = std::move(param);
+  attrs.parsed = param;
   SetAttrDict(&attrs);
   auto ndoutputs = Invoke(op, &attrs, num_inputs, inputs, &num_outputs, outputs);
diff --git a/src/api/operator/numpy/random/np_power_op.cc b/src/api/operator/numpy/random/np_power_op.cc
index 12a621726cd2..4f0cb55ef4bc 100644
--- a/src/api/operator/numpy/random/np_power_op.cc
+++ b/src/api/operator/numpy/random/np_power_op.cc
@@ -58,7 +58,7 @@ MXNET_REGISTER_API("_npi.powerd")
     inputs[0] = args[0].operator mxnet::NDArray*();
     num_inputs = 1;
   }
-  attrs.parsed = std::move(param);
+  attrs.parsed = param;
   SetAttrDict(&attrs);
   auto ndoutputs = Invoke(op, &attrs, num_inputs, inputs, &num_outputs, outputs);
diff --git a/src/api/operator/numpy/random/np_rayleigh_op.cc b/src/api/operator/numpy/random/np_rayleigh_op.cc
index 428e433763ad..5f602af335fd 100644
--- a/src/api/operator/numpy/random/np_rayleigh_op.cc
+++ b/src/api/operator/numpy/random/np_rayleigh_op.cc
@@ -58,7 +58,7 @@ MXNET_REGISTER_API("_npi.rayleigh")
     inputs[0] = args[0].operator mxnet::NDArray*();
     num_inputs = 1;
   }
-  attrs.parsed = std::move(param);
+  attrs.parsed = param;
   SetAttrDict(&attrs);
   auto ndoutputs = Invoke(op, &attrs, num_inputs, inputs, &num_outputs, outputs);
diff --git a/src/api/operator/numpy/random/np_weibull_op.cc b/src/api/operator/numpy/random/np_weibull_op.cc
index ef3b7e6ed7b6..18a2918634df 100644
--- a/src/api/operator/numpy/random/np_weibull_op.cc
+++ b/src/api/operator/numpy/random/np_weibull_op.cc
@@ -58,7 +58,7 @@ MXNET_REGISTER_API("_npi.weibull")
     inputs[0] = args[0].operator mxnet::NDArray*();
     num_inputs = 1;
   }
-  attrs.parsed = std::move(param);
+  attrs.parsed = param;
   SetAttrDict(&attrs);
   auto ndoutputs = Invoke(op, &attrs, num_inputs, inputs, &num_outputs, outputs);
diff --git a/src/api/operator/random/np_gamma_op.cc b/src/api/operator/random/np_gamma_op.cc
index 44aeb44c44f8..8bf3717c5825 100644
--- a/src/api/operator/random/np_gamma_op.cc
+++ b/src/api/operator/random/np_gamma_op.cc
@@ -91,7 +91,7 @@ MXNET_REGISTER_API("_npi.gamma")
   NDArray* out = args[5].operator mxnet::NDArray*();
   NDArray** outputs = out == nullptr ? nullptr : &out;
   int num_outputs = out != nullptr;
-  attrs.parsed = std::move(param);
+  attrs.parsed = param;
   attrs.op = op;
   if (args[3].type_code() != kNull) {
     attrs.dict["ctx"] = args[3].operator std::string();
diff --git a/src/api/operator/random/np_normal_op.cc b/src/api/operator/random/np_normal_op.cc
index bd39115f77c2..d78f50fdc564 100644
--- a/src/api/operator/random/np_normal_op.cc
+++ b/src/api/operator/random/np_normal_op.cc
@@ -77,7 +77,7 @@ MXNET_REGISTER_API("_npi.normal")
   } else {
     param.dtype = String2MXNetTypeWithBool(args[4].operator std::string());
   }
-  attrs.parsed = std::move(param);
+  attrs.parsed = param;
   attrs.op = op;
   if (args[3].type_code() != kNull) {
     attrs.dict["ctx"] = args[3].operator std::string();
diff --git a/src/api/operator/random/np_uniform_op.cc b/src/api/operator/random/np_uniform_op.cc
index 4cbc599cfe4c..41e830fefbed 100644
--- a/src/api/operator/random/np_uniform_op.cc
+++ b/src/api/operator/random/np_uniform_op.cc
@@ -76,7 +76,7 @@ MXNET_REGISTER_API("_npi.uniform")
   } else {
     param.dtype = String2MXNetTypeWithBool(args[4].operator std::string());
   }
-  attrs.parsed = std::move(param);
+  attrs.parsed = param;
   attrs.op = op;
   if (args[3].type_code() != kNull) {
     attrs.dict["ctx"] = args[3].operator std::string();
diff --git a/src/api/operator/tensor/matrix_op.cc b/src/api/operator/tensor/matrix_op.cc
index 61344286372e..5b275d5c38a9 100644
--- a/src/api/operator/tensor/matrix_op.cc
+++ b/src/api/operator/tensor/matrix_op.cc
@@ -79,7 +79,7 @@ MXNET_REGISTER_API("_npi.tile")
   } else {
     param.reps = Tuple(args[1].operator ObjectRef());
   }
-  attrs.parsed = std::move(param);
+  attrs.parsed = param;
   attrs.op = op;
   SetAttrDict(&attrs);
   int num_outputs = 0;
diff --git a/src/c_api/c_api.cc b/src/c_api/c_api.cc
index 53ff1e41c7f6..3d73ceb03267 100644
--- a/src/c_api/c_api.cc
+++ b/src/c_api/c_api.cc
@@ -236,13 +236,13 @@ void CustomFComputeDispatcher(const std::string op_name,
     return static_cast<void*>((*cpualloc)(size));
   };
-  typedef decltype(gpu_alloc) alloc_type_gpu;
+  using alloc_type_gpu = decltype(gpu_alloc);
   auto gpu_malloc = [](void* _gpu_alloc, int size) {
     alloc_type_gpu* gpualloc = static_cast<alloc_type_gpu*>(_gpu_alloc);
     return static_cast<void*>((*gpualloc)(size));
   };
-  typedef decltype(sparse_alloc) alloc_type_sparse;
+  using alloc_type_sparse = decltype(sparse_alloc);
   auto sparse_malloc = [](void* _sparse_alloc, int index, int indices_len, int idxptr_len,
                           void** data, int64_t** indices, int64_t** indptr) {
     alloc_type_sparse* sparsealloc = static_cast<alloc_type_sparse*>(_sparse_alloc);
@@ -1209,7 +1209,7 @@ void registerPasses(void *lib, int verbose) {
   // create no-capture lambda so that we can cast it to function pointer
   // lambda with captures cannot be cast to function pointer and pass to lib_api.h
   // this needs to be a lambda function so that we can do the decltype cast
-  typedef decltype(ndarray_alloc) alloc_type_ndarray;
+  using alloc_type_ndarray = decltype(ndarray_alloc);
   auto ndarray_malloc = [](const void* _ndarray_alloc, const int64_t* shapes, int num_shapes,
                            const char* dev_str, int dev_id, int dtype, const char* name,
                            int isArg, void** data) {
@@ -2160,7 +2160,7 @@ int MXDataIterCreateIter(DataIterCreator creator,
     iter = e->body();
     std::vector<std::pair<std::string, std::string> > kwargs;
     for (uint32_t i = 0; i < num_param; ++i) {
-      kwargs.push_back({std::string(keys[i]), std::string(vals[i])});
+      kwargs.emplace_back(std::string(keys[i]), std::string(vals[i]));
     }
     iter->Init(kwargs);
     *out = iter;
@@ -2287,7 +2287,7 @@ int MXDatasetCreateDataset(DatasetCreator handle,
     DatasetReg *e = static_cast<DatasetReg*>(handle);
    std::vector<std::pair<std::string, std::string> > kwargs;
     for (uint32_t i = 0; i < num_param; ++i) {
-      kwargs.push_back({std::string(keys[i]), std::string(vals[i])});
+      kwargs.emplace_back(std::string(keys[i]), std::string(vals[i]));
     }
     dataset = e->body(kwargs);
     *out = new std::shared_ptr(dataset);
@@ -2304,7 +2304,7 @@ int MXDatasetGetDatasetInfo(DatasetCreator creator,
   DatasetReg *e = static_cast<DatasetReg*>(creator);
   return MXAPIGetFunctionRegInfo(e, name, description, num_args,
                                  arg_names, arg_type_infos, arg_descriptions,
-                                 NULL);
+                                 nullptr);
 }
 
 int MXDatasetFree(DatasetHandle handle) {
@@ -2375,7 +2375,7 @@ int MXBatchifyFunctionCreateFunction(BatchifyFunctionCreator handle,
     BatchifyFunctionReg *e = static_cast<BatchifyFunctionReg*>(handle);
     std::vector<std::pair<std::string, std::string> > kwargs;
     for (uint32_t i = 0; i < num_param; ++i) {
-      kwargs.push_back({std::string(keys[i]), std::string(vals[i])});
+      kwargs.emplace_back(std::string(keys[i]), std::string(vals[i]));
     }
     bf = e->body(kwargs);
     *out = new BatchifyFunctionPtr(bf);
@@ -2392,7 +2392,7 @@ int MXBatchifyFunctionGetFunctionInfo(BatchifyFunctionCreator creator,
   BatchifyFunctionReg *e = static_cast<BatchifyFunctionReg*>(creator);
   return MXAPIGetFunctionRegInfo(e, name, description, num_args,
                                  arg_names, arg_type_infos, arg_descriptions,
-                                 NULL);
+                                 nullptr);
 }
 
 int MXBatchifyFunctionInvoke(BatchifyFunctionHandle handle,
                              int batch_size,
@@ -3149,8 +3149,8 @@ int MXNDArrayCreateFromSharedMemEx(int shared_pid, int shared_id, const int *sha
   API_END();
 }
 
-typedef Engine::VarHandle VarHandle;
-typedef Engine::CallbackOnComplete CallbackOnComplete;
+using VarHandle = Engine::VarHandle;
+using CallbackOnComplete = Engine::CallbackOnComplete;
 
 void AssertValidNumberVars(int num_const_vars, int num_mutable_vars) {
   CHECK_GE(num_const_vars, 0) << "Non-negative number of const vars expected.";
diff --git a/src/c_api/c_api_profile.cc b/src/c_api/c_api_profile.cc
index d0ad6a163a0a..79d11b92dff6 100644
--- a/src/c_api/c_api_profile.cc
+++ b/src/c_api/c_api_profile.cc
@@ -61,7 +61,7 @@ class ProfilingThreadData {
   /*!
    * \brief Constructor, nothrow
    */
-  inline ProfilingThreadData() noexcept {}
+  inline ProfilingThreadData() = default;
 
   /*!
* \brief Retreive ProfileTask object of the given name, or create if it doesn't exist diff --git a/src/c_api/c_api_symbolic.cc b/src/c_api/c_api_symbolic.cc index 3052256c825d..06f956bc20e9 100644 --- a/src/c_api/c_api_symbolic.cc +++ b/src/c_api/c_api_symbolic.cc @@ -1066,7 +1066,7 @@ int MXQuantizeSymbol(SymbolHandle sym_handle, g = ApplyPass(std::move(g), "QuantizeGraph"); const auto& calib_nodes = g.GetAttr>("calib_nodes"); MXAPIThreadLocalEntry<> *ret = MXAPIThreadLocalStore<>::Get(); - ret->ret_vec_str = std::move(calib_nodes); + ret->ret_vec_str = calib_nodes; *out_num_calib_names = ret->ret_vec_str.size(); ret->ret_vec_charp.clear(); ret->ret_vec_charp.reserve(ret->ret_vec_str.size()); @@ -1130,21 +1130,21 @@ static void _UpdateSymDTypeAttrs( // Update args to have the right dtype attrs if (model_params.size() > 0) { // if model params provided, set dtype only for model params - for (size_t i = 0; i < args.size(); ++i) { - const std::string& node_name = args[i]->attrs.name; + for (const auto & arg : args) { + const std::string& node_name = arg->attrs.name; auto it_model_params = model_params.find(node_name); auto it_with_dtype = node_name_dtype_map.find(node_name); auto it_without_dtype = node_without_dtype_map.find(node_name); if (it_model_params != model_params.end()) { // need to update __dtype__ attribute if already set, else set it if (it_with_dtype != node_name_dtype_map.end()) { - args[i]->attrs.dict[dtype_keyword] = + arg->attrs.dict[dtype_keyword] = std::to_string(it_with_dtype->second); } else { CHECK(it_without_dtype != node_without_dtype_map.end()) << "make sure all nodes without dtype have properly been added " "in node_without_dtype_map"; - args[i]->attrs.dict[dtype_keyword] = + arg->attrs.dict[dtype_keyword] = std::to_string(it_without_dtype->second); } } @@ -1152,12 +1152,12 @@ static void _UpdateSymDTypeAttrs( } else { // if model params not provided, update __dtype__ for all inputs, // which already had it set, don't touch the rest - for (size_t i = 0; i < args.size(); ++i) { - auto it = node_name_dtype_map.find(args[i]->attrs.name); + for (const auto & arg : args) { + auto it = node_name_dtype_map.find(arg->attrs.name); if (it != node_name_dtype_map.end()) { - if (args[i]->attrs.dict.find(dtype_keyword) != - args[i]->attrs.dict.end()) { - args[i]->attrs.dict[dtype_keyword] = std::to_string(it->second); + if (arg->attrs.dict.find(dtype_keyword) != + arg->attrs.dict.end()) { + arg->attrs.dict[dtype_keyword] = std::to_string(it->second); } } } @@ -1256,7 +1256,7 @@ int MXReducePrecisionSymbol(SymbolHandle sym_handle, const nnvm::DTypeVector &inferred_dtypes = g.GetAttr("dtype"); - g.attrs["inferred_dtypes"] = std::make_shared(std::move(inferred_dtypes)); + g.attrs["inferred_dtypes"] = std::make_shared(inferred_dtypes); g.attrs["target_dtype"] = std::make_shared(target_dt); if (cast_optional_params) { diff --git a/src/engine/naive_engine.cc b/src/engine/naive_engine.cc index 5e51199e3304..89f6b4d99089 100644 --- a/src/engine/naive_engine.cc +++ b/src/engine/naive_engine.cc @@ -22,6 +22,7 @@ * \file naive_engine.cc * \brief Implementation of NaiveEngine */ +#include #include #include #include @@ -67,8 +68,8 @@ class NaiveEngine final : public Engine { objpool_var_ref_ = common::ObjectPool::_GetSharedRef(); } // virtual destructor - virtual ~NaiveEngine() { #if MXNET_USE_CUDA + ~NaiveEngine() override { LOG(INFO) << "Engine shutdown"; for (size_t i = 0; i < streams_.size(); ++i) { if (streams_[i] != nullptr) { @@ -83,8 +84,10 @@ class NaiveEngine final : public Engine 
{ aux_streams_[i] = nullptr; } } -#endif } +#else + ~NaiveEngine() override = default; +#endif void Stop() override { } @@ -125,10 +128,10 @@ class NaiveEngine final : public Engine { if (opr->profiling) { std::unique_ptr attrs; if (profiler->AggregateEnabled()) { - attrs.reset(new profiler::ProfileOperator::Attributes()); + attrs = std::make_unique(); } - opr->opr_profile.reset(new profiler::ProfileOperator(opr->opr_name.c_str(), - attrs.release())); + opr->opr_profile = std::make_unique(opr->opr_name.c_str(), + attrs.release()); opr->opr_profile->startForDevice(exec_ctx.dev_type, exec_ctx.dev_id); } opr->fn(ctx, on_complete); @@ -175,9 +178,10 @@ class NaiveEngine final : public Engine { opr->profiling = profiling; std::unique_ptr attrs; if (profiler->AggregateEnabled()) { - attrs.reset(new profiler::ProfileOperator::Attributes()); + attrs = std::make_unique(); } - opr->opr_profile.reset(new profiler::ProfileOperator(opr->opr_name.c_str(), attrs.release())); + opr->opr_profile = std::make_unique(opr->opr_name.c_str(), + attrs.release()); opr->opr_profile->startForDevice(exec_ctx.dev_type, exec_ctx.dev_id); } if (exec_ctx.dev_mask() == gpu::kDevMask) { diff --git a/src/engine/threaded_engine_perdevice.cc b/src/engine/threaded_engine_perdevice.cc index 2184d784a414..81494ec66096 100644 --- a/src/engine/threaded_engine_perdevice.cc +++ b/src/engine/threaded_engine_perdevice.cc @@ -28,6 +28,8 @@ #include #include #include + +#include #include "../initialize.h" #include "./threaded_engine.h" #include "./thread_pool.h" @@ -55,7 +57,7 @@ class ThreadedEnginePerDevice : public ThreadedEngine { ThreadedEnginePerDevice() noexcept(false) { this->Start(); } - ~ThreadedEnginePerDevice() noexcept(false) { + ~ThreadedEnginePerDevice() noexcept(false) override { this->StopNoWait(); } @@ -82,12 +84,12 @@ class ThreadedEnginePerDevice : public ThreadedEngine { gpu_copy_nthreads_ = dmlc::GetEnv("MXNET_GPU_COPY_NTHREADS", 2); // create CPU task int cpu_priority_nthreads = dmlc::GetEnv("MXNET_CPU_PRIORITY_NTHREADS", 4); - cpu_priority_worker_.reset(new ThreadWorkerBlock()); - cpu_priority_worker_->pool.reset(new ThreadPool( + cpu_priority_worker_ = std::make_unique>(); + cpu_priority_worker_->pool = std::make_unique( cpu_priority_nthreads, [this](std::shared_ptr ready_event) { this->CPUWorker(Context(), cpu_priority_worker_.get(), ready_event); - }, true)); + }, true); // GPU tasks will be created lazily } @@ -113,10 +115,10 @@ class ThreadedEnginePerDevice : public ThreadedEngine { auto ptr = cpu_normal_workers_.Get(dev_id, [this, ctx, nthread]() { auto blk = new ThreadWorkerBlock(); - blk->pool.reset(new ThreadPool(nthread, + blk->pool = std::make_unique(nthread, [this, ctx, blk](std::shared_ptr ready_event) { this->CPUWorker(ctx, blk, ready_event); - }, true)); + }, true); return blk; }); if (ptr) { @@ -139,12 +141,12 @@ class ThreadedEnginePerDevice : public ThreadedEngine { // Signify to kernel that GPU is being used, so reserve cores as necessary OpenMP::Get()->set_reserve_cores(GetReserveCoreCount(true)); auto blk = new ThreadWorkerBlock(); - blk->pool.reset(new ThreadPool( + blk->pool = std::make_unique( nthread, [this, ctx, is_copy, blk] (std::shared_ptr ready_event) { this->GPUWorker(ctx, is_copy, blk, ready_event); - }, true)); + }, true); return blk; }); if (ptr) { @@ -162,12 +164,12 @@ class ThreadedEnginePerDevice : public ThreadedEngine { // Signify to kernel that GPU is being used, so reserve cores as necessary OpenMP::Get()->set_reserve_cores(GetReserveCoreCount(true)); auto blk = new 
ThreadWorkerBlock(); - blk->pool.reset(new ThreadPool( + blk->pool = std::make_unique( nthread, [this, ctx, is_copy, blk] (std::shared_ptr ready_event) { this->GPUWorker(ctx, is_copy, blk, ready_event); - }, true)); + }, true); return blk; }); if (ptr) { @@ -179,12 +181,12 @@ class ThreadedEnginePerDevice : public ThreadedEngine { // Signify to kernel that GPU is being used, so reserve cores as necessary OpenMP::Get()->set_reserve_cores(GetReserveCoreCount(true)); auto blk = new ThreadWorkerBlock(); - blk->pool.reset(new ThreadPool( + blk->pool = std::make_unique( nthread, [this, ctx, is_copy, blk] (std::shared_ptr ready_event) { this->GPUWorker(ctx, is_copy, blk, ready_event); - }, true)); + }, true); return blk; }); if (ptr) { @@ -211,7 +213,7 @@ class ThreadedEnginePerDevice : public ThreadedEngine { // constructor ThreadWorkerBlock() = default; // destructor - ~ThreadWorkerBlock() noexcept(false) {} + ~ThreadWorkerBlock() = default; }; /*! \brief whether this is a worker thread. */ diff --git a/src/engine/threaded_engine_pooled.cc b/src/engine/threaded_engine_pooled.cc index c6eb99508e09..1304594e24a8 100644 --- a/src/engine/threaded_engine_pooled.cc +++ b/src/engine/threaded_engine_pooled.cc @@ -27,6 +27,7 @@ #include #include #include +#include #include #include "./threaded_engine.h" #include "./thread_pool.h" @@ -50,7 +51,7 @@ class ThreadedEnginePooled : public ThreadedEngine { this->Start(); } - ~ThreadedEnginePooled() noexcept(false) { + ~ThreadedEnginePooled() noexcept(false) override { StopNoWait(); } @@ -71,17 +72,17 @@ class ThreadedEnginePooled : public ThreadedEngine { } void Start() override { - streams_.reset(new StreamManager()); + streams_ = std::make_unique>(); task_queue_.reset(new dmlc::ConcurrentBlockingQueue()); io_task_queue_.reset(new dmlc::ConcurrentBlockingQueue()); - thread_pool_.reset(new ThreadPool(kNumWorkingThreads, + thread_pool_ = std::make_unique(kNumWorkingThreads, [this](std::shared_ptr ready_event) { ThreadWorker(task_queue_, ready_event); }, - true)); - io_thread_pool_.reset(new ThreadPool(1, + true); + io_thread_pool_ = std::make_unique(1, [this](std::shared_ptr ready_event) { ThreadWorker(io_task_queue_, ready_event); }, - true)); + true); } protected: diff --git a/src/imperative/attach_op_execs_pass.cc b/src/imperative/attach_op_execs_pass.cc index 8f47bc29db13..d065aff5ac92 100644 --- a/src/imperative/attach_op_execs_pass.cc +++ b/src/imperative/attach_op_execs_pass.cc @@ -27,6 +27,8 @@ #include #include #include + +#include #include "../common/utils.h" #include "../common/exec_utils.h" #include "./exec_pass.h" @@ -45,8 +47,8 @@ namespace exec { // FComputeExecutor and FStatefulComputeExecutor inherit from this class class StorageFallbackOpExecutor : public OpExecutor { public: - explicit StorageFallbackOpExecutor(const std::vector &mutate_idx) - : mutate_idx_(mutate_idx) {} + explicit StorageFallbackOpExecutor(std::vector mutate_idx) + : mutate_idx_(std::move(mutate_idx)) {} void Setup() override { init_ = false; @@ -136,12 +138,12 @@ class StatefulComputeExecutor : public StorageFallbackOpExecutor { return state_; } - explicit StatefulComputeExecutor(const OpStatePtr& state, - const FStatefulCompute& fcompute, + explicit StatefulComputeExecutor(OpStatePtr state, + FStatefulCompute fcompute, ExecType exec_type, const std::vector &mutate_idx) : StorageFallbackOpExecutor(mutate_idx), - state_(state), fcompute_(fcompute), exec_type_(exec_type) {} + state_(std::move(state)), fcompute_(std::move(fcompute)), exec_type_(exec_type) {} private: 
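// A minimal sketch of the modernize-pass-by-value pattern applied to the
// executor constructors above (Executor is a hypothetical stand-in): sink
// parameters are taken by value and moved into members, so lvalue arguments
// cost one copy and rvalue arguments cost only moves.
#include <string>
#include <utility>

class Executor {
 public:
  explicit Executor(std::string name)  // by value instead of const std::string&
      : name_(std::move(name)) {}      // then moved into the member
  const std::string& name() const { return name_; }

 private:
  std::string name_;
};

int main() {
  std::string n = "fcompute";
  Executor a(n);              // one copy, as before
  Executor b("fcompute_ex");  // temporary is moved, no copy
  return a.name().size() == 8 && b.name().size() == 11 ? 0 : 1;
}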
OpStatePtr state_; @@ -182,11 +184,12 @@ class StatefulComputeExExecutor : public OpExecutor { return state_; } - explicit StatefulComputeExExecutor(const NodeAttrs& attrs, - const OpStatePtr& state, - const FStatefulComputeEx& fcompute, + explicit StatefulComputeExExecutor(NodeAttrs attrs, + OpStatePtr state, + FStatefulComputeEx fcompute, ExecType exec_type) - : attrs_(attrs), state_(state), fcompute_(fcompute), exec_type_(exec_type) {} + : attrs_(std::move(attrs)), state_(std::move(state)), fcompute_(std::move(fcompute)), + exec_type_(exec_type) {} private: NodeAttrs attrs_; @@ -214,10 +217,10 @@ class FComputeExecutor : public StorageFallbackOpExecutor { return exec_type_; } - explicit FComputeExecutor(const NodeAttrs& attrs, FCompute fcompute, + explicit FComputeExecutor(NodeAttrs attrs, FCompute fcompute, ExecType exec_type, const std::vector &mutate_idx) : StorageFallbackOpExecutor(mutate_idx), - attrs_(attrs), fcompute_(fcompute), exec_type_(exec_type) { + attrs_(std::move(attrs)), fcompute_(std::move(fcompute)), exec_type_(exec_type) { } private: @@ -250,9 +253,9 @@ class FComputeExExecutor : public OpExecutor { return exec_type_; } - explicit FComputeExExecutor(const NodeAttrs& attrs, FComputeEx fcompute, + explicit FComputeExExecutor(NodeAttrs attrs, FComputeEx fcompute, ExecType exec_type) - : attrs_(attrs), fcompute_(fcompute), exec_type_(exec_type) { + : attrs_(std::move(attrs)), fcompute_(std::move(fcompute)), exec_type_(exec_type) { } private: diff --git a/src/imperative/cached_op.cc b/src/imperative/cached_op.cc index 7b3a5d32aac6..e0f832917203 100644 --- a/src/imperative/cached_op.cc +++ b/src/imperative/cached_op.cc @@ -16,6 +16,7 @@ * specific language governing permissions and limitations * under the License. */ +#include #include #include #include "./imperative_utils.h" @@ -87,8 +88,7 @@ CachedOp::CachedOp( SetRefCounts(&fwd_graph_, full_graph_); } -CachedOp::~CachedOp() { -} +CachedOp::~CachedOp() = default; std::vector CachedOp::Gradient( const nnvm::ObjectPtr& node, @@ -1286,7 +1286,7 @@ void CachedOpParamParser(nnvm::NodeAttrs* attrs) { std::vector > flags; for (const auto& attr : attrs->dict) flags.emplace_back(attr.first, attr.second); - attrs->parsed = CachedOpPtr(new CachedOp(sym, flags)); + attrs->parsed = std::make_shared(sym, flags); } } diff --git a/src/imperative/cached_op_threadsafe.cc b/src/imperative/cached_op_threadsafe.cc index 744daf08564b..7d93eb84bd11 100644 --- a/src/imperative/cached_op_threadsafe.cc +++ b/src/imperative/cached_op_threadsafe.cc @@ -250,7 +250,7 @@ void CachedOpThreadSafeParamParser(nnvm::NodeAttrs* attrs) { throw dmlc::ParamError(os.str()); } } -CachedOpThreadSafe::~CachedOpThreadSafe() {} +CachedOpThreadSafe::~CachedOpThreadSafe() = default; NNVM_REGISTER_OP(_CachedOpThreadSafe) .set_num_inputs([](const NodeAttrs& attrs) { diff --git a/src/imperative/eliminate_common_expr_pass.cc b/src/imperative/eliminate_common_expr_pass.cc index 805b6acca590..a0156da94746 100644 --- a/src/imperative/eliminate_common_expr_pass.cc +++ b/src/imperative/eliminate_common_expr_pass.cc @@ -184,10 +184,10 @@ void EliminateCommonNodes(Graph* g, // insert Copy nodes as appropriate const Op* copy_op = Op::Get("_copy"); nnvm::NodeEntryMap unique_outputs; - for (size_t i = 0; i < g->outputs.size(); ++i) { - auto kv = unique_outputs.find(g->outputs[i]); + for (auto & output : g->outputs) { + auto kv = unique_outputs.find(output); if (kv == unique_outputs.end()) { - unique_outputs.emplace(g->outputs[i], 0); + unique_outputs.emplace(output, 0); } else { 
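// A minimal sketch of the modernize-loop-convert rewrite seen above: an index
// loop whose index only subscripts one container becomes a range-for, and
// taking the element as `auto&` is what permits in-place mutation, as in the
// g->outputs rewrite in EliminateCommonNodes.
#include <vector>

int main() {
  std::vector<int> outputs{1, 2, 3};
  for (size_t i = 0; i < outputs.size(); ++i) {  // pre-patch spelling
    outputs[i] += 1;
  }
  for (auto& output : outputs) {                 // post-patch spelling
    output += 1;
  }
  return outputs[0] - 3;
}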
ObjectPtr copy_node = Node::Create(); std::ostringstream os; @@ -196,7 +196,7 @@ void EliminateCommonNodes(Graph* g, copy_node->attrs.op = copy_op; copy_node->attrs.name = os.str(); copy_node->inputs.emplace_back(kv->first); - g->outputs[i] = nnvm::NodeEntry{copy_node, 0, 0}; + output = nnvm::NodeEntry{copy_node, 0, 0}; } } } diff --git a/src/imperative/imperative.cc b/src/imperative/imperative.cc index 45fdf549b0ed..9e162d4b74ea 100644 --- a/src/imperative/imperative.cc +++ b/src/imperative/imperative.cc @@ -119,11 +119,11 @@ OpStatePtr Imperative::Invoke( SetWriteInplaceReq(inputs, outputs, &req); OpStatePtr ret = InvokeOp(ctx, attrs, inputs, outputs, req, dispatch_mode); // the following loop is used for finding out the correct shape when some shapes are dynamic - for (size_t i = 0; i < outputs.size(); i++) { - if (!shape_is_known(outputs[i]->shape())) { + for (auto output : outputs) { + if (!shape_is_known(output->shape())) { // the WaitToRead overhead here does not seem to be avoidable - outputs[i]->WaitToRead(); - outputs[i]->SetShapeFromChunk(); + output->WaitToRead(); + output->SetShapeFromChunk(); } } return ret; diff --git a/src/initialize.cc b/src/initialize.cc index a352c0a22024..784e54f9a9d7 100644 --- a/src/initialize.cc +++ b/src/initialize.cc @@ -23,7 +23,6 @@ * \brief initialize mxnet library */ #include "initialize.h" -#include #include #include #include "./engine/openmp.h" @@ -34,7 +33,6 @@ #include "common/utils.h" #include "engine/openmp.h" - #if defined(_WIN32) || defined(_WIN64) || defined(__WINDOWS__) #include /*! @@ -57,6 +55,9 @@ void win_err(char **err) { #include #endif +#include + + namespace mxnet { #if MXNET_USE_SIGNAL_HANDLER && DMLC_LOG_STACK_TRACE diff --git a/src/io/batchify.cc b/src/io/batchify.cc index 27ed850522ff..01d93f5cad8f 100644 --- a/src/io/batchify.cc +++ b/src/io/batchify.cc @@ -73,8 +73,8 @@ class GroupBatchify : public BatchifyFunction { } } - virtual bool Batchify(const std::vector >& inputs, - std::vector* outputs) { + bool Batchify(const std::vector >& inputs, + std::vector* outputs) override { auto bs = inputs.size(); CHECK_GT(bs, 0) << "BatchifyFunction should handle at least 1 sample"; auto out_size = inputs[0].size(); @@ -84,8 +84,8 @@ class GroupBatchify : public BatchifyFunction { for (size_t i = 0; i < out_size; ++i) { std::vector > inp; inp.reserve(inputs.size()); - for (size_t j = 0; j < inputs.size(); ++j) { - std::vector curr({inputs[j][i]}); + for (const auto & input : inputs) { + std::vector curr({input[i]}); inp.emplace_back(curr); } std::vector tmp; @@ -128,8 +128,8 @@ class StackBatchify : public BatchifyFunction { param_.InitAllowUnknown(kwargs); } - virtual bool Batchify(const std::vector >& inputs, - std::vector* outputs) { + bool Batchify(const std::vector >& inputs, + std::vector* outputs) override { auto out_size = SanityCheck(inputs); auto bs = inputs.size(); outputs->resize(out_size); @@ -235,8 +235,8 @@ class PadBatchify : public BatchifyFunction { param_.InitAllowUnknown(kwargs); } - virtual bool Batchify(const std::vector >& inputs, - std::vector* outputs) { + bool Batchify(const std::vector >& inputs, + std::vector* outputs) override { auto bs = inputs.size(); CHECK_GT(bs, 0) << "BatchifyFunction should handle at least 1 sample"; auto out_size = inputs[0].size(); diff --git a/src/io/dataloader.cc b/src/io/dataloader.cc index 947c26202b5c..47754470453c 100644 --- a/src/io/dataloader.cc +++ b/src/io/dataloader.cc @@ -63,13 +63,11 @@ DMLC_REGISTER_PARAMETER(ThreadedDataLoaderParam); template class
ThreadedDataLoader : public IIterator { public: - ThreadedDataLoader() { - } + ThreadedDataLoader() = default; // destructor - virtual ~ThreadedDataLoader(void) { - } + ~ThreadedDataLoader() override = default; // constructor - void Init(const std::vector >& kwargs) { + void Init(const std::vector >& kwargs) override { param_.InitAllowUnknown(kwargs); int maxthread, threadget; #pragma omp parallel @@ -90,15 +88,15 @@ class ThreadedDataLoader : public IIterator { this->BeforeFirst(); } // before first - void BeforeFirst(void) { + void BeforeFirst() override { sampler_->BeforeFirst(); } - int64_t GetLenHint(void) const { + int64_t GetLenHint() const override { return sampler_->GetLenHint(); } - bool Next(void) { + bool Next() override { bool has_next = sampler_->Next(); if (!has_next) return false; auto samples = sampler_->Value(); @@ -152,7 +150,7 @@ class ThreadedDataLoader : public IIterator { return true; } - const TBlobBatch &Value(void) const { + const TBlobBatch &Value() const override { return out_; } diff --git a/src/io/dataset.cc b/src/io/dataset.cc index 4c47f440150f..ba548c968b19 100644 --- a/src/io/dataset.cc +++ b/src/io/dataset.cc @@ -29,6 +29,7 @@ #include #include +#include #include #include #include @@ -75,11 +76,11 @@ class RecordFileDataset final : public Dataset { delete idx_stream; } - uint64_t GetLen() const { + uint64_t GetLen() const override { return idx_.size(); } - bool GetItem(uint64_t idx, std::vector* ret) { + bool GetItem(uint64_t idx, std::vector* ret) override { ret->resize(1); auto& out = (*ret)[0]; static thread_local std::unique_ptr stream; @@ -193,11 +194,11 @@ class ImageRecordFileDataset : public Dataset { base_ = std::make_shared(kwargs); } - uint64_t GetLen() const { + uint64_t GetLen() const override { return base_->GetLen(); } - bool GetItem(uint64_t idx, std::vector* ret) { + bool GetItem(uint64_t idx, std::vector* ret) override { CHECK_LT(idx, GetLen()); std::vector raw; if (!base_->GetItem(idx, &raw)) return false; @@ -292,11 +293,11 @@ class ImageSequenceDataset final : public Dataset { img_list_ = dmlc::Split(param_.img_list, param_.path_sep); } - uint64_t GetLen() const { + uint64_t GetLen() const override { return img_list_.size(); } - bool GetItem(uint64_t idx, std::vector* ret) { + bool GetItem(uint64_t idx, std::vector* ret) override { #if MXNET_USE_OPENCV CHECK_LT(idx, img_list_.size()) << "GetItem index: " << idx << " out of bound: " << img_list_.size(); @@ -355,11 +356,11 @@ class NDArrayDataset final : public Dataset { size_ = data_.shape().begin()[0]; } - uint64_t GetLen() const { + uint64_t GetLen() const override { return size_; } - bool GetItem(uint64_t idx, std::vector* rets) { + bool GetItem(uint64_t idx, std::vector* rets) override { CHECK_LT(idx, size_) << "GetItem index: " << idx << " out of bound: " << size_; rets->resize(1); @@ -430,11 +431,11 @@ class GroupDataset final : public Dataset { } } - uint64_t GetLen() const { + uint64_t GetLen() const override { return size_; } - bool GetItem(uint64_t idx, std::vector* ret) { + bool GetItem(uint64_t idx, std::vector* ret) override { CHECK_LT(idx, size_) << "GetItem index: " << idx << " out of bound: " << size_; ret->clear(); @@ -485,11 +486,11 @@ class IndexedDataset final : public Dataset { base_data_ = *static_cast*>(reinterpret_cast(param_.base)); } - uint64_t GetLen() const { + uint64_t GetLen() const override { return param_.indices.ndim(); } - bool GetItem(uint64_t idx, std::vector* ret) { + bool GetItem(uint64_t idx, std::vector* ret) override { 
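// A minimal sketch of the modernize-use-equals-default rewrite applied to
// ThreadedDataLoader above (Loader/ThreadedLoader are hypothetical): an empty
// hand-written constructor or destructor is better spelled `= default`, which
// documents that no custom logic exists and preserves triviality where possible.
struct Loader {
  Loader() = default;           // instead of: Loader() {}
  virtual ~Loader() = default;  // instead of: virtual ~Loader() {}
};

struct ThreadedLoader : Loader {
  ~ThreadedLoader() override = default;  // `override` replaces a repeated `virtual`
};

int main() {
  ThreadedLoader t;
  return 0;
}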
CHECK_GT(param_.indices.ndim(), idx) << "IndexError: " << idx << " from total: " << param_.indices.ndim(); auto new_idx = param_.indices[idx]; @@ -545,15 +546,15 @@ class LazyTransformDataset final : public Dataset { this->pass_through_indices_ = other.pass_through_indices_; this->use_input_indices_ = other.use_input_indices_; this->num_outputs_ = other.num_outputs_; - this->cached_op_ = NaiveCachedOpPtr(new NaiveCachedOp( - other.cached_op_->sym_, other.cached_op_->flags_)); + this->cached_op_ = std::make_shared<NaiveCachedOp>( + other.cached_op_->sym_, other.cached_op_->flags_); this->base_data_ = other.base_data_; } explicit LazyTransformDataset(const std::vector >& kwargs) { param_.InitAllowUnknown(kwargs); auto op = *static_cast(reinterpret_cast(param_.cached_op)); - cached_op_ = NaiveCachedOpPtr(new NaiveCachedOp(op->sym_, op->flags_)); + cached_op_ = std::make_shared<NaiveCachedOp>(op->sym_, op->flags_); base_data_ = *static_cast*>(reinterpret_cast(param_.dataset)); // use first item to calculate size info @@ -596,14 +597,13 @@ class LazyTransformDataset final : public Dataset { num_outputs_ = inputs.size() + cached_op_->num_outputs() - cached_op_->num_inputs(); } - virtual ~LazyTransformDataset(void) { - } + ~LazyTransformDataset() override = default; - uint64_t GetLen() const { + uint64_t GetLen() const override { return base_data_->GetLen(); } - bool GetItem(uint64_t idx, std::vector* outputs) { + bool GetItem(uint64_t idx, std::vector* outputs) override { std::vector inputs; if (!base_data_->GetItem(idx, &inputs)) return false; outputs->reserve(num_outputs_); @@ -616,8 +616,8 @@ class LazyTransformDataset final : public Dataset { std::vector ndinputs; std::vector ndoutputs; ndinputs.reserve(inputs.size()); - for (size_t i = 0; i < use_input_indices_.size(); ++i) { - ndinputs.emplace_back(&(inputs[use_input_indices_[i]])); + for (int use_input_indice : use_input_indices_) { + ndinputs.emplace_back(&(inputs[use_input_indice])); } ndoutputs.reserve(cached_op_->num_outputs()); CHECK_LE(cached_op_->num_outputs(), outputs->size()); @@ -625,8 +625,8 @@ class LazyTransformDataset final : public Dataset { ndoutputs.emplace_back(&(outputs->at(i))); } - for (size_t i = 0; i < inputs.size(); ++i) { - inputs[i].WaitToRead(); + for (auto & input : inputs) { + input.WaitToRead(); } CHECK(inputs.size() > 0) << "dataset getitem requires at least one input"; Context default_ctx = inputs[0].ctx(); diff --git a/src/io/image_aug_default.cc b/src/io/image_aug_default.cc index c26ebf857a41..c39777ba3054 100644 --- a/src/io/image_aug_default.cc +++ b/src/io/image_aug_default.cc @@ -205,7 +205,7 @@ std::vector ListDefaultAugParams() { class DefaultImageAugmenter : public ImageAugmenter { public: // constructor - DefaultImageAugmenter() {} + DefaultImageAugmenter() = default; void Init(const std::vector >& kwargs) override { std::vector > kwargs_left; kwargs_left = param_.InitAllowUnknown(kwargs); diff --git a/src/io/image_det_aug_default.cc b/src/io/image_det_aug_default.cc index f602a63954a3..6b3109fbce19 100644 --- a/src/io/image_det_aug_default.cc +++ b/src/io/image_det_aug_default.cc @@ -404,7 +404,7 @@ class ImageDetLabel { class DefaultImageDetAugmenter : public ImageAugmenter { public: // constructor - DefaultImageDetAugmenter() {} + DefaultImageDetAugmenter() = default; void Init(const std::vector >& kwargs) override { std::vector > kwargs_left; diff --git a/src/io/iter_csv.cc b/src/io/iter_csv.cc index 0c1b82355410..87f295df544f 100644 --- a/src/io/iter_csv.cc +++ b/src/io/iter_csv.cc @@ -62,16 +62,16 @@ class CSVIterBase:
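// A minimal sketch of the modernize-make-shared rewrite used for NaiveCachedOp
// above (Op here is a hypothetical type): shared_ptr<T>(new T(...)) performs
// two allocations (object, then control block), while std::make_shared
// performs one allocation holding both.
#include <memory>
#include <string>
#include <utility>

struct Op {
  explicit Op(std::string sym) : sym_(std::move(sym)) {}
  std::string sym_;
};
using OpPtr = std::shared_ptr<Op>;

int main() {
  OpPtr a(new Op("gemm"));                 // pre-patch: two allocations
  OpPtr b = std::make_shared<Op>("gemm");  // post-patch: one allocation
  return a->sym_ == b->sym_ ? 0 : 1;
}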
public IIterator { CSVIterBase() { out_.data.resize(2); } - virtual ~CSVIterBase() {} + ~CSVIterBase() override = default; // initialize iterator loads data in - virtual void Init(const std::vector >& kwargs) = 0; + void Init(const std::vector >& kwargs) override = 0; /*! \brief reset the iterator */ - virtual void BeforeFirst(void) = 0; + void BeforeFirst() override = 0; /*! \brief move to next item */ - virtual bool Next(void) = 0; + bool Next() override = 0; /*! \brief get current data */ - virtual const DataInst &Value(void) const { + const DataInst &Value() const override { return out_; } @@ -93,9 +93,9 @@ class CSVIterBase: public IIterator { template class CSVIterTyped: public CSVIterBase { public: - virtual ~CSVIterTyped() {} + ~CSVIterTyped() override = default; // initialize iterator loads data in - virtual void Init(const std::vector >& kwargs) { + void Init(const std::vector >& kwargs) override { param_.InitAllowUnknown(kwargs); data_parser_.reset(dmlc::Parser::Create(param_.data_csv.c_str(), 0, 1, "csv")); if (param_.label_csv != "NULL") { @@ -108,7 +108,7 @@ class CSVIterTyped: public CSVIterBase { } } - virtual void BeforeFirst() { + void BeforeFirst() override { data_parser_->BeforeFirst(); if (label_parser_.get() != nullptr) { label_parser_->BeforeFirst(); @@ -119,7 +119,7 @@ class CSVIterTyped: public CSVIterBase { end_ = false; } - virtual bool Next() { + bool Next() override { if (end_) return false; while (data_ptr_ >= data_size_) { if (!data_parser_->Next()) { @@ -163,11 +163,11 @@ class CSVIterTyped: public CSVIterBase { class CSVIter: public IIterator { public: - CSVIter() {} - virtual ~CSVIter() {} + CSVIter() = default; + ~CSVIter() override = default; // initialize iterator loads data in - virtual void Init(const std::vector >& kwargs) { + void Init(const std::vector >& kwargs) override { param_.InitAllowUnknown(kwargs); bool dtype_has_value = false; int target_dtype = -1; @@ -195,15 +195,15 @@ class CSVIter: public IIterator { iterator_->Init(kwargs); } - virtual void BeforeFirst() { + void BeforeFirst() override { iterator_->BeforeFirst(); } - virtual bool Next() { + bool Next() override { return iterator_->Next(); } - virtual const DataInst &Value(void) const { + const DataInst &Value() const override { return iterator_->Value(); } diff --git a/src/io/iter_image_det_recordio.cc b/src/io/iter_image_det_recordio.cc index 876c07520f52..3fe0ec7f3e17 100644 --- a/src/io/iter_image_det_recordio.cc +++ b/src/io/iter_image_det_recordio.cc @@ -32,6 +32,7 @@ #include #include #include +#include #include #include #include @@ -213,7 +214,7 @@ class ImageDetRecordIOParser { inline void Init(const std::vector >& kwargs); // set record to the head - inline void BeforeFirst(void) { + inline void BeforeFirst() { return source_->BeforeFirst(); } // parse next set of records, return an array of @@ -273,8 +274,8 @@ inline void ImageDetRecordIOParser::Init( prnds_.emplace_back(new common::RANDOM_ENGINE((i + 1) * kRandMagic)); } if (param_.path_imglist.length() != 0) { - label_map_.reset(new ImageDetLabelMap(param_.path_imglist.c_str(), - param_.label_width, !param_.verbose)); + label_map_ = std::make_unique<ImageDetLabelMap>(param_.path_imglist.c_str(), + param_.label_width, !param_.verbose); } CHECK(param_.path_imgrec.length() != 0) << "ImageDetRecordIOIterator: must specify image_rec"; @@ -510,12 +511,12 @@ class ImageDetRecordIter : public IIterator { public: ImageDetRecordIter() : data_(nullptr) { } // destructor - virtual ~ImageDetRecordIter(void) { + ~ImageDetRecordIter() override {
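// A minimal sketch of the modernize-use-override rewrite applied to the CSV
// iterators above (Iter/CsvIter are hypothetical): marking overriders with
// `override` (and dropping the redundant `virtual` and `(void)` parameter
// lists) turns a signature mismatch into a compile-time error instead of an
// accidental, separate virtual function.
struct Iter {
  virtual ~Iter() = default;
  virtual bool Next() = 0;
};

struct CsvIter final : Iter {
  bool Next() override { return false; }  // `bool Next() const` would now fail to compile
};

int main() {
  CsvIter it;
  return it.Next() ? 1 : 0;
}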
iter_.Destroy(); delete data_; } // constructor - virtual void Init(const std::vector >& kwargs) { + void Init(const std::vector >& kwargs) override { param_.InitAllowUnknown(kwargs); // use the kwarg to init parser parser_.Init(kwargs); @@ -533,13 +534,13 @@ class ImageDetRecordIter : public IIterator { rnd_.seed(kRandMagic + param_.seed); } // before first - virtual void BeforeFirst(void) { + void BeforeFirst() override { iter_.BeforeFirst(); inst_order_.clear(); inst_ptr_ = 0; } - virtual bool Next(void) { + bool Next() override { while (true) { if (inst_ptr_ < inst_order_.size()) { std::pair p = inst_order_[inst_ptr_]; @@ -553,7 +554,7 @@ class ImageDetRecordIter : public IIterator { for (unsigned i = 0; i < data_->size(); ++i) { const InstVector& tmp = (*data_)[i]; for (unsigned j = 0; j < tmp.Size(); ++j) { - inst_order_.push_back(std::make_pair(i, j)); + inst_order_.emplace_back(i, j); } } // shuffle instance order if needed @@ -566,7 +567,7 @@ class ImageDetRecordIter : public IIterator { return false; } - virtual const DataInst &Value(void) const { + const DataInst &Value() const override { return out_; } diff --git a/src/io/iter_image_recordio.cc b/src/io/iter_image_recordio.cc index 066cad973774..23008050ec28 100644 --- a/src/io/iter_image_recordio.cc +++ b/src/io/iter_image_recordio.cc @@ -32,6 +32,7 @@ #include #include #include +#include #include #include #include @@ -53,7 +54,7 @@ class ImageRecordIOParser { inline void Init(const std::vector >& kwargs); // set record to the head - inline void BeforeFirst(void) { + inline void BeforeFirst() { return source_->BeforeFirst(); } // parse next set of records, return an array of @@ -111,8 +112,8 @@ inline void ImageRecordIOParser::Init( prnds_.emplace_back(new common::RANDOM_ENGINE((i + 1) * kRandMagic)); } if (param_.path_imglist.length() != 0) { - label_map_.reset(new ImageLabelMap(param_.path_imglist.c_str(), - param_.label_width, !param_.verbose)); + label_map_ = std::make_unique(param_.path_imglist.c_str(), + param_.label_width, !param_.verbose); } CHECK(param_.path_imgrec.length() != 0) << "ImageRecordIOIterator: must specify image_rec"; @@ -253,12 +254,12 @@ class ImageRecordIter : public IIterator { public: ImageRecordIter() : data_(nullptr) { } // destructor - virtual ~ImageRecordIter(void) { + ~ImageRecordIter() override { iter_.Destroy(); delete data_; } // constructor - virtual void Init(const std::vector >& kwargs) { + void Init(const std::vector >& kwargs) override { param_.InitAllowUnknown(kwargs); // use the kwarg to init parser parser_.Init(kwargs); @@ -276,13 +277,13 @@ class ImageRecordIter : public IIterator { rnd_.seed(kRandMagic + param_.seed); } // before first - virtual void BeforeFirst(void) { + void BeforeFirst() override { iter_.BeforeFirst(); inst_order_.clear(); inst_ptr_ = 0; } - virtual bool Next(void) { + bool Next() override { while (true) { if (inst_ptr_ < inst_order_.size()) { std::pair p = inst_order_[inst_ptr_]; @@ -296,7 +297,7 @@ class ImageRecordIter : public IIterator { for (unsigned i = 0; i < data_->size(); ++i) { const InstVector& tmp = (*data_)[i]; for (unsigned j = 0; j < tmp.Size(); ++j) { - inst_order_.push_back(std::make_pair(i, j)); + inst_order_.emplace_back(i, j); } } // shuffle instance order if needed @@ -309,7 +310,7 @@ class ImageRecordIter : public IIterator { return false; } - virtual const DataInst &Value(void) const { + const DataInst &Value() const override { return out_; } diff --git a/src/io/iter_image_recordio_2.cc b/src/io/iter_image_recordio_2.cc index 
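// A minimal sketch of the modernize-use-emplace rewrite above:
// emplace_back forwards its arguments to the pair constructor inside the
// vector's storage, avoiding the temporary that push_back(std::make_pair(...))
// materializes first.
#include <utility>
#include <vector>

int main() {
  std::vector<std::pair<unsigned, unsigned>> inst_order;
  inst_order.push_back(std::make_pair(1u, 2u));  // pre-patch: temporary pair, then move
  inst_order.emplace_back(1u, 2u);               // post-patch: constructed in place
  return inst_order.size() == 2 ? 0 : 1;
}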
ad6e2a19fbc2..f4f88b76d65c 100644 --- a/src/io/iter_image_recordio_2.cc +++ b/src/io/iter_image_recordio_2.cc @@ -33,6 +33,7 @@ #include #include #include +#include #include #if MXNET_USE_LIBJPEG_TURBO #include @@ -55,7 +56,7 @@ class ImageRecordIOParser2 { inline void Init(const std::vector >& kwargs); // set record to the head - inline void BeforeFirst(void) { + inline void BeforeFirst() { if (batch_param_.round_batch == 0 || !overflow) { n_parsed_ = 0; return source_->BeforeFirst(); @@ -79,7 +80,7 @@ class ImageRecordIOParser2 { #endif inline size_t ParseChunk(DType* data_dptr, real_t* label_dptr, const size_t current_size, dmlc::InputSplit::Blob * chunk); - inline void CreateMeanImg(void); + inline void CreateMeanImg(); // magic number to seed prng static const int kRandMagic = 111; @@ -169,8 +170,8 @@ inline void ImageRecordIOParser2::Init( prnds_.emplace_back(new common::RANDOM_ENGINE((i + 1) * kRandMagic)); } if (param_.path_imglist.length() != 0) { - label_map_.reset(new ImageLabelMap(param_.path_imglist.c_str(), - param_.label_width, !param_.verbose)); + label_map_ = std::make_unique(param_.path_imglist.c_str(), + param_.label_width, !param_.verbose); } CHECK(param_.path_imgrec.length() != 0) << "ImageRecordIter2: must specify image_rec"; @@ -665,7 +666,7 @@ inline size_t ImageRecordIOParser2::ParseChunk(DType* data_dptr, real_t* // create mean image. template -inline void ImageRecordIOParser2::CreateMeanImg(void) { +inline void ImageRecordIOParser2::CreateMeanImg() { if (param_.verbose) { LOG(INFO) << "Cannot find " << normalize_param_.mean_img << ": create mean image, this will take some time..."; @@ -677,8 +678,7 @@ inline void ImageRecordIOParser2::CreateMeanImg(void) { inst_order_.clear(); // Parse chunk w/o putting anything in out ParseChunk(nullptr, nullptr, batch_param_.batch_size, &chunk); - for (size_t i = 0; i < inst_order_.size(); ++i) { - std::pair place = inst_order_[i]; + for (auto place : inst_order_) { mshadow::Tensor outimg = temp_[place.first][place.second].data[0].template get(); if (imcnt == 0) { @@ -714,13 +714,13 @@ inline void ImageRecordIOParser2::CreateMeanImg(void) { template class ImageRecordIter2 : public IIterator { public: - ImageRecordIter2() : out_(nullptr) { } + ImageRecordIter2() = default; - virtual ~ImageRecordIter2(void) { + ~ImageRecordIter2() override { iter_.Destroy(); } - virtual void Init(const std::vector >& kwargs) { + void Init(const std::vector >& kwargs) override { prefetch_param_.InitAllowUnknown(kwargs); parser_.Init(kwargs); // maximum prefetch threaded iter internal size @@ -737,12 +737,12 @@ class ImageRecordIter2 : public IIterator { [this]() { parser_.BeforeFirst(); }); } - virtual void BeforeFirst(void) { + void BeforeFirst() override { iter_.BeforeFirst(); } // From iter_prefetcher.h - virtual bool Next(void) { + bool Next() override { if (out_ != nullptr) { recycle_queue_.push(out_); out_ = nullptr; } @@ -759,7 +759,7 @@ class ImageRecordIter2 : public IIterator { return iter_.Next(&out_); } - virtual const DataBatch &Value(void) const { + const DataBatch &Value() const override { return *out_; } @@ -769,7 +769,7 @@ class ImageRecordIter2 : public IIterator { /*! \brief Parameters */ PrefetcherParam prefetch_param_; /*! \brief output data */ - DataBatch *out_; + DataBatch *out_{nullptr}; /*! 
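// A minimal sketch of the modernize-use-default-member-init rewrite above
// (`DataBatch *out_{nullptr};`), with hypothetical Batch/RecordIter types:
// initializing the member at its declaration gives every constructor one
// shared default and lets the compiler-generated default constructor be used.
struct Batch {};

struct RecordIter {
  RecordIter() = default;  // the `: out_(nullptr)` init list is no longer needed
  Batch* out_{nullptr};    // single point of truth for the default value
};

int main() {
  RecordIter it;
  return it.out_ == nullptr ? 0 : 1;
}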
\brief queue to be recycled */ std::queue recycle_queue_; /* \brief parser */ @@ -784,19 +784,19 @@ class ImageRecordIter2CPU : public IIterator { var_ = Engine::Get()->NewVariable(); } - virtual ~ImageRecordIter2CPU(void) { + ~ImageRecordIter2CPU() override { Engine::Get()->DeleteVariable([](mxnet::RunContext ctx) {}, Context::CPU(), var_); delete out_; } - virtual void Init(const std::vector>& kwargs) { + void Init(const std::vector>& kwargs) override { parser_.Init(kwargs); } - virtual void BeforeFirst(void) { parser_.BeforeFirst(); } + void BeforeFirst() override { parser_.BeforeFirst(); } // From iter_prefetcher.h - virtual bool Next(void) { + bool Next() override { bool result = false; const auto engine = Engine::Get(); engine->PushSync( @@ -808,7 +808,7 @@ class ImageRecordIter2CPU : public IIterator { return result; } - virtual const DataBatch& Value(void) const { return *out_; } + const DataBatch& Value() const override { return *out_; } private: /*! \brief Backend thread */ @@ -824,7 +824,7 @@ class ImageRecordIter2CPU : public IIterator { class ImageRecordIter2Wrapper : public IIterator { public: - ~ImageRecordIter2Wrapper(void) override { + ~ImageRecordIter2Wrapper() override { if (record_iter_) delete record_iter_; } void Init(const std::vector>& kwargs) override { @@ -869,14 +869,14 @@ class ImageRecordIter2Wrapper : public IIterator { record_iter_->Init(kwargs); } - void BeforeFirst(void) override { + void BeforeFirst() override { record_iter_->BeforeFirst(); } // From iter_prefetcher.h - bool Next(void) override { return record_iter_->Next(); } + bool Next() override { return record_iter_->Next(); } - const DataBatch &Value(void) const override { + const DataBatch &Value() const override { return record_iter_->Value(); } diff --git a/src/io/iter_libsvm.cc b/src/io/iter_libsvm.cc index 3decc7b33e04..0965bfc5192e 100644 --- a/src/io/iter_libsvm.cc +++ b/src/io/iter_libsvm.cc @@ -66,11 +66,11 @@ struct LibSVMIterParam : public dmlc::Parameter { class LibSVMIter: public SparseIIterator { public: - LibSVMIter() {} - virtual ~LibSVMIter() {} + LibSVMIter() = default; + ~LibSVMIter() override = default; // intialize iterator loads data in - virtual void Init(const std::vector >& kwargs) { + void Init(const std::vector >& kwargs) override { param_.InitAllowUnknown(kwargs); CHECK_EQ(param_.data_shape.ndim(), 1) << "dimension of data_shape is expected to be 1"; CHECK_GT(param_.num_parts, 0) << "number of parts should be positive"; @@ -97,7 +97,7 @@ class LibSVMIter: public SparseIIterator { } } - virtual void BeforeFirst() { + void BeforeFirst() override { data_parser_->BeforeFirst(); if (label_parser_.get() != nullptr) { label_parser_->BeforeFirst(); @@ -108,7 +108,7 @@ class LibSVMIter: public SparseIIterator { end_ = false; } - virtual bool Next() { + bool Next() override { if (end_) return false; while (data_ptr_ >= data_size_) { if (!data_parser_->Next()) { @@ -144,16 +144,16 @@ class LibSVMIter: public SparseIIterator { return true; } - virtual const DataInst &Value(void) const { + const DataInst &Value() const override { return out_; } - virtual const NDArrayStorageType GetStorageType(bool is_data) const { + const NDArrayStorageType GetStorageType(bool is_data) const override { if (is_data) return kCSRStorage; return param_.label_shape.Size() > 1 ? 
kCSRStorage : kDefaultStorage; } - virtual const mxnet::TShape GetShape(bool is_data) const { + const mxnet::TShape GetShape(bool is_data) const override { if (is_data) return param_.data_shape; return param_.label_shape; } diff --git a/src/io/iter_mnist.cc b/src/io/iter_mnist.cc index b752ce48d417..0d5f96c0e193 100644 --- a/src/io/iter_mnist.cc +++ b/src/io/iter_mnist.cc @@ -79,15 +79,15 @@ struct MNISTParam : public dmlc::Parameter { class MNISTIter: public IIterator { public: - MNISTIter(void) : loc_(0), inst_offset_(0) { + MNISTIter() { img_.dptr_ = nullptr; out_.data.resize(2); } - virtual ~MNISTIter(void) { + ~MNISTIter() override { delete []img_.dptr_; } // intialize iterator loads data in - virtual void Init(const std::vector >& kwargs) { + void Init(const std::vector >& kwargs) override { std::map kmap(kwargs.begin(), kwargs.end()); param_.InitAllowUnknown(kmap); this->LoadImage(); @@ -115,10 +115,10 @@ class MNISTIter: public IIterator { } } } - virtual void BeforeFirst(void) { + void BeforeFirst() override { this->loc_ = 0; } - virtual bool Next(void) { + bool Next() override { if (loc_ + param_.batch_size <= img_.size(0)) { batch_data_.dptr_ = img_[loc_].dptr_; batch_label_.dptr_ = &labels_[loc_]; @@ -135,7 +135,7 @@ class MNISTIter: public IIterator { return false; } } - virtual const TBlobBatch &Value(void) const { + const TBlobBatch &Value() const override { return out_; } @@ -151,7 +151,7 @@ class MNISTIter: public IIterator { static_cast(count) / param_.num_parts * (param_.part_index+1)); } - inline void LoadImage(void) { + inline void LoadImage() { dmlc::SeekStream* stdimg = dmlc::SeekStream::CreateForRead(param_.image.c_str()); ReadInt(stdimg); @@ -184,7 +184,7 @@ class MNISTIter: public IIterator { img_ *= 1.0f / 256.0f; delete stdimg; } - inline void LoadLabel(void) { + inline void LoadLabel() { dmlc::SeekStream* stdlabel = dmlc::SeekStream::CreateForRead(param_.label.c_str()); ReadInt(stdlabel); @@ -206,7 +206,7 @@ class MNISTIter: public IIterator { } delete stdlabel; } - inline void Shuffle(void) { + inline void Shuffle() { std::shuffle(inst_.begin(), inst_.end(), common::RANDOM_ENGINE(kRandMagic + param_.seed)); std::vector tmplabel(labels_.size()); mshadow::TensorContainer tmpimg(img_.shape_); @@ -238,7 +238,7 @@ class MNISTIter: public IIterator { /*! \brief output */ TBlobBatch out_; /*! \brief current location */ - index_t loc_; + index_t loc_{0}; /*! \brief image content */ mshadow::Tensor img_; /*! \brief label content */ @@ -248,7 +248,7 @@ class MNISTIter: public IIterator { /*! \brief batch label tensor */ mshadow::Tensor batch_label_; /*! \brief instance index offset */ - unsigned inst_offset_; + unsigned inst_offset_{0}; /*! 
\brief instance index */ std::vector inst_; // magic number to setup randomness diff --git a/src/io/iter_sampler.cc b/src/io/iter_sampler.cc index 932bcc9fe38e..049347dfd9cf 100644 --- a/src/io/iter_sampler.cc +++ b/src/io/iter_sampler.cc @@ -27,6 +27,7 @@ #include #include #include +#include #include #include "../common/utils.h" #include "./iter_batchloader.h" @@ -52,22 +53,22 @@ DMLC_REGISTER_PARAMETER(SequentialSamplerParam); class SequentialSampler : public IIterator { public: - virtual void Init(const std::vector >& kwargs) { + void Init(const std::vector >& kwargs) override { param_.InitAllowUnknown(kwargs); indices_.resize(param_.length); std::iota(std::begin(indices_), std::end(indices_), 0); // fill like arange out_.data.resize(1); } - virtual void BeforeFirst(void) { + void BeforeFirst() override { pos_ = 0; } - virtual int64_t GetLenHint(void) const { + int64_t GetLenHint() const override { return static_cast(indices_.size()); } - virtual bool Next(void) { + bool Next() override { if (pos_ < indices_.size()) { int64_t *ptr = indices_.data() + pos_; out_.data[0] = TBlob(ptr, TShape({1, }), cpu::kDevMask, 0); @@ -77,7 +78,7 @@ class SequentialSampler : public IIterator { return false; } - virtual const DataInst &Value(void) const { + const DataInst &Value() const override { return out_; } @@ -117,27 +118,27 @@ DMLC_REGISTER_PARAMETER(RandomSamplerParam); class RandomSampler : public IIterator { public: - virtual void Init(const std::vector >& kwargs) { + void Init(const std::vector >& kwargs) override { param_.InitAllowUnknown(kwargs); indices_.resize(param_.length); std::iota(std::begin(indices_), std::end(indices_), 0); // fill like arange mshadow::Random *ctx_rng = ResourceManager::Get()->Request( Context::CPU(), ResourceRequest::kRandom).get_random(nullptr); - rng_.reset(new common::RANDOM_ENGINE(ctx_rng->GetSeed())); + rng_ = std::make_unique(ctx_rng->GetSeed()); out_.data.resize(1); BeforeFirst(); } - virtual void BeforeFirst(void) { + void BeforeFirst() override { std::shuffle(std::begin(indices_), std::end(indices_), *rng_); pos_ = 0; } - virtual int64_t GetLenHint(void) const { + int64_t GetLenHint() const override { return static_cast(indices_.size()); } - virtual bool Next(void) { + bool Next() override { if (pos_ < indices_.size()) { int64_t *ptr = indices_.data() + pos_; out_.data[0] = TBlob(ptr, TShape({1, }), cpu::kDevMask, 0); @@ -147,7 +148,7 @@ class RandomSampler : public IIterator { return false; } - virtual const DataInst &Value(void) const { + const DataInst &Value() const override { return out_; } private: diff --git a/src/kvstore/kvstore.cc b/src/kvstore/kvstore.cc index fa3f91097ba6..87daad60ccc2 100644 --- a/src/kvstore/kvstore.cc +++ b/src/kvstore/kvstore.cc @@ -23,7 +23,6 @@ * \brief implement kv_store */ #include -#include #include #include "./kvstore_local.h" @@ -36,6 +35,8 @@ std::atomic mxnet::kvstore::KVStoreDist::customer_id_{0}; #include "./kvstore_nccl.h" #endif // MXNET_USE_NCCL +#include + namespace mxnet { KVStore* KVStore::Create(const char *type_name) { diff --git a/src/nnvm/gradient.cc b/src/nnvm/gradient.cc index 09c02b2aa26f..447f658890d8 100644 --- a/src/nnvm/gradient.cc +++ b/src/nnvm/gradient.cc @@ -168,8 +168,8 @@ Graph Gradient(Graph src) { // information is needed in later stages to determine whether putting a node // on the mirror path can be beneficial or not. 
using mxnet::ShapeVector; - ShapeVector in_arg_shapes = std::move(src.GetAttr("in_arg_shapes")); - DTypeVector in_arg_dtypes = std::move(src.GetAttr("in_arg_dtypes")); + ShapeVector in_arg_shapes = src.GetAttr("in_arg_shapes"); + DTypeVector in_arg_dtypes = src.GetAttr("in_arg_dtypes"); src = mxnet::exec::InferShape(std::move(src), std::move(in_arg_shapes), "__shape__"); src = mxnet::exec::InferType(std::move(src), std::move(in_arg_dtypes), "__dtype__"); CHECK(src.GetAttr("shape_num_unknown_nodes") == 0U); @@ -583,8 +583,7 @@ Graph BuildGradientGraph( // gather all the output gradient entries and apply the aggregation function out_agg_grads.clear(); auto& out_grad_vec = output_grads.at(src_fwd_node.get()); - for (uint32_t i = 0; i < out_grad_vec.size(); ++i) { - GradEntry& e = out_grad_vec[i]; + for (auto & e : out_grad_vec) { e.sum = agg_fun(std::move(e.grads)); out_agg_grads.push_back(e.sum); } @@ -698,7 +697,7 @@ Graph BuildGradientGraph( // register pass NNVM_REGISTER_PASS(MXGradient) -.describe("Return a gradient graph of src.attrs[\"ys\"] wrt src.attrs[\"xs\"]") +.describe(R"(Return a gradient graph of src.attrs["ys"] wrt src.attrs["xs"])") .set_body(Gradient) .set_change_graph(true) .depend_graph_attr("grad_ys") diff --git a/src/nnvm/graph_editor.cc b/src/nnvm/graph_editor.cc index 2d2053c536d0..44a807eda174 100644 --- a/src/nnvm/graph_editor.cc +++ b/src/nnvm/graph_editor.cc @@ -27,6 +27,8 @@ #include #include +#include + namespace nnvm { ObjectPtr CreateVariableNode(const std::string& name); } @@ -67,7 +69,7 @@ bool CutGraphInputs(const std::vector &input_entries, bool skip_var, std::vector *orig_entries) { struct pred_entry { nnvm::NodeEntry e; - explicit pred_entry(const nnvm::NodeEntry &_e): e(_e) {} + explicit pred_entry(nnvm::NodeEntry _e): e(std::move(_e)) {} bool operator()(const nnvm::NodeEntry e1) { return e.node == e1.node && e.index == e1.index; } diff --git a/src/nnvm/low_precision_pass.cc b/src/nnvm/low_precision_pass.cc index 66ec59d44f19..a13344dfccf5 100644 --- a/src/nnvm/low_precision_pass.cc +++ b/src/nnvm/low_precision_pass.cc @@ -134,11 +134,10 @@ static bool CheckConditionalFP32( auto it_params = it->second; // For each param name, iterate through param values to check // if the provided param name is equal to any of the values - for (auto it_param = it_params.begin(); it_param != it_params.end(); - it_param++) { - auto param_key = node->attrs.dict.find(it_param->first); + for (auto & it_param : it_params) { + auto param_key = node->attrs.dict.find(it_param.first); if (param_key != node->attrs.dict.end()) { - auto it_param_vals = it_param->second; + auto it_param_vals = it_param.second; if (std::find(it_param_vals.begin(), it_param_vals.end(), param_key->second) != it_param_vals.end()) { return true; @@ -282,13 +281,13 @@ Graph ReducePrecision(Graph &&src) { << "can't handle the widest_dtype_ops with mutable inputs."; int out_dtype = target_dtype; bool have_unknown_dtype = false; - for (size_t i = 0; i < node->inputs.size(); ++i) { + for (auto & input : node->inputs) { // Try to infer output dtype based on input dtype - if (!mirror_target_dtype_map.count(node->inputs[i]) - && !mirror_fp32_map.count(node->inputs[i])) { + if (!mirror_target_dtype_map.count(input) + && !mirror_fp32_map.count(input)) { have_unknown_dtype = true; break; - } else if (mirror_fp32_map.count(node->inputs[i])) { + } else if (mirror_fp32_map.count(input)) { out_dtype = mshadow::kFloat32; } } diff --git a/src/nnvm/tvm_bridge.cc b/src/nnvm/tvm_bridge.cc index 17e05e3316cd..66e010a4e534 
100644 --- a/src/nnvm/tvm_bridge.cc +++ b/src/nnvm/tvm_bridge.cc @@ -40,6 +40,7 @@ #include #include +#include namespace mxnet { @@ -55,7 +56,7 @@ class TVMFunctor { public: // constructor explicit TVMFunctor(PackedFunc func, PackedFunc fset_stream) - : func_(func), fset_stream_(fset_stream) {} + : func_(std::move(func)), fset_stream_(std::move(fset_stream)) {} void Init(const TVMArgs& args, const std::vector& const_loc, diff --git a/src/operator/contrib/boolean_mask.cc b/src/operator/contrib/boolean_mask.cc index a4e924eb3446..882984430d52 100644 --- a/src/operator/contrib/boolean_mask.cc +++ b/src/operator/contrib/boolean_mask.cc @@ -68,8 +68,8 @@ bool BooleanMaskBackStorageType(const nnvm::NodeAttrs& attrs, for (int &attr : *out_attrs) { attr = kDefaultStorage; } - for (size_t i = 0; i < out_attrs->size(); i++) - out_attrs->at(i) = kDefaultStorage; + for (int & out_attr : *out_attrs) + out_attr = kDefaultStorage; *dispatch_mode = DispatchMode::kFComputeEx; return true; } diff --git a/src/operator/contrib/dgl_graph.cc b/src/operator/contrib/dgl_graph.cc index 89bee8abf655..c8e27f38d1a6 100644 --- a/src/operator/contrib/dgl_graph.cc +++ b/src/operator/contrib/dgl_graph.cc @@ -26,6 +26,7 @@ #include #include #include +#include #include "../elemwise_op_common.h" #include "../../imperative/imperative_utils.h" @@ -63,7 +64,7 @@ class ArrayHeap { } } } - ~ArrayHeap() {} + ~ArrayHeap() = default; /* * Remove term from index (this costs O(log m) steps) @@ -417,8 +418,8 @@ static void RandomSample(size_t set_size, sampled_idxs.insert(distribution(generator)); } out->clear(); - for (auto it = sampled_idxs.begin(); it != sampled_idxs.end(); it++) { - out->push_back(*it); + for (size_t sampled_idx : sampled_idxs) { + out->push_back(sampled_idx); } } @@ -528,9 +529,9 @@ static void GetNonUniformSample(const float* probability, struct neigh_list { std::vector neighs; std::vector edges; - neigh_list(const std::vector &_neighs, - const std::vector &_edges) - : neighs(_neighs), edges(_edges) {} + neigh_list(std::vector _neighs, + std::vector _edges) + : neighs(std::move(_neighs)), edges(std::move(_edges)) {} }; /* @@ -620,25 +621,25 @@ static void SampleSubgraph(const NDArray &csr, // First we push the size of neighbor vector neighbor_list.push_back(tmp_sampled_edge_list.size()); // Then push the vertices - for (size_t i = 0; i < tmp_sampled_src_list.size(); ++i) { - neighbor_list.push_back(tmp_sampled_src_list[i]); + for (dgl_id_t & i : tmp_sampled_src_list) { + neighbor_list.push_back(i); } // Finally we push the edge list - for (size_t i = 0; i < tmp_sampled_edge_list.size(); ++i) { - neighbor_list.push_back(tmp_sampled_edge_list[i]); + for (dgl_id_t & i : tmp_sampled_edge_list) { + neighbor_list.push_back(i); } num_edges += tmp_sampled_src_list.size(); - for (size_t i = 0; i < tmp_sampled_src_list.size(); ++i) { + for (dgl_id_t & i : tmp_sampled_src_list) { // If we have sampled the max number of vertices, we have to stop. if (sub_ver_mp.size() >= max_num_vertices) break; // We need to add the neighbor in the hashtable here. This ensures that // the vertex in the queue is unique. If we see a vertex before, we don't // need to add it to the queue again. - auto ret = sub_ver_mp.insert(tmp_sampled_src_list[i]); + auto ret = sub_ver_mp.insert(i); // If the sampled neighbor is inserted to the map successfully. 
if (ret.second) - sub_vers.emplace_back(tmp_sampled_src_list[i], cur_node_level + 1); + sub_vers.emplace_back(i, cur_node_level + 1); } } // Let's check if there is a vertex that we haven't sampled its neighbors. @@ -960,8 +961,8 @@ static bool DGLSubgraphStorageType(const nnvm::NodeAttrs& attrs, bool success = true; *dispatch_mode = DispatchMode::kFComputeEx; - for (size_t i = 0; i < out_attrs->size(); i++) { - if (!type_assign(&(*out_attrs)[i], mxnet::kCSRStorage)) + for (int & out_attr : *out_attrs) { + if (!type_assign(&out_attr, mxnet::kCSRStorage)) success = false; } return success; @@ -999,8 +1000,8 @@ static bool DGLSubgraphType(const nnvm::NodeAttrs& attrs, for (size_t i = 0; i < num_g; i++) { CHECK_EQ(in_attrs->at(i + 1), mshadow::kInt64); } - for (size_t i = 0; i < out_attrs->size(); i++) { - out_attrs->at(i) = in_attrs->at(0); + for (int & out_attr : *out_attrs) { + out_attr = in_attrs->at(0); } return true; } @@ -1016,7 +1017,7 @@ class Bitmap { public: Bitmap(const dgl_id_t *vid_data, int64_t len): map(size) { for (int64_t i = 0; i < len; ++i) { - map[hash(vid_data[i])] = 1; + map[hash(vid_data[i])] = true; } } @@ -1531,8 +1532,8 @@ static bool SubgraphCompactStorageType(const nnvm::NodeAttrs& attrs, bool success = true; *dispatch_mode = DispatchMode::kFComputeEx; - for (size_t i = 0; i < out_attrs->size(); i++) { - if (!type_assign(&(*out_attrs)[i], mxnet::kCSRStorage)) + for (int & out_attr : *out_attrs) { + if (!type_assign(&out_attr, mxnet::kCSRStorage)) success = false; } return success; @@ -1570,11 +1571,11 @@ static bool SubgraphCompactShape(const nnvm::NodeAttrs& attrs, static bool SubgraphCompactType(const nnvm::NodeAttrs& attrs, std::vector *in_attrs, std::vector *out_attrs) { - for (size_t i = 0; i < in_attrs->size(); i++) { - CHECK_EQ(in_attrs->at(i), mshadow::kInt64); + for (int & in_attr : *in_attrs) { + CHECK_EQ(in_attr, mshadow::kInt64); } - for (size_t i = 0; i < out_attrs->size(); i++) { - out_attrs->at(i) = mshadow::kInt64; + for (int & out_attr : *out_attrs) { + out_attr = mshadow::kInt64; } return true; } diff --git a/src/operator/contrib/multi_proposal.cc b/src/operator/contrib/multi_proposal.cc index e77a0b5aeba1..bf8555ca1c09 100644 --- a/src/operator/contrib/multi_proposal.cc +++ b/src/operator/contrib/multi_proposal.cc @@ -289,11 +289,11 @@ class MultiProposalOp : public Operator{ this->param_ = param; } - virtual void Forward(const OpContext &ctx, + void Forward(const OpContext &ctx, const std::vector &in_data, const std::vector &req, const std::vector &out_data, - const std::vector &aux_states) { + const std::vector &aux_states) override { using namespace mshadow; using namespace mshadow::expr; CHECK_EQ(in_data.size(), 3); @@ -458,13 +458,13 @@ class MultiProposalOp : public Operator{ } } - virtual void Backward(const OpContext &ctx, + void Backward(const OpContext &ctx, const std::vector &out_grad, const std::vector &in_data, const std::vector &out_data, const std::vector &req, const std::vector &in_grad, - const std::vector &aux_states) { + const std::vector &aux_states) override { using namespace mshadow; using namespace mshadow::expr; CHECK_EQ(in_grad.size(), 3); diff --git a/src/operator/contrib/proposal.cc b/src/operator/contrib/proposal.cc index 935372d34dbe..1ca14537c2f6 100644 --- a/src/operator/contrib/proposal.cc +++ b/src/operator/contrib/proposal.cc @@ -278,11 +278,11 @@ class ProposalOp : public Operator{ this->param_ = param; } - virtual void Forward(const OpContext &ctx, + void Forward(const OpContext &ctx, const std::vector 
&in_data, const std::vector &req, const std::vector &out_data, - const std::vector &aux_states) { + const std::vector &aux_states) override { using namespace mshadow; using namespace mshadow::expr; CHECK_EQ(in_data.size(), 3); @@ -420,13 +420,13 @@ class ProposalOp : public Operator{ } } - virtual void Backward(const OpContext &ctx, + void Backward(const OpContext &ctx, const std::vector &out_grad, const std::vector &in_data, const std::vector &out_data, const std::vector &req, const std::vector &in_grad, - const std::vector &aux_states) { + const std::vector &aux_states) override { using namespace mshadow; using namespace mshadow::expr; CHECK_EQ(in_grad.size(), 3); diff --git a/src/operator/contrib/rroi_align.cc b/src/operator/contrib/rroi_align.cc index 14690d6270d2..415eecf8eedf 100644 --- a/src/operator/contrib/rroi_align.cc +++ b/src/operator/contrib/rroi_align.cc @@ -27,7 +27,7 @@ */ #include "./rroi_align-inl.h" #include -#include "math.h" +#include using std::max; using std::min; diff --git a/src/operator/control_flow.cc b/src/operator/control_flow.cc index e6cc90ac13dc..7926f26f3f24 100644 --- a/src/operator/control_flow.cc +++ b/src/operator/control_flow.cc @@ -24,6 +24,8 @@ #include #include #include + +#include #include "./operator_common.h" #include "./elemwise_op_common.h" #include "../imperative/imperative_utils.h" @@ -648,9 +650,9 @@ static void WhileLoopComputeExCPU(const OpStatePtr& state_ptr, const_cast(outputs[i]).SetShapeFromChunk(); } if (state.n_iterations == 0) { - for (size_t i = 0; i < outputs.size(); ++i) { - if (!shape_is_known(outputs[i].shape())) { - const_cast(outputs[i]).ReshapeAndAlloc({1}); + for (const auto & output : outputs) { + if (!shape_is_known(output.shape())) { + const_cast(output).ReshapeAndAlloc({1}); } } } @@ -865,11 +867,11 @@ class CondState { LoopState else_branch; int branch_selection; // 1 if then branch; 0 if else branch; -1 if undefined - CondState(const CondParam ¶ms, + CondState(CondParam params, const nnvm::Symbol &cond, const nnvm::Symbol &then_sym, const nnvm::Symbol &else_sym): - params(params), + params(std::move(params)), cond_op(LoopState::MakeSharedOp(cond)), then_branch(then_sym), else_branch(else_sym), diff --git a/src/operator/leaky_relu.cc b/src/operator/leaky_relu.cc index 681ca44b357f..8a1a07573f4e 100644 --- a/src/operator/leaky_relu.cc +++ b/src/operator/leaky_relu.cc @@ -202,7 +202,7 @@ The following modified ReLU Activation functions are supported: .set_attr("FSetInputVarAttrOnCompose", [](const nnvm::NodeAttrs& attrs, nnvm::ObjectPtr var, const int index) { if (index == 1 && var->attrs.dict.find("__init__") == var->attrs.dict.end()) { - var->attrs.dict["__init__"] = "[\"Constant\", {\"value\": 0.25}]"; + var->attrs.dict["__init__"] = R"(["Constant", {"value": 0.25}])"; } }); diff --git a/src/operator/numpy/np_einsum_op.cc b/src/operator/numpy/np_einsum_op.cc index 522780f5f3ad..a89f1ad40de4 100644 --- a/src/operator/numpy/np_einsum_op.cc +++ b/src/operator/numpy/np_einsum_op.cc @@ -56,8 +56,8 @@ */ #include "./np_einsum_op-inl.h" -#include -#include +#include +#include namespace mxnet { namespace op { diff --git a/src/operator/numpy/np_indexing_op.cc b/src/operator/numpy/np_indexing_op.cc index 3c2a041f955a..1f1fecc1c8f3 100644 --- a/src/operator/numpy/np_indexing_op.cc +++ b/src/operator/numpy/np_indexing_op.cc @@ -161,8 +161,8 @@ bool AdvancedIndexingOpBackStorageType(const nnvm::NodeAttrs& attrs, for (int &attr : *out_attrs) { attr = kDefaultStorage; } - for (size_t i = 0; i < out_attrs->size(); i++) - 
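// A minimal sketch of the modernize-deprecated-headers rewrite in rroi_align.cc
// above; the replacement header name was lost in this copy of the patch, but
// the standard mapping for "math.h" is <cmath>, which is assumed here. Only
// the C++ header guarantees the declarations in namespace std.
#include <cmath>  // instead of: #include "math.h"

int main() {
  double r = std::sqrt(2.0);  // std::sqrt is only guaranteed by <cmath>
  return r > 1.0 ? 0 : 1;
}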
out_attrs->at(i) = kDefaultStorage; + for (int & out_attr : *out_attrs) + out_attr = kDefaultStorage; *dispatch_mode = DispatchMode::kFComputeEx; return true; } diff --git a/src/operator/numpy/np_polynomial_op.cc b/src/operator/numpy/np_polynomial_op.cc index 155c98fd1cc5..72df77cf2d25 100644 --- a/src/operator/numpy/np_polynomial_op.cc +++ b/src/operator/numpy/np_polynomial_op.cc @@ -22,7 +22,7 @@ * \file np_polynomial_op.cc */ -#include +#include #include "np_polynomial_op-inl.h" namespace mxnet { diff --git a/src/operator/operator_tune.cc b/src/operator/operator_tune.cc index b5e253a1872e..4c66f00b14d6 100644 --- a/src/operator/operator_tune.cc +++ b/src/operator/operator_tune.cc @@ -16,7 +16,7 @@ * specific language governing permissions and limitations * under the License. */ -#include +#include #include #include "./mxnet_op.h" #include "./mshadow_op.h" diff --git a/src/operator/quantization/quantize_graph_pass.cc b/src/operator/quantization/quantize_graph_pass.cc index 012134bcfa8d..ff75801af410 100644 --- a/src/operator/quantization/quantize_graph_pass.cc +++ b/src/operator/quantization/quantize_graph_pass.cc @@ -225,8 +225,8 @@ static void MarkQuantizedNodes(const Graph& src, while (!task_queue.empty()) { const auto& node = task_queue.front(); task_queue.pop(); - for (size_t i = 0; i < node->inputs.size(); ++i) { - const auto& input = node->inputs[i].node; + for (auto & i : node->inputs) { + const auto& input = i.node; auto it = support_quantize_nodes.find(input); if (it != support_quantize_nodes.end()) { it->second = it->second | kFromInput; @@ -243,8 +243,7 @@ static void MarkQuantizedNodes(const Graph& src, const auto& node = task_queue.front(); task_queue.pop(); const auto& outputs = node_output_map[node]; - for (size_t i = 0; i < outputs.size(); ++i) { - const auto& output = outputs[i]; + for (const auto & output : outputs) { auto it = support_quantize_nodes.find(output); if (it != support_quantize_nodes.end()) { it->second = it->second | kFromOutput; diff --git a/src/operator/quantization/quantized_elemwise_mul.cc b/src/operator/quantization/quantized_elemwise_mul.cc index 7d1798f14503..0988a8bcceaf 100644 --- a/src/operator/quantization/quantized_elemwise_mul.cc +++ b/src/operator/quantization/quantized_elemwise_mul.cc @@ -186,7 +186,7 @@ void QuantizedElemwiseMulOpForward(const nnvm::NodeAttrs &attrs, out_data[i] = static_cast(a * b * out_scale); } } else { - typedef int32_t out_type; + using out_type = int32_t; auto *out_data = outputs[quantized_elemwise_mul::kOut].dptr(); #if !defined(_MSC_VER) #pragma omp simd @@ -198,7 +198,7 @@ void QuantizedElemwiseMulOpForward(const nnvm::NodeAttrs &attrs, } } } else { - typedef float_t out_type; + using out_type = float_t; auto *out_data = outputs[quantized_elemwise_mul::kOut].dptr(); #if !defined(_MSC_VER) #pragma omp simd diff --git a/src/operator/quantization/quantized_fully_connected.cc b/src/operator/quantization/quantized_fully_connected.cc index d88aac86851a..e8caf79b05a9 100644 --- a/src/operator/quantization/quantized_fully_connected.cc +++ b/src/operator/quantization/quantized_fully_connected.cc @@ -154,7 +154,7 @@ struct QuantizedSumInitKernelWithBias { const float *max_out, const float *min_bias, const float *max_bias) { typedef int32_t T1; - typedef int8_t T2; + using T2 = int8_t; using mshadow::red::limits::MinValue; using mshadow::red::limits::MaxValue; float float_for_one_out_quant = diff --git a/src/operator/subgraph/default_subgraph_property.cc b/src/operator/subgraph/default_subgraph_property.cc index 
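// A minimal sketch of the modernize-use-using rewrite above
// (typedef int32_t out_type; -> using out_type = int32_t;): the alias
// declaration reads left-to-right and, unlike typedef, can be templated.
#include <cstdint>
#include <vector>

using out_type = int32_t;  // post-patch spelling of: typedef int32_t out_type;

template <typename T>
using Rows = std::vector<std::vector<T>>;  // alias templates have no typedef form

int main() {
  Rows<out_type> rows(2, std::vector<out_type>(3, 0));
  return static_cast<int>(rows.size()) - 2;
}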
dd3bfd14ae28..ff51b6397c04 100644 --- a/src/operator/subgraph/default_subgraph_property.cc +++ b/src/operator/subgraph/default_subgraph_property.cc @@ -17,6 +17,8 @@ * under the License. */ +#include + #include "./common.h" #include "./subgraph_property.h" #include "../../imperative/cached_op.h" @@ -33,15 +35,15 @@ class ContainOpSelector: public SubgraphSelector { explicit ContainOpSelector(const std::unordered_set& op_names) : op_names_(op_names) {} - virtual bool Select(const nnvm::Node &seed_node) { + bool Select(const nnvm::Node &seed_node) override { return !seed_node.is_variable() && op_names_.count(seed_node.op()->name); } - virtual bool SelectInput(const nnvm::Node &cur_node, const nnvm::Node &input_node) { + bool SelectInput(const nnvm::Node &cur_node, const nnvm::Node &input_node) override { return !input_node.is_variable() && op_names_.count(input_node.op()->name); } - virtual bool SelectOutput(const nnvm::Node &cur_node, const nnvm::Node &output_node) { + bool SelectOutput(const nnvm::Node &cur_node, const nnvm::Node &output_node) override { return !output_node.is_variable() && op_names_.count(output_node.op()->name); } private: @@ -55,19 +57,19 @@ class ContainOpSelector: public SubgraphSelector { class DefaultSubgraphProperty: public SubgraphProperty { public: static SubgraphPropertyPtr Create() { return std::make_shared(); } - virtual nnvm::ObjectPtr CreateSubgraphNode(const nnvm::Symbol &sym, - const int subgraph_id = 0) const { + nnvm::ObjectPtr CreateSubgraphNode(const nnvm::Symbol &sym, + const int subgraph_id = 0) const override { nnvm::ObjectPtr n = nnvm::Node::Create(); n->attrs.op = Op::Get("_CachedOp"); n->attrs.name = "_CachedOp" + std::to_string(subgraph_id); n->attrs.subgraphs.push_back(std::make_shared(sym)); std::vector > flags{{"static_alloc", "true"}}; - n->attrs.parsed = CachedOpPtr(new CachedOp(sym, flags)); + n->attrs.parsed = std::make_shared(sym, flags); return n; } - virtual SubgraphSelectorPtr CreateSubgraphSelector() const { + SubgraphSelectorPtr CreateSubgraphSelector() const override { return std::make_shared( this->GetAttr>("op_names")); } diff --git a/src/operator/subgraph/default_subgraph_property_v2.cc b/src/operator/subgraph/default_subgraph_property_v2.cc index 65aaeb1f45ce..7c942300ad12 100644 --- a/src/operator/subgraph/default_subgraph_property_v2.cc +++ b/src/operator/subgraph/default_subgraph_property_v2.cc @@ -18,6 +18,8 @@ */ +#include + #include "./common.h" #include "./subgraph_property.h" #include "../../imperative/cached_op.h" @@ -68,7 +70,7 @@ class DefaultSubgraphProperty: public SubgraphProperty { n->attrs.subgraphs.push_back(std::make_shared(sym)); std::vector > flags{{"static_alloc", "true"}}; - n->attrs.parsed = CachedOpPtr(new CachedOp(sym, flags)); + n->attrs.parsed = std::make_shared(sym, flags); return n; } diff --git a/src/profiler/aggregate_stats.cc b/src/profiler/aggregate_stats.cc index 86791ebf3074..9d56dd35d6c2 100644 --- a/src/profiler/aggregate_stats.cc +++ b/src/profiler/aggregate_stats.cc @@ -235,8 +235,8 @@ void AggregateStats::DumpJson(std::ostream& os, int sort_by, int ascending) { << " }" << std::endl << "," << std::endl << " \"Unit\": {" << std::endl - << " \"Time\": \"ms\"," << std::endl - << " \"Memory\": \"kB\"" << std::endl + << R"( "Time": "ms",)" << std::endl + << R"( "Memory": "kB")" << std::endl << " }" << std::endl << "}" << std::endl << std::flush; diff --git a/src/profiler/profiler.cc b/src/profiler/profiler.cc index 13ab462ab69c..080d0454faff 100644 --- a/src/profiler/profiler.cc +++ 
diff --git a/src/profiler/profiler.cc b/src/profiler/profiler.cc
index 13ab462ab69c..080d0454faff 100644
--- a/src/profiler/profiler.cc
+++ b/src/profiler/profiler.cc
@@ -154,9 +154,9 @@ void Profiler::SetConfig(int mode,
  */
 void Profiler::EmitPid(std::ostream *os, const std::string& name, size_t pid) {
   (*os) << "        {\n"
-        << "            \"ph\": \"" << static_cast<char>(ProfileStat::kMetadata) << "\",\n"
+        << R"(            "ph": ")" << static_cast<char>(ProfileStat::kMetadata) << "\",\n"
         << "            \"args\": {\n"
-        << "                \"name\": \"" << name << "\"\n"
+        << R"(                "name": ")" << name << "\"\n"
         << "            },\n"
         << "            \"pid\": " << pid << ",\n"
         << "            \"name\": \"process_name\"\n"
@@ -246,7 +246,7 @@ void Profiler::DumpProfile(bool perform_cleanup) {
   if (last_pass) {
     file << "\n" << std::endl;
     file << "    ]," << std::endl;
-    file << "    \"displayTimeUnit\": \"ms\"" << std::endl;
+    file << R"(    "displayTimeUnit": "ms")" << std::endl;
     file << "}" << std::endl;
   }
   enable_output_ = continuous_dump_ && !last_pass;  // If we're appending, then continue.
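The profiler and aggregate-stats hunks above swap escaped string literals for raw string literals (modernize-raw-string-literal). Both forms emit byte-identical JSON; the raw form just avoids escaping every embedded quote. A standalone sketch, not part of the patch:

// sketch.cc -- illustration only, not MXNet code
#include <iostream>

int main() {
  // Both statements print exactly:     "Time": "ms",
  std::cout << "    \"Time\": \"ms\",\n";          // escaped form (before)
  std::cout << R"(    "Time": "ms",)" << '\n';     // raw form (after)
  return 0;
}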
diff --git a/src/resource.cc b/src/resource.cc
index 9f5ecaf89b27..28e24e5c6984 100644
--- a/src/resource.cc
+++ b/src/resource.cc
@@ -31,6 +31,7 @@
 #include
 #include
 #include
+#include <memory>
 #include "./common/lazy_alloc_array.h"
 #include "./common/utils.h"
 #include "./common/cuda_utils.h"
@@ -91,8 +92,7 @@ struct SpaceAllocator {
 // Implements resource manager
 class ResourceManagerImpl : public ResourceManager {
  public:
-  ResourceManagerImpl() noexcept(false)
-      : global_seed_(0) {
+  ResourceManagerImpl() noexcept(false) {
    cpu_temp_space_copy_ = dmlc::GetEnv("MXNET_CPU_TEMP_COPY", 4);
    gpu_temp_space_copy_ = dmlc::GetEnv("MXNET_GPU_TEMP_COPY", 1);
    cpu_native_rand_copy_ = dmlc::GetEnv("MXNET_CPU_PARALLEL_RAND_COPY", 1);
@@ -102,14 +102,14 @@ class ResourceManagerImpl : public ResourceManager {
 #endif  // MXNET_USE_CUDNN == 1
     engine_ref_ = Engine::_GetSharedRef();
     storage_ref_ = Storage::_GetSharedRef();
-    cpu_rand_.reset(new ResourceRandom<cpu>(
-        Context::CPU(), global_seed_));
-    cpu_space_.reset(new ResourceTempSpace<cpu>(
-        Context::CPU(), cpu_temp_space_copy_));
-    cpu_parallel_rand_.reset(new ResourceParallelRandom<cpu>(
-        Context::CPU(), cpu_native_rand_copy_, global_seed_));
+    cpu_rand_ = std::make_unique<ResourceRandom<cpu>>(
+        Context::CPU(), global_seed_);
+    cpu_space_ = std::make_unique<ResourceTempSpace<cpu>>(
+        Context::CPU(), cpu_temp_space_copy_);
+    cpu_parallel_rand_ = std::make_unique<ResourceParallelRandom<cpu>>(
+        Context::CPU(), cpu_native_rand_copy_, global_seed_);
   }
-  ~ResourceManagerImpl() {
+  ~ResourceManagerImpl() override {
     // need explicit delete, before engine get killed
     cpu_rand_.reset(nullptr);
     cpu_space_.reset(nullptr);
@@ -390,7 +390,7 @@ class ResourceManagerImpl : public ResourceManager {
   /*! \brief Reference to the storage */
   std::shared_ptr<Storage> storage_ref_;
   /*! \brief internal seed to the random number generator */
-  uint32_t global_seed_;
+  uint32_t global_seed_{0};
   /*! \brief CPU random number resources */
   std::unique_ptr<ResourceRandom<cpu>> cpu_rand_;
   /*! \brief CPU temp space resources */
diff --git a/src/runtime/registry.cc b/src/runtime/registry.cc
index 276c1ba73d18..d1511806aa27 100644
--- a/src/runtime/registry.cc
+++ b/src/runtime/registry.cc
@@ -44,7 +44,7 @@ struct Registry::Manager {
   std::mutex mutex;
 
   // vtable for extension type is not suported for now
-  Manager() {}
+  Manager() = default;
 
   static Manager* Global() {
     // We deliberately leak the Manager instance, to avoid leak sanitizers
diff --git a/src/storage/storage.cc b/src/storage/storage.cc
index 438a6b872021..f359b30f151e 100644
--- a/src/storage/storage.cc
+++ b/src/storage/storage.cc
@@ -44,7 +44,7 @@ class StorageImpl : public Storage {
   void SharedIncrementRefCount(Handle handle) override;
 
   StorageImpl() = default;
-  virtual ~StorageImpl() = default;
+  ~StorageImpl() override = default;
 
  private:
   std::shared_ptr storage_manager(const Context &ctx) {
diff --git a/tests/cpp/engine/thread_local_test.cc b/tests/cpp/engine/thread_local_test.cc
index f842b1d52018..6801b377ef83 100644
--- a/tests/cpp/engine/thread_local_test.cc
+++ b/tests/cpp/engine/thread_local_test.cc
@@ -23,7 +23,6 @@
  * \brief Tests thread safety and lifetime of thread local store
  */
 #include
-#include
 #include
 #include
 #include
@@ -31,6 +30,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
diff --git a/tests/cpp/engine/threaded_engine_test.cc b/tests/cpp/engine/threaded_engine_test.cc
index e1e3a53e656c..11ca2c94c1c0 100644
--- a/tests/cpp/engine/threaded_engine_test.cc
+++ b/tests/cpp/engine/threaded_engine_test.cc
@@ -22,7 +22,6 @@
  * \file threaded_engine_test.cc
  * \brief threaded engine tests
  */
-#include <time.h>
 #include
 #include
 #include
@@ -31,6 +30,7 @@
 #include
 #include
 #include
+#include <ctime>
 #include
 #include
 #include
@@ -107,7 +107,7 @@ double EvaluateWorkloads(const std::vector& workloads,
   for (const auto& wl : workloads) {
     if (wl.reads.size() == 0)
       continue;
-    if (engine == NULL) {
+    if (engine == nullptr) {
       EvaluateWorkload(wl, data);
     } else {
       auto func = [wl, data](RunContext ctx, Engine::CallbackOnComplete cb) {
@@ -152,13 +152,13 @@ TEST(Engine, RandSumExpr) {
   std::vector<double> t(num_engine, 0.0);
   std::vector<Engine*> engine(num_engine);
-  engine[0] = NULL;
+  engine[0] = nullptr;
   engine[1] = mxnet::engine::CreateNaiveEngine();
   engine[2] = mxnet::engine::CreateThreadedEnginePooled();
   engine[3] = mxnet::engine::CreateThreadedEnginePerDevice();
 
   for (int repeat = 0; repeat < num_repeat; ++repeat) {
-    srand(time(NULL) + repeat);
+    srand(time(nullptr) + repeat);
     int num_var = 100;
     GenerateWorkload(10000, num_var, 2, 20, 1, 10, &workloads);
     std::vector<std::vector<double>> data(num_engine);
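The resource, registry, storage, and engine-test hunks above cover four related rewrites: default member initializers instead of constructor-initializer lists, `std::make_unique` instead of `reset(new ...)`, `= default` instead of an explicit empty body, and `nullptr` instead of `NULL`. A minimal standalone sketch combining them, not part of the patch, with the Manager/Rng names invented:

// sketch.cc -- illustration only, not MXNet code
#include <cstdint>
#include <cstdlib>
#include <ctime>
#include <memory>

struct Rng {
  explicit Rng(uint32_t seed) : seed_(seed) {}
  uint32_t seed_;
};

class Manager {
 public:
  Manager() {
    // modernize-make-unique: replaces rng_.reset(new Rng(...))
    rng_ = std::make_unique<Rng>(global_seed_);
  }

 private:
  // initialized at the declaration instead of in every constructor
  uint32_t global_seed_{0};
  std::unique_ptr<Rng> rng_;
};

struct Registry {
  Registry() = default;  // replaces the explicit empty body: Registry() {}
};

int main() {
  // modernize-use-nullptr: nullptr is type-safe where NULL is just 0
  std::srand(static_cast<unsigned>(std::time(nullptr)));
  Manager m;
  Registry r;
  return 0;
}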
diff --git a/tests/cpp/operator/batchnorm_test.cc b/tests/cpp/operator/batchnorm_test.cc
index 22bcb70387a8..dab8c98a7da8 100644
--- a/tests/cpp/operator/batchnorm_test.cc
+++ b/tests/cpp/operator/batchnorm_test.cc
@@ -277,7 +277,7 @@ class BatchNormValidator : public test::op::Validator<DType, AccReal> {
   typedef test::op::Validator<DType, AccReal> Super;
 
   /*! \brief Only static functions in this class */
-  BatchNormValidator() = delete;
+  BatchNormValidator() = delete;  // NOLINT
   /*! \brief Check batch norm output - 1D */
   static void checkBatchNorm1D(const TBlob *blob) {
@@ -566,10 +566,9 @@ static const test::op::kwargs_t nfs_ugs_kwargs_nocudnn = {
 #if !DISABLE_VALIDATION
 static bool isUGS(const test::op::kwargs_t& kwargs) {
-  for (test::op::kwargs_t::const_iterator i = kwargs.begin(),
-         e = kwargs.end(); i != e; ++i) {
-    if (!i->first.compare("use_global_stats")) {
-      return i->second.compare("True") == 0;
+  for (const auto & kwarg : kwargs) {
+    if (!kwarg.first.compare("use_global_stats")) {
+      return kwarg.second.compare("True") == 0;
     }
   }
   return false;
@@ -725,8 +724,8 @@ static test::op::OpInfoPair test
   size_t thisCount = 0;
 
-  typedef typename OperatorExecutor::DataType DType;
-  typedef typename OperatorExecutor::AccRealType AccReal;
+  using DType = typename OperatorExecutor::DataType;
+  using AccReal = typename OperatorExecutor::AccRealType;
 
   do {
     const bool isLast = thisCount == cycleCount - 1;
@@ -1288,8 +1287,8 @@ static void testSaveAndLoad(const std::vector& dims,
 TEST(BATCH_NORM, TestChannelAxisSaveAndLoad) {
   std::cout << std::endl << std::flush;
 
-  typedef float DType;
-  typedef float AccReal;
+  using DType = float;
+  using AccReal = float;
 
   const std::vector<std::vector<DType>> myData =
     { { 1.0f, 1.0f, 1.0f, 1.0f },
@@ -1346,8 +1345,8 @@ static void runChannelAxisTest(
   const size_t numberOfPasses = 5
 ) {
-  typedef float DType;
-  typedef float AccReal;
+  using DType = float;
+  using AccReal = float;
 
   size_t spatialSize = 1;
   for (size_t x = 1, n = shape.size(); x < n; ++x) {
diff --git a/tests/cpp/operator/runner/core_op_runner_test.cc b/tests/cpp/operator/runner/core_op_runner_test.cc
index 96458cd1c713..6e6cb91096fe 100644
--- a/tests/cpp/operator/runner/core_op_runner_test.cc
+++ b/tests/cpp/operator/runner/core_op_runner_test.cc
@@ -148,7 +148,7 @@ TEST(CORE_OP_RUNNER, ExecuteBidirectionalRunnerSimpleUnary) {
 }
 
 TEST(CORE_OP_RUNNER, ExecuteBidirectionalRunner) {
-  typedef float DType;
+  using DType = float;
   mxnet::TShape shape({5, 5});
   for (const std::pair<std::string, std::string>& i : test_binary_operators) {
     const char *op_name = i.first.c_str();
@@ -163,7 +163,7 @@
 /*!
  * \brief Test RunBidirectional dot product, which has different shaped inputs and outputs
  */
 TEST(CORE_OP_RUNNER, ExecuteBidirectionalRunnerDotProduct) {
-  typedef float DType;
+  using DType = float;
   const char *op_name = "dot";
   const char *backward_op_name = "_backward_dot";
   test::op::CoreOperatorRunner<DType> runner;
@@ -179,7 +179,7 @@
 /*!
  * \brief Timing tests for CPU
 */
 TEST(CORE_OP_RUNNER, TimingCPUSimpleUnary) {
-  typedef float DType;
+  using DType = float;
 
   const char *op_name = "relu";
@@ -210,7 +210,7 @@
 }
 
 TEST(CORE_OP_RUNNER, TimingCPUBinary) {
-  typedef float DType;
+  using DType = float;
 
   const char *op_name = "elemwise_add";
   const char *backward_op_name = "_backward_add";
@@ -246,7 +246,7 @@
 /*!
  * \brief Performance run dot product, which has different shaped inputs and outputs
 */
 TEST(CORE_OP_RUNNER, TimingCPUBinaryDotProduct) {
-  typedef float DType;
+  using DType = float;
 
   const char *op_name = "dot";
   const char *backward_op_name = "_backward_dot";
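The isUGS() rewrite above replaces an explicit const_iterator loop with a range-for. A standalone sketch of the same scan, not part of the patch; kwargs_t is assumed here to be a vector of string pairs (as the test helpers use it), and operator== is substituted for the !compare() idiom purely for readability:

// sketch.cc -- illustration only, not MXNet code
#include <string>
#include <utility>
#include <vector>

using kwargs_t = std::vector<std::pair<std::string, std::string>>;

// Range-for over the key/value pairs; returns true iff
// use_global_stats is present and set to "True".
bool isUGS(const kwargs_t& kwargs) {
  for (const auto& kwarg : kwargs) {
    if (kwarg.first == "use_global_stats")
      return kwarg.second == "True";
  }
  return false;
}

int main() {
  const kwargs_t kw{{"use_global_stats", "True"}};
  return isUGS(kw) ? 0 : 1;
}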
diff --git a/tests/cpp/storage/storage_test.cc b/tests/cpp/storage/storage_test.cc
index 3934c091faef..d9e7d8bc0294 100644
--- a/tests/cpp/storage/storage_test.cc
+++ b/tests/cpp/storage/storage_test.cc
@@ -21,11 +21,11 @@
 * \file storage_test.cc
 * \brief cpu/gpu storage tests
 */
-#include
 #include
 #include
 #include
 #include
+#include
 #include "test_util.h"
 
 TEST(Storage, Basic_CPU) {
diff --git a/tools/im2rec.cc b/tools/im2rec.cc
index 989b3147830d..ddb3f7f11c1e 100644
--- a/tools/im2rec.cc
+++ b/tools/im2rec.cc
@@ -247,7 +247,7 @@ int main(int argc, char *argv[]) {
         if (unchanged != 1) {
           cv::Mat img = cv::imdecode(decode_buf, color_mode);
-          CHECK(img.data != NULL) << "OpenCV decode fail:" << path;
+          CHECK(img.data != nullptr) << "OpenCV decode fail:" << path;
           cv::Mat res = img;
           if (new_size > 0) {
             if (center_crop) {