diff --git a/.gitmodules b/.gitmodules
index e0ffec11bfd0..151b9fe39a86 100644
--- a/.gitmodules
+++ b/.gitmodules
@@ -22,7 +22,7 @@
 	branch = master
 [submodule "3rdparty/tvm"]
 	path = 3rdparty/tvm
-	url = https://github.com/dmlc/tvm
+	url = https://github.com/larroy/tvm
 [submodule "3rdparty/onnx-tensorrt"]
 	path = 3rdparty/onnx-tensorrt
 	url = https://github.com/onnx/onnx-tensorrt.git
diff --git a/3rdparty/dmlc-core b/3rdparty/dmlc-core
index e879d04dc263..3943914eed66 160000
--- a/3rdparty/dmlc-core
+++ b/3rdparty/dmlc-core
@@ -1 +1 @@
-Subproject commit e879d04dc263561ab11a3837f6c1fa0326a85898
+Subproject commit 3943914eed66470bd010df581e29e4dca4f7df6f
diff --git a/3rdparty/tvm b/3rdparty/tvm
index 8518c7ddb561..21935dcbf56a 160000
--- a/3rdparty/tvm
+++ b/3rdparty/tvm
@@ -1 +1 @@
-Subproject commit 8518c7ddb561afba8112324fad4b35b8d111c525
+Subproject commit 21935dcbf56ad3bd66ebff9891a6bc3865b8106d
diff --git a/src/c_api/c_api_function.cc b/src/c_api/c_api_function.cc
index 50f9b32d6e47..3adc581b50a5 100644
--- a/src/c_api/c_api_function.cc
+++ b/src/c_api/c_api_function.cc
@@ -56,7 +56,7 @@ std::vector<nnvm::NodeEntry> Gradient(
   std::vector<nnvm::NodeEntry> ret;
   for (uint32_t i = 0; i < g->num_outputs(); ++i) {
-    ret.emplace_back(nnvm::NodeEntry{g, i, 0});
+    ret.emplace_back(std::move(g), i, 0);
   }
   return ret;
diff --git a/src/executor/graph_executor.cc b/src/executor/graph_executor.cc
index da1f13bce6c6..c2193611e95c 100644
--- a/src/executor/graph_executor.cc
+++ b/src/executor/graph_executor.cc
@@ -223,7 +223,7 @@ nnvm::NodeEntry AggregateGradient(std::vector<nnvm::NodeEntry>&& v) {
     ng->attrs.op = Op::Get("_zeros_without_dtype");
     ng->attrs.name = "zeros_without_dtype";
     ng->attrs.op->attr_parser(&(ng->attrs));
-    return nnvm::NodeEntry{ng, 0, 0};
+    return nnvm::NodeEntry(std::move(ng), 0, 0);
   }
   // remove zero in the sum. at least keep 1.
@@ -244,7 +244,7 @@ nnvm::NodeEntry AggregateGradient(std::vector<nnvm::NodeEntry>&& v) {
       sum_node->attrs.dict["num_args"] = std::to_string(v.size());
       sum_node->attrs.op->attr_parser(&(sum_node->attrs));
       sum_node->inputs = std::move(v);
-      return nnvm::NodeEntry{sum_node, 0, 0};
+      return nnvm::NodeEntry(std::move(sum_node), 0, 0);
     } else {
       // use a stream line of plus instead
       nnvm::NodeEntry ret = v[0];
@@ -274,7 +274,7 @@ nnvm::NodeEntry AggregateGradient(std::vector<nnvm::NodeEntry>&& v) {
         x->attrs.op = ewise_plus_op;
         x->attrs.name = os.str();
         x->inputs = {ret, v[i]};
-        ret = nnvm::NodeEntry{x, 0, 0};
+        ret = nnvm::NodeEntry(std::move(x), 0, 0);
       }
       // identity node is used to avoid exposure of dummy plus node
       // when its output get assigned to another space.
@@ -323,7 +323,7 @@ nnvm::Graph GraphExecutor::InitFullGraph(nnvm::Symbol symbol,
   }
   if (!need_grad_) return g;
   for (size_t i = 0; i < g.outputs.size(); ++i) {
-    NodeEntry ngrad{nnvm::Node::Create(), 0, 0};
+    NodeEntry ngrad;
     head_grad_entry_.emplace_back(AttrHint(ngrad, g.outputs[i]));
     head_grad_map_[ngrad.node.get()] = i;
   }
diff --git a/src/imperative/cached_op.cc b/src/imperative/cached_op.cc
index dbc1cbfd5745..8d51c70da5ae 100644
--- a/src/imperative/cached_op.cc
+++ b/src/imperative/cached_op.cc
@@ -160,7 +160,7 @@ CachedOp::CachedOp(
   {
     ograd_entries_.reserve(fwd_graph_.outputs.size());
     for (size_t i = 0; i < fwd_graph_.outputs.size(); ++i) {
-      ograd_entries_.emplace_back(NodeEntry{Node::Create(), 0, 0});
+      ograd_entries_.emplace_back();
     }
     std::vector<NodeEntry> xs;
@@ -169,7 +169,7 @@ CachedOp::CachedOp(
       auto nid = idx.input_nodes()[i];
       if (idx.mutable_input_nodes().count(nid)) continue;
      fwd_input_to_grad_output_[i] = xs.size();
-      xs.emplace_back(NodeEntry{idx[nid].weak_ref.lock(), 0, 0});
+      xs.emplace_back(idx[nid].weak_ref.lock(), 0, 0);
     }
     CHECK_GT(xs.size(), 0)
diff --git a/src/imperative/imperative.cc b/src/imperative/imperative.cc
index a1c41ee0df6b..21cf33220dba 100644
--- a/src/imperative/imperative.cc
+++ b/src/imperative/imperative.cc
@@ -305,7 +305,7 @@ std::vector<NDArray*> Imperative::Backward(
   std::vector<NodeEntry> ograd_entries;
   ograd_entries.reserve(ograds.size());
   for (size_t i = 0; i < outputs.size(); ++i) {
-    ograd_entries.emplace_back(NodeEntry{Node::Create(), 0, 0});
+    ograd_entries.emplace_back();
     AGInfo& info = AGInfo::Create(ograd_entries.back().node);
     info.ctx = outputs[i]->ctx();
     if (ograds[i] != nullptr) {
diff --git a/src/nnvm/legacy_op_util.cc b/src/nnvm/legacy_op_util.cc
index 16ad0053e29a..e738a5a47695 100644
--- a/src/nnvm/legacy_op_util.cc
+++ b/src/nnvm/legacy_op_util.cc
@@ -321,9 +321,10 @@ inline std::vector<NodeEntry> OpPropGradient(
     const NodePtr& ptr,
     const std::vector<NodeEntry>& out_grads) {
   auto& prop = nnvm::get<ParsedOpProp>(ptr->attrs.parsed);
-  std::vector<NodeEntry> out_data(prop.outputs.size());
-  for (uint32_t i = 0; i < out_data.size(); ++i) {
-    out_data[i] = NodeEntry{ptr, i, 0};
+  std::vector<NodeEntry> out_data;
+  out_data.reserve(prop.outputs.size());
+  for (size_t i = 0; i < prop.outputs.size(); ++i) {
+    out_data.emplace_back(ptr, i, 0);
   }
   std::vector<NodeEntry> in_data(
       ptr->inputs.begin(), ptr->inputs.begin() + prop.arguments.size());
@@ -331,7 +332,7 @@ inline std::vector<NodeEntry> OpPropGradient(
       out_grads.begin(), out_grads.begin() + prop.ptr->NumVisibleOutputs());
   auto inputs = prop.ptr->BackwardInputs(ograd, in_data, out_data);
   // add all the auxiliary data
-  for (uint32_t i = 0; i < prop.aux_states.size(); ++i) {
+  for (size_t i = 0; i < prop.aux_states.size(); ++i) {
     inputs.emplace_back(ptr->inputs[i + prop.arguments.size()]);
   }
   NodePtr gnode = Node::Create();
@@ -340,17 +341,15 @@ inline std::vector<NodeEntry> OpPropGradient(
   gnode->attrs = ptr->attrs;
   gnode->attrs.op = back_op;
   gnode->attrs.name = ptr->attrs.name + "_backward";
-  std::vector<NodeEntry> in_grad(prop.arguments.size());
-  for (uint32_t i = 0; i < prop.arguments.size(); ++i) {
-    in_grad[i] = NodeEntry{gnode, i, 0};
+  std::vector<NodeEntry> in_grad;
+  in_grad.reserve(prop.arguments.size() + prop.aux_states.size());
+  for (size_t i = 0; i < prop.arguments.size(); ++i) {
+    in_grad.emplace_back(gnode, i, 0);
   }
   // attach no gradient node to forbid gradient on aux_state
   if (prop.aux_states.size() != 0) {
-    NodePtr ng = Node::Create();
-    ng->attrs.op = Op::Get("_NoGradient");
-    ng->attrs.name = "NoGradient";
-    for (uint32_t i = 0; i < prop.aux_states.size(); ++i) {
-      in_grad.emplace_back(NodeEntry{ng, 0, 0});
+    for (size_t i = 0; i < prop.aux_states.size(); ++i) {
+      in_grad.emplace_back(Node::Create(Op::Get("_NoGradient"), "NoGradient"), 0, 0);
     }
   }
   return in_grad;
diff --git a/src/operator/nn/lrn.cc b/src/operator/nn/lrn.cc
index b632e35b57fe..3a3ca59f2be1 100644
--- a/src/operator/nn/lrn.cc
+++ b/src/operator/nn/lrn.cc
@@ -77,7 +77,7 @@ struct LRNGrad {
     std::vector<nnvm::NodeEntry> heads;
     heads.push_back(ograds[0]);  // out_grad
     heads.push_back(n->inputs[lrn_enum::kData]);
-    heads.emplace_back(nnvm::NodeEntry{n, lrn_enum::kTmpNorm, 0});
+    heads.emplace_back(n, lrn_enum::kTmpNorm, 0);
     return MakeGradNode(op_name, n, heads, n->attrs.dict);
   }
 };
diff --git a/src/operator/operator_common.h b/src/operator/operator_common.h
index 59f572211d0e..0328dfa7de26 100644
--- a/src/operator/operator_common.h
+++ b/src/operator/operator_common.h
@@ -447,7 +447,7 @@ inline std::vector<nnvm::NodeEntry> MakeNonlossGradNode(
   p->inputs.insert(p->inputs.end(), inputs.begin(), inputs.end());
   std::vector<nnvm::NodeEntry> ret;
   for (uint32_t i = 0; i < p->num_outputs(); ++i) {
-    ret.emplace_back(nnvm::NodeEntry{p, i, 0});
+    ret.emplace_back(std::move(p), i, 0);
   }
   return ret;
 }
diff --git a/src/operator/tensor/elemwise_sum.cc b/src/operator/tensor/elemwise_sum.cc
index 77044cb3995e..dec57633be22 100644
--- a/src/operator/tensor/elemwise_sum.cc
+++ b/src/operator/tensor/elemwise_sum.cc
@@ -49,8 +49,8 @@ std::vector<nnvm::NodeEntry> ElementWiseSumGrad(
       nnvm::Op::Get("identity");
   CHECK_EQ(ograds.size(), 1);
   std::vector<nnvm::NodeEntry> ret;
-  nnvm::NodeEntry n_out{n, 0, 0};
-  for (size_t i = 0; i < n->inputs.size(); i++) {
+  nnvm::NodeEntry n_out(n, 0, 0);
+  for (size_t i = 0; i < n->inputs.size(); ++i) {
     nnvm::NodePtr id_node = nnvm::Node::Create();
     id_node->attrs.op = copy_op;
     id_node->inputs = {ograds[0]};