diff --git a/.gitmodules b/.gitmodules
index e0ffec11bfd0..151b9fe39a86 100644
--- a/.gitmodules
+++ b/.gitmodules
@@ -22,7 +22,7 @@
 	branch = master
 [submodule "3rdparty/tvm"]
 	path = 3rdparty/tvm
-	url = https://github.com/dmlc/tvm
+	url = https://github.com/larroy/tvm
 [submodule "3rdparty/onnx-tensorrt"]
 	path = 3rdparty/onnx-tensorrt
 	url = https://github.com/onnx/onnx-tensorrt.git
diff --git a/3rdparty/tvm b/3rdparty/tvm
index 0f053c82a747..e6df3d2fc099 160000
--- a/3rdparty/tvm
+++ b/3rdparty/tvm
@@ -1 +1 @@
-Subproject commit 0f053c82a747b4dcdf49570ec87c17e0067b7439
+Subproject commit e6df3d2fc099291411308e983fa7edd2df8a669a
diff --git a/src/c_api/c_api_function.cc b/src/c_api/c_api_function.cc
index 50f9b32d6e47..3adc581b50a5 100644
--- a/src/c_api/c_api_function.cc
+++ b/src/c_api/c_api_function.cc
@@ -56,7 +56,7 @@ std::vector<nnvm::NodeEntry> Gradient(
   std::vector<nnvm::NodeEntry> ret;
   for (uint32_t i = 0; i < g->num_outputs(); ++i) {
-    ret.emplace_back(nnvm::NodeEntry{g, i, 0});
+    ret.emplace_back(g, i, 0);
   }
   return ret;
diff --git a/src/executor/graph_executor.cc b/src/executor/graph_executor.cc
index 4a4505581920..21a617cb505f 100644
--- a/src/executor/graph_executor.cc
+++ b/src/executor/graph_executor.cc
@@ -147,7 +147,7 @@ nnvm::NodeEntry AggregateGradient(std::vector<nnvm::NodeEntry>&& v) {
     ng->attrs.op = Op::Get("_zeros_without_dtype");
     ng->attrs.name = "zeros_without_dtype";
     ng->attrs.op->attr_parser(&(ng->attrs));
-    return nnvm::NodeEntry{ng, 0, 0};
+    return nnvm::NodeEntry(std::move(ng), 0, 0);
   }
   // remove zero in the sum. at least keep 1.
@@ -168,7 +168,7 @@ nnvm::NodeEntry AggregateGradient(std::vector<nnvm::NodeEntry>&& v) {
       sum_node->attrs.dict["num_args"] = std::to_string(v.size());
       sum_node->attrs.op->attr_parser(&(sum_node->attrs));
       sum_node->inputs = std::move(v);
-      return nnvm::NodeEntry{sum_node, 0, 0};
+      return nnvm::NodeEntry(std::move(sum_node), 0, 0);
     } else {
       // use a stream line of plus instead
       nnvm::NodeEntry ret = v[0];
@@ -198,7 +198,7 @@ nnvm::NodeEntry AggregateGradient(std::vector<nnvm::NodeEntry>&& v) {
         x->attrs.op = ewise_plus_op;
         x->attrs.name = os.str();
         x->inputs = {ret, v[i]};
-        ret = nnvm::NodeEntry{x, 0, 0};
+        ret = nnvm::NodeEntry(std::move(x), 0, 0);
       }
       // identity node is used to avoid exposure of dummy plus node
       // when its output get assigned to another space.
@@ -247,7 +247,7 @@ nnvm::Graph GraphExecutor::InitFullGraph(nnvm::Symbol symbol,
   }
   if (!need_grad_) return g;
   for (size_t i = 0; i < g.outputs.size(); ++i) {
-    NodeEntry ngrad{nnvm::Node::Create(), 0, 0};
+    NodeEntry ngrad;
     head_grad_entry_.emplace_back(AttrHint(ngrad, g.outputs[i]));
     head_grad_map_[ngrad.node.get()] = i;
   }
diff --git a/src/imperative/cached_op.cc b/src/imperative/cached_op.cc
index c9215c5c8827..c83761fba9b0 100644
--- a/src/imperative/cached_op.cc
+++ b/src/imperative/cached_op.cc
@@ -160,7 +160,7 @@ CachedOp::CachedOp(
   {
     ograd_entries_.reserve(fwd_graph_.outputs.size());
     for (size_t i = 0; i < fwd_graph_.outputs.size(); ++i) {
-      ograd_entries_.emplace_back(NodeEntry{Node::Create(), 0, 0});
+      ograd_entries_.emplace_back();
     }
     std::vector<nnvm::NodeEntry> xs;
@@ -169,7 +169,7 @@ CachedOp::CachedOp(
       auto nid = idx.input_nodes()[i];
       if (idx.mutable_input_nodes().count(nid)) continue;
       fwd_input_to_grad_output_[i] = xs.size();
-      xs.emplace_back(NodeEntry{idx[nid].weak_ref.lock(), 0, 0});
+      xs.emplace_back(idx[nid].weak_ref.lock(), 0, 0);
     }
     CHECK_GT(xs.size(), 0)
diff --git a/src/imperative/imperative.cc b/src/imperative/imperative.cc
index b027de0a0f6f..1597eee9ca26 100644
--- a/src/imperative/imperative.cc
+++ b/src/imperative/imperative.cc
@@ -305,7 +305,7 @@ std::vector<NDArray*> Imperative::Backward(
   std::vector<nnvm::NodeEntry> ograd_entries;
   ograd_entries.reserve(ograds.size());
   for (size_t i = 0; i < outputs.size(); ++i) {
-    ograd_entries.emplace_back(NodeEntry{Node::Create(), 0, 0});
+    ograd_entries.emplace_back();
     AGInfo& info = AGInfo::Create(ograd_entries.back().node);
     info.ctx = outputs[i]->ctx();
     if (ograds[i] != nullptr) {
diff --git a/src/nnvm/legacy_op_util.cc b/src/nnvm/legacy_op_util.cc
index 16ad0053e29a..e738a5a47695 100644
--- a/src/nnvm/legacy_op_util.cc
+++ b/src/nnvm/legacy_op_util.cc
@@ -321,9 +321,10 @@ inline std::vector<NodeEntry> OpPropGradient(
     const NodePtr& ptr, const std::vector<NodeEntry>& out_grads) {
   auto& prop = nnvm::get<ParsedOpProp>(ptr->attrs.parsed);
-  std::vector<NodeEntry> out_data(prop.outputs.size());
-  for (uint32_t i = 0; i < out_data.size(); ++i) {
-    out_data[i] = NodeEntry{ptr, i, 0};
+  std::vector<NodeEntry> out_data;
+  out_data.reserve(prop.outputs.size());
+  for (size_t i = 0; i < prop.outputs.size(); ++i) {
+    out_data.emplace_back(ptr, i, 0);
   }
   std::vector<NodeEntry> in_data(
       ptr->inputs.begin(), ptr->inputs.begin() + prop.arguments.size());
@@ -331,7 +332,7 @@ inline std::vector<NodeEntry> OpPropGradient(
       out_grads.begin(), out_grads.begin() + prop.ptr->NumVisibleOutputs());
   auto inputs = prop.ptr->BackwardInputs(ograd, in_data, out_data);
   // add all the auxiliary data
-  for (uint32_t i = 0; i < prop.aux_states.size(); ++i) {
+  for (size_t i = 0; i < prop.aux_states.size(); ++i) {
     inputs.emplace_back(ptr->inputs[i + prop.arguments.size()]);
   }
   NodePtr gnode = Node::Create();
@@ -340,17 +341,15 @@ inline std::vector<NodeEntry> OpPropGradient(
   gnode->attrs = ptr->attrs;
   gnode->attrs.op = back_op;
   gnode->attrs.name = ptr->attrs.name + "_backward";
-  std::vector<NodeEntry> in_grad(prop.arguments.size());
-  for (uint32_t i = 0; i < prop.arguments.size(); ++i) {
-    in_grad[i] = NodeEntry{gnode, i, 0};
+  std::vector<NodeEntry> in_grad;
+  in_grad.reserve(prop.arguments.size() + prop.aux_states.size());
+  for (size_t i = 0; i < prop.arguments.size(); ++i) {
+    in_grad.emplace_back(gnode, i, 0);
   }
   // attach no gradient node to forbid gradient on aux_state
   if (prop.aux_states.size() != 0) {
-    NodePtr ng = Node::Create();
-    ng->attrs.op = Op::Get("_NoGradient");
-    ng->attrs.name = "NoGradient";
-    for (uint32_t i = 0; i < prop.aux_states.size(); ++i) {
-      in_grad.emplace_back(NodeEntry{ng, 0, 0});
+    for (size_t i = 0; i < prop.aux_states.size(); ++i) {
+      in_grad.emplace_back(Node::Create(Op::Get("_NoGradient"), "NoGradient"), 0, 0);
     }
   }
   return in_grad;
diff --git a/src/operator/custom/custom.cc b/src/operator/custom/custom.cc
index 412bfa1bc3aa..544fdc3a8dce 100644
--- a/src/operator/custom/custom.cc
+++ b/src/operator/custom/custom.cc
@@ -224,7 +224,7 @@ std::vector<nnvm::NodeEntry> Gradient(
     size_t i = static_cast<size_t>(t);
     if (i >= params.num_outs + params.num_args) {
       uint32_t idx = static_cast<uint32_t>(i-params.num_outs-params.num_args);
-      g->inputs.push_back(nnvm::NodeEntry{n, idx, 0});
+      g->inputs.emplace_back(n, idx, 0);
     } else if (i >= params.num_outs) {
       g->inputs.push_back(n->inputs[i-params.num_outs]);
     } else {
diff --git a/src/operator/nn/lrn.cc b/src/operator/nn/lrn.cc
index b632e35b57fe..3a3ca59f2be1 100644
--- a/src/operator/nn/lrn.cc
+++ b/src/operator/nn/lrn.cc
@@ -77,7 +77,7 @@ struct LRNGrad {
     std::vector<nnvm::NodeEntry> heads;
     heads.push_back(ograds[0]);  // out_grad
     heads.push_back(n->inputs[lrn_enum::kData]);
-    heads.emplace_back(nnvm::NodeEntry{n, lrn_enum::kTmpNorm, 0});
+    heads.emplace_back(n, lrn_enum::kTmpNorm, 0);
     return MakeGradNode(op_name, n, heads, n->attrs.dict);
   }
 };
diff --git a/src/operator/operator_common.h b/src/operator/operator_common.h
index 59f572211d0e..0328dfa7de26 100644
--- a/src/operator/operator_common.h
+++ b/src/operator/operator_common.h
@@ -447,7 +447,7 @@ inline std::vector<nnvm::NodeEntry> MakeNonlossGradNode(
   p->inputs.insert(p->inputs.end(), inputs.begin(), inputs.end());
   std::vector<nnvm::NodeEntry> ret;
   for (uint32_t i = 0; i < p->num_outputs(); ++i) {
-    ret.emplace_back(nnvm::NodeEntry{p, i, 0});
+    ret.emplace_back(p, i, 0);
   }
   return ret;
 }
diff --git a/src/operator/tensor/elemwise_sum.cc b/src/operator/tensor/elemwise_sum.cc
index f1ec8b5ad387..dec57633be22 100644
--- a/src/operator/tensor/elemwise_sum.cc
+++ b/src/operator/tensor/elemwise_sum.cc
@@ -49,12 +49,12 @@ std::vector<nnvm::NodeEntry> ElementWiseSumGrad(
       nnvm::Op::Get("identity");
   CHECK_EQ(ograds.size(), 1);
   std::vector<nnvm::NodeEntry> ret;
-  nnvm::NodeEntry n_out{n, 0, 0};
-  for (size_t i = 0; i < n->inputs.size(); i++) {
+  nnvm::NodeEntry n_out(n, 0, 0);
+  for (size_t i = 0; i < n->inputs.size(); ++i) {
     nnvm::NodePtr id_node = nnvm::Node::Create();
     id_node->attrs.op = copy_op;
     id_node->inputs = {ograds[0]};
-    ret.push_back(nnvm::NodeEntry{id_node, 0, 0});
+    ret.emplace_back(id_node, 0, 0);
   }
   return ret;
 }
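
Not part of the patch: a minimal, self-contained C++ sketch of what the hunks above rely on. It uses stand-in Node/NodeEntry types (not the real nnvm API) and assumes, as the diff does, that the updated nnvm NodeEntry provides a (NodePtr, index, version) constructor, so push_back(NodeEntry{...}) can become emplace_back(...). It also shows why moving the shared NodePtr is only safe when that pointer is not read again afterwards.

// Stand-in types only; not the real nnvm headers.
#include <cstdint>
#include <iostream>
#include <memory>
#include <utility>
#include <vector>

struct Node {};                         // stand-in for nnvm::Node
using NodePtr = std::shared_ptr<Node>;  // stand-in for nnvm::NodePtr

struct NodeEntry {  // stand-in; assumes the (NodePtr, index, version) constructor
  NodeEntry(NodePtr n, uint32_t i, uint32_t v)
      : node(std::move(n)), index(i), version(v) {}
  NodePtr node;
  uint32_t index;
  uint32_t version;
};

int main() {
  NodePtr n = std::make_shared<Node>();
  std::vector<NodeEntry> entries;

  // Old pattern: build a temporary NodeEntry, copying the shared_ptr into it
  // (an extra atomic refcount bump), then move the temporary into the vector.
  entries.push_back(NodeEntry{n, 0, 0});
  std::cout << "use_count after push_back: " << n.use_count() << '\n';   // 2

  // New pattern: construct the entry in place; n is still copied here.
  entries.emplace_back(n, 1, 0);
  std::cout << "use_count after emplace_back: " << n.use_count() << '\n';  // 3

  // Moving transfers ownership with no refcount bump, but leaves n empty,
  // so it must be the last use of n (never inside a loop that still reads n).
  entries.emplace_back(std::move(n), 2, 0);
  std::cout << "n after move is " << (n ? "non-empty" : "empty") << '\n';  // empty
  return 0;
}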