diff --git a/.gitmodules b/.gitmodules
index 836d824a6f5a..0a7b72db5d14 100644
--- a/.gitmodules
+++ b/.gitmodules
@@ -25,7 +25,7 @@
 	url = https://github.com/dmlc/cub
 [submodule "3rdparty/tvm"]
 	path = 3rdparty/tvm
-	url = https://github.com/dmlc/tvm
+	url = https://github.com/larroy/tvm
 [submodule "3rdparty/onnx-tensorrt"]
 	path = 3rdparty/onnx-tensorrt
 	url = https://github.com/onnx/onnx-tensorrt.git
diff --git a/3rdparty/tvm b/3rdparty/tvm
index 0f053c82a747..e6df3d2fc099 160000
--- a/3rdparty/tvm
+++ b/3rdparty/tvm
@@ -1 +1 @@
-Subproject commit 0f053c82a747b4dcdf49570ec87c17e0067b7439
+Subproject commit e6df3d2fc099291411308e983fa7edd2df8a669a
diff --git a/src/c_api/c_api_function.cc b/src/c_api/c_api_function.cc
index 7091be2e72c5..4744f829a838 100644
--- a/src/c_api/c_api_function.cc
+++ b/src/c_api/c_api_function.cc
@@ -56,7 +56,7 @@ std::vector<nnvm::NodeEntry> Gradient(
   std::vector<nnvm::NodeEntry> ret;
   for (uint32_t i = 0; i < g->num_outputs(); ++i) {
-    ret.emplace_back(nnvm::NodeEntry{g, i, 0});
+    ret.emplace_back(g, i, 0);
   }
   return ret;
diff --git a/src/executor/graph_executor.cc b/src/executor/graph_executor.cc
index 8302dc133c64..2f045caf9a6d 100644
--- a/src/executor/graph_executor.cc
+++ b/src/executor/graph_executor.cc
@@ -146,7 +146,7 @@ nnvm::NodeEntry AggregateGradient(std::vector<nnvm::NodeEntry>&& v) {
     ng->attrs.op = Op::Get("_zeros_without_dtype");
     ng->attrs.name = "zeros_without_dtype";
     ng->attrs.op->attr_parser(&(ng->attrs));
-    return nnvm::NodeEntry{ng, 0, 0};
+    return nnvm::NodeEntry(std::move(ng), 0, 0);
   }
   // remove zero in the sum. at least keep 1.
@@ -167,7 +167,7 @@ nnvm::NodeEntry AggregateGradient(std::vector<nnvm::NodeEntry>&& v) {
     sum_node->attrs.dict["num_args"] = std::to_string(v.size());
     sum_node->attrs.op->attr_parser(&(sum_node->attrs));
     sum_node->inputs = std::move(v);
-    return nnvm::NodeEntry{sum_node, 0, 0};
+    return nnvm::NodeEntry(std::move(sum_node), 0, 0);
   } else {
     // use a stream line of plus instead
     nnvm::NodeEntry ret = v[0];
@@ -197,7 +197,7 @@ nnvm::NodeEntry AggregateGradient(std::vector<nnvm::NodeEntry>&& v) {
       x->attrs.op = ewise_plus_op;
       x->attrs.name = os.str();
       x->inputs = {ret, v[i]};
-      ret = nnvm::NodeEntry{x, 0, 0};
+      ret = nnvm::NodeEntry(std::move(x), 0, 0);
     }
     // identity node is used to avoid exposure of dummy plus node
     // when its output get assigned to another space.
@@ -246,7 +246,7 @@ nnvm::Graph GraphExecutor::InitFullGraph(nnvm::Symbol symbol,
   }
   if (!need_grad_) return g;
   for (size_t i = 0; i < g.outputs.size(); ++i) {
-    NodeEntry ngrad{nnvm::Node::Create(), 0, 0};
+    NodeEntry ngrad;
     head_grad_entry_.emplace_back(AttrHint(ngrad, g.outputs[i]));
     head_grad_map_[ngrad.node.get()] = i;
   }
diff --git a/src/imperative/cached_op.cc b/src/imperative/cached_op.cc
index 58ec4e65b846..3143024d788e 100644
--- a/src/imperative/cached_op.cc
+++ b/src/imperative/cached_op.cc
@@ -160,7 +160,7 @@ CachedOp::CachedOp(
   {
     ograd_entries_.reserve(fwd_graph_.outputs.size());
     for (size_t i = 0; i < fwd_graph_.outputs.size(); ++i) {
-      ograd_entries_.emplace_back(NodeEntry{Node::Create(), 0, 0});
+      ograd_entries_.emplace_back();
     }
     std::vector<NodeEntry> xs;
@@ -169,7 +169,7 @@ CachedOp::CachedOp(
       auto nid = idx.input_nodes()[i];
       if (idx.mutable_input_nodes().count(nid)) continue;
       fwd_input_to_grad_output_[i] = xs.size();
-      xs.emplace_back(NodeEntry{idx[nid].weak_ref.lock(), 0, 0});
+      xs.emplace_back(idx[nid].weak_ref.lock(), 0, 0);
     }
     CHECK_GT(xs.size(), 0)
diff --git a/src/imperative/imperative.cc b/src/imperative/imperative.cc
index a381b2384113..3d8958d26888 100644
--- a/src/imperative/imperative.cc
+++ b/src/imperative/imperative.cc
@@ -303,7 +303,7 @@ std::vector<NDArray*> Imperative::Backward(
   std::vector<NodeEntry> ograd_entries;
   ograd_entries.reserve(ograds.size());
   for (size_t i = 0; i < outputs.size(); ++i) {
-    ograd_entries.emplace_back(NodeEntry{Node::Create(), 0, 0});
+    ograd_entries.emplace_back();
     AGInfo& info = AGInfo::Create(ograd_entries.back().node);
     info.ctx = outputs[i]->ctx();
     if (ograds[i] != nullptr) {
diff --git a/src/nnvm/legacy_op_util.cc b/src/nnvm/legacy_op_util.cc
index 4ab777b6adb7..6eaf70c4760f 100644
--- a/src/nnvm/legacy_op_util.cc
+++ b/src/nnvm/legacy_op_util.cc
@@ -321,9 +321,10 @@ inline std::vector<NodeEntry> OpPropGradient(
     const NodePtr& ptr, const std::vector<NodeEntry>& out_grads) {
   auto& prop = nnvm::get<ParsedOpProp>(ptr->attrs.parsed);
-  std::vector<NodeEntry> out_data(prop.outputs.size());
-  for (uint32_t i = 0; i < out_data.size(); ++i) {
-    out_data[i] = NodeEntry{ptr, i, 0};
+  std::vector<NodeEntry> out_data;
+  out_data.reserve(prop.outputs.size());
+  for (size_t i = 0; i < prop.outputs.size(); ++i) {
+    out_data.emplace_back(ptr, i, 0);
   }
   std::vector<NodeEntry> in_data(
       ptr->inputs.begin(), ptr->inputs.begin() + prop.arguments.size());
@@ -331,7 +332,7 @@ inline std::vector<NodeEntry> OpPropGradient(
       out_grads.begin(), out_grads.begin() + prop.ptr->NumVisibleOutputs());
   auto inputs = prop.ptr->BackwardInputs(ograd, in_data, out_data);
   // add all the auxiliary data
-  for (uint32_t i = 0; i < prop.aux_states.size(); ++i) {
+  for (size_t i = 0; i < prop.aux_states.size(); ++i) {
     inputs.emplace_back(ptr->inputs[i + prop.arguments.size()]);
   }
   NodePtr gnode = Node::Create();
@@ -340,17 +341,15 @@ inline std::vector<NodeEntry> OpPropGradient(
   gnode->attrs = ptr->attrs;
   gnode->attrs.op = back_op;
   gnode->attrs.name = ptr->attrs.name + "_backward";
-  std::vector<NodeEntry> in_grad(prop.arguments.size());
-  for (uint32_t i = 0; i < prop.arguments.size(); ++i) {
-    in_grad[i] = NodeEntry{gnode, i, 0};
+  std::vector<NodeEntry> in_grad;
+  in_grad.reserve(prop.arguments.size() + prop.aux_states.size());
+  for (size_t i = 0; i < prop.arguments.size(); ++i) {
+    in_grad.emplace_back(gnode, i, 0);
   }
   // attach no gradient node to forbid gradient on aux_state
   if (prop.aux_states.size() != 0) {
-    NodePtr ng = Node::Create();
-    ng->attrs.op = Op::Get("_NoGradient");
-    ng->attrs.name = "NoGradient";
-    for (uint32_t i = 0; i < prop.aux_states.size(); ++i) {
-      in_grad.emplace_back(NodeEntry{ng, 0, 0});
+    for (size_t i = 0; i < prop.aux_states.size(); ++i) {
+      in_grad.emplace_back(Node::Create(Op::Get("_NoGradient"), "NoGradient"), 0, 0);
     }
   }
   return in_grad;
diff --git a/src/operator/custom/custom.cc b/src/operator/custom/custom.cc
index 2643abbe9e5f..d45cc208ae14 100644
--- a/src/operator/custom/custom.cc
+++ b/src/operator/custom/custom.cc
@@ -225,7 +225,7 @@ std::vector<nnvm::NodeEntry> Gradient(
     size_t i = static_cast<size_t>(t);
     if (i >= params.num_outs + params.num_args) {
       uint32_t idx = static_cast<uint32_t>(i-params.num_outs-params.num_args);
-      g->inputs.push_back(nnvm::NodeEntry{n, idx, 0});
+      g->inputs.emplace_back(n, idx, 0);
     } else if (i >= params.num_outs) {
       g->inputs.push_back(n->inputs[i-params.num_outs]);
     } else {
diff --git a/src/operator/nn/lrn.cc b/src/operator/nn/lrn.cc
index 020cb479acc6..86099ae359b6 100644
--- a/src/operator/nn/lrn.cc
+++ b/src/operator/nn/lrn.cc
@@ -77,7 +77,7 @@ struct LRNGrad {
     std::vector<nnvm::NodeEntry> heads;
     heads.push_back(ograds[0]);  // out_grad
     heads.push_back(n->inputs[lrn_enum::kData]);
-    heads.emplace_back(nnvm::NodeEntry{n, lrn_enum::kTmpNorm, 0});
+    heads.emplace_back(n, lrn_enum::kTmpNorm, 0);
     return MakeGradNode(op_name, n, heads, n->attrs.dict);
   }
 };
diff --git a/src/operator/operator_common.h b/src/operator/operator_common.h
index b1822647cf01..a8447912ac7d 100644
--- a/src/operator/operator_common.h
+++ b/src/operator/operator_common.h
@@ -446,7 +446,7 @@ inline std::vector<nnvm::NodeEntry> MakeNonlossGradNode(
   p->inputs.insert(p->inputs.end(), inputs.begin(), inputs.end());
   std::vector<nnvm::NodeEntry> ret;
   for (uint32_t i = 0; i < p->num_outputs(); ++i) {
-    ret.emplace_back(nnvm::NodeEntry{p, i, 0});
+    ret.emplace_back(p, i, 0);
   }
   return ret;
 }
diff --git a/src/operator/tensor/elemwise_sum.cc b/src/operator/tensor/elemwise_sum.cc
index 85b58b6e0f3e..d3479291c6c1 100644
--- a/src/operator/tensor/elemwise_sum.cc
+++ b/src/operator/tensor/elemwise_sum.cc
@@ -49,12 +49,12 @@ std::vector<nnvm::NodeEntry> ElementWiseSumGrad(
       nnvm::Op::Get("identity");
   CHECK_EQ(ograds.size(), 1);
   std::vector<nnvm::NodeEntry> ret;
-  nnvm::NodeEntry n_out{n, 0, 0};
-  for (size_t i = 0; i < n->inputs.size(); i++) {
+  nnvm::NodeEntry n_out(n, 0, 0);
+  for (size_t i = 0; i < n->inputs.size(); ++i) {
     nnvm::NodePtr id_node = nnvm::Node::Create();
     id_node->attrs.op = copy_op;
     id_node->inputs = {ograds[0]};
-    ret.push_back(nnvm::NodeEntry{id_node, 0, 0});
+    ret.emplace_back(id_node, 0, 0);
   }
   return ret;
 }
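
Note on the pattern: every hunk above replaces a brace-initialized temporary such as nnvm::NodeEntry{n, i, 0} with direct construction, either NodeEntry(node, index, version) or vector::emplace_back(node, index, version), and uses NodeEntry's default constructor where a fresh placeholder node was previously created by hand. The following sketch is a minimal, self-contained illustration of that style; the Node and NodeEntry stand-ins are simplified assumptions for demonstration only, not the actual nnvm definitions pulled in by the submodule bump.

// Simplified stand-ins for nnvm::Node / nnvm::NodeEntry (assumed shapes, for
// illustration only), showing the construction style the patch switches to.
#include <cstdint>
#include <memory>
#include <utility>
#include <vector>

struct Node;
using NodePtr = std::shared_ptr<Node>;

struct Node {
  static NodePtr Create() { return std::make_shared<Node>(); }
};

struct NodeEntry {
  // Takes the NodePtr by value so callers can either copy or move into it.
  NodeEntry(NodePtr node, uint32_t index, uint32_t version)
      : node(std::move(node)), index(index), version(version) {}
  // Default construction attaches a freshly created node, matching how the
  // patch replaces NodeEntry{Node::Create(), 0, 0} with a bare emplace_back().
  NodeEntry() : node(Node::Create()), index(0), version(0) {}

  NodePtr node;
  uint32_t index;
  uint32_t version;
};

int main() {
  NodePtr n = Node::Create();
  std::vector<NodeEntry> outputs;
  outputs.reserve(3);

  for (uint32_t i = 0; i < 3; ++i) {
    // Old style: build a temporary NodeEntry, then copy/move it into the
    // vector, paying for a temporary plus an extra shared_ptr refcount bump.
    //   outputs.push_back(NodeEntry{n, i, 0});

    // New style: construct the entry in place from the constructor arguments.
    // `n` is deliberately copied, not moved, because the loop keeps using it.
    outputs.emplace_back(n, i, 0);
  }

  // Head-gradient placeholders: default construction already attaches a node.
  std::vector<NodeEntry> ograds;
  ograds.emplace_back();
  return 0;
}

In-place construction saves one NodeEntry temporary and one shared_ptr copy per entry. The one place a NodePtr should not be moved is inside a loop whose condition still dereferences it (for example g->num_outputs() or p->num_outputs() above), which is why those loops copy the pointer rather than move it.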