Skip to content
This repository has been archived by the owner on Nov 17, 2023. It is now read-only.

Commit

Permalink
Delete deprecated functions
Browse files Browse the repository at this point in the history
  • Loading branch information
reminisce committed May 5, 2017
1 parent 49a0d15 commit 66db99a
Show file tree
Hide file tree
Showing 2 changed files with 0 additions and 295 deletions.
270 changes: 0 additions & 270 deletions src/executor/graph_executor.cc
Original file line number Diff line number Diff line change
Expand Up @@ -241,69 +241,6 @@ nnvm::Graph GraphExecutor::InitFullGraph(nnvm::Symbol symbol,
return g;
}

// Deprecated V1 path: construct the full graph (forward plus gradient part)
// for the regular bind flow.
// \param symbol the forward symbol to execute
// \param grad_req_type per-input gradient request; kNullOp means no gradient
// \param arg_grad_store user-provided arrays that will receive the gradients
// \return a graph whose outputs are the forward outputs followed by the
//         requested gradient entries
nnvm::Graph GraphExecutor::InitFullGraphV1(
    nnvm::Symbol symbol,
    const std::vector<OpReqType>& grad_req_type,
    const std::vector<NDArray>& arg_grad_store) {
  using nnvm::NodePtr;
  using nnvm::NodeEntry;
  // initial information
  num_forward_outputs_ = symbol.outputs.size();
  num_forward_inputs_ = symbol.ListInputs(nnvm::Symbol::kAll).size();

  nnvm::Graph g;
  g.outputs = symbol.outputs;
  // A backward pass is only needed if at least one gradient was requested.
  bool need_grad = false;
  for (OpReqType req : grad_req_type) {
    if (req != kNullOp) need_grad = true;
  }
  if (!need_grad) return g;
  // Create one head-gradient placeholder variable per forward output.
  for (size_t i = 0; i < g.outputs.size(); ++i) {
    NodeEntry ngrad{nnvm::Node::Create(), 0, 0};
    head_grad_entry_.emplace_back(AttrHint(ngrad, g.outputs[i]));
    head_grad_map_[ngrad.node.get()] = i;
  }
  // Collect the inputs (xs) whose gradients were requested, and record the
  // destination array plus write request for each of them.
  std::vector<NodePtr> args = symbol.ListInputs(nnvm::Symbol::kReadOnlyArgs);
  std::vector<NodeEntry> xs;
  for (size_t i = 0; i < grad_req_type.size(); ++i) {
    if (grad_req_type[i] != kNullOp) {
      // emplace_back constructs the pair in place; make_pair was redundant.
      grad_store_.emplace_back(grad_req_type[i], arg_grad_store[i]);
      xs.emplace_back(NodeEntry{args[i], 0, 0});
    }
  }

  // Predicate deciding whether a node's output may be recomputed (mirrored)
  // during backward; globally switched by MXNET_BACKWARD_DO_MIRROR.
  // The lambda is declared to return int, so return 0/1 consistently instead
  // of the original mix of bool and int literals (behavior is unchanged:
  // false/true converted to 0/1 implicitly).
  int do_mirror = dmlc::GetEnv("MXNET_BACKWARD_DO_MIRROR", 0);
  auto need_mirror = [do_mirror](const nnvm::Node& node) -> int {
    if (node.is_variable()) return 0;
    const std::string& type = node.attrs.op->name;
    if (type == "Dropout") return 0;
    // A per-node __force_mirroring__ attribute overrides the global switch.
    if (get_node_attr(node, "__force_mirroring__", false)) return 1;
    if (do_mirror == 0) return 0;
    if (type == "Convolution") return 0;
    if (type == "FullyConnected") return 0;
    if (type == "Concat") return 0;
    if (type == "SoftmaxOutput") return 0;
    if (type == "CuDNNBatchNorm") return 0;
    return 1;
  };

  // Ops used by the gradient pass to materialize zero gradients.
  std::vector<const nnvm::Op*> zero_ops;
  zero_ops.push_back(nnvm::Op::Get("zeros_like"));
  zero_ops.push_back(nnvm::Op::Get("_zeros"));

  // take gradient
  nnvm::Graph g_grad = nnvm::pass::Gradient(
      g, symbol.outputs, xs, head_grad_entry_,
      AggregateGradient, need_mirror, nullptr,
      zero_ops);
  CHECK_EQ(g_grad.outputs.size(), xs.size());
  // Append the gradient outputs after the forward outputs.
  for (const auto &e : g_grad.outputs) {
    g.outputs.push_back(e);
  }
  return g;
}

/*!
* \brief Assign context to the graph.
* This is triggered by both simple_bind and bind flows.
Expand Down Expand Up @@ -409,98 +346,6 @@ Graph AssignContext(Graph g,
return g;
}

// Pass to assign a context (device) to every node of the graph.
// - Without a group2ctx map: every node runs on default_ctx, and all input
//   and gradient arrays are checked to already live on that context.
// - With a group2ctx map: contexts are gathered from the map, the provided
//   input arrays and the gradient arrays, then nnvm::pass::PlaceDevice
//   propagates the partial assignment through the whole graph.
Graph AssignContextV1(Graph g,
                      const Context& default_ctx,
                      const std::map<std::string, Context>& ctx_map,
                      const std::vector<NDArray>& in_args,
                      const std::vector<std::pair<OpReqType, NDArray> >& grad_store,
                      const std::vector<NDArray>& aux_states,
                      size_t num_forward_inputs,
                      size_t num_forward_outputs) {
  const auto& idx = g.indexed_graph();
  const auto& mutable_nodes = idx.mutable_input_nodes();
  // default use default context.
  if (ctx_map.size() == 0) {
    g.attrs["context"] = std::make_shared<nnvm::any>(
        ContextVector(idx.num_nodes(), default_ctx));
    // All user-supplied arrays must already live on the default context.
    for (const auto& x : in_args) {
      CHECK(x.ctx() == default_ctx)
        << "Input array is in " << x.ctx() << " while binding with ctx=" << default_ctx
        << ". All arguments must be in global context (" << default_ctx
        << ") unless group2ctx is specified for cross-device graph.";
    }
    for (const auto& x : grad_store) {
      CHECK(x.second.ctx() == default_ctx)
        << "Gradient array is in " << x.second.ctx() << " while binding with ctx="
        << default_ctx << ". All gradients must be in global context (" << default_ctx
        << ") unless group2ctx is specified for cross-device graph.";
    }
    return g;
  }
  // otherwise, use context assignment.
  std::map<Context, int> ctx2id;     // context -> dense device id
  std::vector<Context> ctx_list;     // device id -> context (inverse of ctx2id)
  nnvm::DeviceVector device(idx.num_nodes(), -1);  // -1 marks "unassigned"
  nnvm::DeviceAssignMap device_map;  // ctx-group name -> device id

  // Register every context mentioned in the group2ctx map.
  for (auto &kv : ctx_map) {
    if (ctx2id.count(kv.second) == 0) {
      ctx2id[kv.second] = static_cast<int>(ctx_list.size());
      ctx_list.push_back(kv.second);
    }
    device_map[kv.first] = ctx2id.at(kv.second);
  }

  // Pin each forward input node to the context of the array supplied for it.
  // Mutable inputs consume aux_states in order; the rest consume in_args.
  size_t arg_top = 0, aux_top = 0;
  for (size_t i = 0; i < num_forward_inputs; ++i) {
    const uint32_t nid = idx.input_nodes().at(i);
    Context ctx;
    if (mutable_nodes.count(nid)) {
      CHECK_LT(aux_top, aux_states.size());
      ctx = aux_states[aux_top].ctx();
      ++aux_top;
    } else {
      CHECK_LT(arg_top, in_args.size());
      ctx = in_args[arg_top].ctx();
      ++arg_top;
    }
    if (ctx2id.count(ctx) == 0) {
      ctx2id[ctx] = static_cast<int>(ctx_list.size());
      ctx_list.push_back(ctx);
    }
    device[nid] = ctx2id.at(ctx);
  }
  // Pin each backward (gradient) output node to the context of its gradient
  // array; a node already assigned must agree with the gradient's context.
  for (size_t i = num_forward_outputs; i < g.outputs.size(); ++i) {
    const uint32_t nid = idx.outputs()[i].node_id;
    Context ctx = grad_store[i - num_forward_outputs].second.ctx();
    if (ctx2id.count(ctx) == 0) {
      ctx2id[ctx] = static_cast<int>(ctx_list.size());
      ctx_list.push_back(ctx);
    }
    int devid = ctx2id.at(ctx);
    if (device[nid] != -1) {
      CHECK_EQ(device[nid], devid) << "device of same output not equal to each other";
    } else {
      device[nid] = devid;
    }
  }
  // Propagate the partial assignment through the graph; cross-device edges
  // get a _CrossDeviceCopy node inserted by the pass.
  g.attrs["device"] = std::make_shared<dmlc::any>(std::move(device));
  g = nnvm::pass::PlaceDevice(g, "__ctx_group__", device_map, "_CrossDeviceCopy");
  const auto& assigned_device = g.GetAttr<nnvm::DeviceVector>("device");

  // Translate device ids back into contexts; nodes the pass left unassigned
  // fall back to the default context.
  ContextVector vcontext;
  for (size_t i = 0; i < assigned_device.size(); ++i) {
    if (assigned_device[i] == -1) {
      vcontext.push_back(default_ctx);
    } else {
      vcontext.push_back(ctx_list[assigned_device[i]]);
    }
  }
  g.attrs["context"] = std::make_shared<nnvm::any>(std::move(vcontext));
  return g;
}

/*!
* \brief GraphExecutor initializer for regular bind flow in which
* input arguments and gradients are provided by users. This initializer
Expand Down Expand Up @@ -571,46 +416,6 @@ void GraphExecutor::Init(nnvm::Symbol symbol,
FinishInitGraph(symbol, g, shared_exec, feed_dict);
}

// Deprecated V1 initializer for the regular bind flow: builds the graph via
// InitGraphV1, attaches op executors and resources, sets up data-entry
// memory (optionally sharing another executor's pool), then fills the
// output and head-gradient array lists and initializes cached ops.
void GraphExecutor::InitV1(nnvm::Symbol symbol,
                           const Context& default_ctx,
                           const std::map<std::string, Context>& ctx_map,
                           const std::vector<NDArray>& in_args,
                           const std::vector<NDArray>& arg_grad_store,
                           const std::vector<OpReqType>& grad_req_type,
                           const std::vector<NDArray>& aux_states,
                           Executor* shared_exec,
                           const nnvm::NodeEntryMap<NDArray>& feed_dict) {
  nnvm::Graph g = InitGraphV1(symbol, default_ctx,
                              ctx_map, in_args, arg_grad_store,
                              grad_req_type, aux_states, feed_dict);
  g.attrs["saved_opr"] = std::make_shared<nnvm::any>(std::move(saved_opr_));
  g = AttachOpExecs(g);
  g = AttachOpResources(g);
  graph_ = std::move(g);
  if (shared_exec != nullptr) {
    // Share the other executor's memory pool. Guard the downcast:
    // dynamic_cast yields nullptr for a non-GraphExecutor, which previously
    // caused an unchecked null dereference.
    auto* shared_graph_exec = dynamic_cast<GraphExecutor*>(shared_exec);
    CHECK(shared_graph_exec != nullptr)
        << "shared_exec must be a GraphExecutor";
    this->InitDataEntryMemory(&(shared_graph_exec->data_pool_));
  } else {
    this->InitDataEntryMemory(nullptr);
  }
  {
    // initialize output arrays
    auto& idx = graph_.indexed_graph();
    for (size_t i = 0; i < num_forward_outputs_; ++i) {
      auto& e = idx.outputs()[i];
      output_arrays_.push_back(data_entry_[idx.entry_id(e)]);
    }
    // initialize head gradient array: the inputs past num_forward_inputs_
    // are the head-grad placeholder variables created by InitFullGraphV1.
    head_grad_array_.resize(symbol.outputs.size());
    for (size_t i = num_forward_inputs_; i < idx.input_nodes().size(); ++i) {
      uint32_t nid = idx.input_nodes().at(i);
      uint32_t oid = head_grad_map_.at(idx[nid].source);
      head_grad_array_[oid] = data_entry_[idx.entry_id(nid, 0)];
    }
  }
  this->InitCachedOps();
  this->InitOpSegs();
}

/*!
* \brief Initialize in_args, arg_grads, and aux_states
* and their data_entry_ of the executor. This function
Expand Down Expand Up @@ -928,81 +733,6 @@ void GraphExecutor::Init(nnvm::Symbol symbol,
FinishInitGraph(symbol, g, shared_exec, feed_dict);
}

// Deprecated V1 graph initialization for the regular bind flow: builds the
// full forward+backward graph, assigns contexts, binds the user-provided
// arrays to graph entries, infers shapes/types, and plans memory.
Graph GraphExecutor::InitGraphV1(nnvm::Symbol symbol,
                                 const Context& default_ctx,
                                 const std::map<std::string, Context>& ctx_map,
                                 const std::vector<NDArray>& in_args,
                                 const std::vector<NDArray>& arg_grad_store,
                                 const std::vector<OpReqType>& grad_req_type,
                                 const std::vector<NDArray>& aux_states,
                                 const nnvm::NodeEntryMap<NDArray>& feed_dict) {
  // setup gradient
  nnvm::Graph g = InitFullGraphV1(symbol, grad_req_type, arg_grad_store);
  g = AssignContextV1(g, default_ctx, ctx_map,
                      in_args,
                      grad_store_,
                      aux_states,
                      num_forward_inputs_,
                      num_forward_outputs_);
  const auto& idx = g.indexed_graph();
  // get number of nodes used in forward pass
  num_forward_nodes_ = 0;
  for (size_t i = 0; i < num_forward_outputs_; ++i) {
    num_forward_nodes_ = std::max(
        num_forward_nodes_, static_cast<size_t>(idx.outputs()[i].node_id + 1));
  }
  // Setup data entry, shape and type.
  data_entry_.resize(idx.num_node_entries());
  auto mutable_nodes = idx.mutable_input_nodes();
  nnvm::ShapeVector arg_shapes;
  nnvm::DTypeVector arg_types;
  // Bind user arrays to the forward input nodes: mutable inputs take
  // aux_states in order, the remaining inputs take in_args in order.
  size_t arg_top = 0, aux_top = 0;
  for (size_t i = 0; i < num_forward_inputs_; ++i) {
    const uint32_t nid = idx.input_nodes().at(i);
    if (mutable_nodes.count(nid)) {
      CHECK_LT(aux_top, aux_states.size());
      data_entry_[idx.entry_id(nid, 0)] = aux_states[aux_top];
      arg_shapes.push_back(aux_states[aux_top].shape());
      arg_types.push_back(aux_states[aux_top].dtype());
      ++aux_top;
    } else {
      CHECK_LT(arg_top, in_args.size());
      data_entry_[idx.entry_id(nid, 0)] = in_args[arg_top];
      arg_shapes.push_back(in_args[arg_top].shape());
      arg_types.push_back(in_args[arg_top].dtype());
      ++arg_top;
    }
  }
  // Bind the gradient arrays to the backward outputs of the graph.
  for (size_t j = num_forward_outputs_; j < idx.outputs().size(); ++j) {
    data_entry_[idx.entry_id(idx.outputs()[j])]
        = grad_store_[j - num_forward_outputs_].second;
  }
  // Pad to the full input count with unknown shape/type so inference can
  // fill in the head-gradient placeholders.
  arg_shapes.resize(idx.input_nodes().size(), TShape());
  arg_types.resize(idx.input_nodes().size(), -1);
  // other initializations
  g = nnvm::pass::InferShape(g, arg_shapes, "__shape__");
  g = nnvm::pass::InferType(g, arg_types, "__dtype__");

  {
    // memory allocator
    const int kBadStorageID = -1;
    const int kExternalStorageID = -2;  // storage owned outside the executor
    nnvm::StorageVector arg_storage_id(idx.num_node_entries(), kBadStorageID);
    // Gradient outputs and feed_dict entries come with user-owned storage,
    // so the memory planner must not allocate for them.
    for (size_t j = num_forward_outputs_; j < idx.outputs().size(); ++j) {
      arg_storage_id[idx.entry_id(idx.outputs()[j])] = kExternalStorageID;
    }
    for (const auto& kv : feed_dict) {
      uint32_t eid = idx.entry_id(kv.first);
      data_entry_[eid] = kv.second;
      arg_storage_id[eid] = kExternalStorageID;
    }
    g.attrs["storage"] = std::make_shared<dmlc::any>(std::move(arg_storage_id));
    g = nnvm::ApplyPass(g, "PlanMemory");
  }
  g = DetectInplaceAddTo(g);
  return g;
}

/*!
* \brief This function is triggered by both simple_bind
* and bind flows.
Expand Down
25 changes: 0 additions & 25 deletions src/executor/graph_executor.h
Original file line number Diff line number Diff line change
Expand Up @@ -58,17 +58,6 @@ class GraphExecutor : public Executor {
const nnvm::NodeEntryMap<NDArray>& feed_dict
= nnvm::NodeEntryMap<NDArray>());

// Initialize the executor from user-provided arrays (deprecated V1 bind flow).
void InitV1(nnvm::Symbol symbol,
            const Context& default_ctx,
            const std::map<std::string, Context>& ctx_map,
            const std::vector<NDArray>& in_args,
            const std::vector<NDArray>& arg_grad_store,
            const std::vector<OpReqType>& grad_req_type,
            const std::vector<NDArray>& aux_states,
            Executor* shared_exec = nullptr,
            const nnvm::NodeEntryMap<NDArray>& feed_dict
                = nnvm::NodeEntryMap<NDArray>());
// initialize executor for bind
void Init(nnvm::Symbol symbol,
const Context& default_ctx,
Expand Down Expand Up @@ -162,16 +151,6 @@ class GraphExecutor : public Executor {
std::vector<NDArray>* in_arg_vec,
std::vector<NDArray>* arg_grad_vec,
std::vector<NDArray>* aux_state_vec);
// Internal initialization of the graph (deprecated V1 bind flow).
Graph InitGraphV1(nnvm::Symbol symbol,
                  const Context& default_ctx,
                  const std::map<std::string, Context>& ctx_map,
                  const std::vector<NDArray>& in_args,
                  const std::vector<NDArray>& arg_grad_store,
                  const std::vector<OpReqType>& grad_req_type,
                  const std::vector<NDArray>& aux_states,
                  const nnvm::NodeEntryMap<NDArray>& feed_dict
                      = nnvm::NodeEntryMap<NDArray>());
// internal initialization of the graph for simple bind
Graph InitGraph(nnvm::Symbol symbol,
const Context& default_ctx,
Expand All @@ -180,10 +159,6 @@ class GraphExecutor : public Executor {
const std::vector<Context>& arg_grad_ctxes,
const std::vector<Context>& aux_state_ctxes,
const std::vector<OpReqType>& grad_req_types);
// Initialize the full graph, including gradient (deprecated V1 bind flow).
Graph InitFullGraphV1(nnvm::Symbol symbol,
                      const std::vector<OpReqType>& grad_req_type,
                      const std::vector<NDArray>& arg_grad_store);
// Initialize the full graph for simple bind, including gradient.
Graph InitFullGraph(nnvm::Symbol symbol,
                    const std::vector<OpReqType>& grad_req_types);
Expand Down

0 comments on commit 66db99a

Please sign in to comment.