Refactoring names for mxnet version of nnvm to avoid conflicting with the original tvm/nnvm. (#15303)
adis300 authored and marcoabreu committed Nov 23, 2019
1 parent e3a7141 commit 20f8bbc
Showing 4 changed files with 21 additions and 21 deletions.
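
The diff below is a mechanical rename: helpers in MXNet's bundled copy of nnvm (PlanMemory, FindBestPath, ColorNodeGroup, GetDTypeSize, GraphAllocator, AllocMemory) gain an MX prefix so they no longer collide with the identically named inline symbols in the original tvm/nnvm when both end up in the same build. The following sketch is a hypothetical, self-contained illustration of that kind of clash and how the prefix avoids it; the headers and function bodies are stand-ins, not the real nnvm sources.

// Minimal sketch (hypothetical, not the real nnvm code) of the clash this
// commit avoids: if MXNet's bundled nnvm and the original tvm/nnvm both
// defined an inline nnvm::pass::FindBestPath with different bodies, any
// translation unit that pulls in both headers would violate the
// one-definition rule. Prefixing MXNet's copy keeps the two distinct.
#include <algorithm>
#include <cstdint>
#include <vector>

// --- as defined by the original tvm/nnvm (unchanged upstream) ---
namespace nnvm {
namespace pass {
inline uint32_t FindBestPath(const std::vector<uint32_t>& reward) {
  uint32_t total = 0;
  for (uint32_t r : reward) total += r;  // placeholder body
  return total;
}
}  // namespace pass
}  // namespace nnvm

// --- MXNet's forked copy after this commit ---
namespace nnvm {
namespace pass {
// Renamed from FindBestPath; defining a second, different
// nnvm::pass::FindBestPath here would be an ODR violation.
inline uint32_t MXFindBestPath(const std::vector<uint32_t>& reward) {
  return reward.empty() ? 0u
                        : *std::max_element(reward.begin(), reward.end());
}
}  // namespace pass
}  // namespace nnvm

int main() {
  std::vector<uint32_t> reward{1, 2, 3};
  // Both helpers now coexist in one translation unit without conflict.
  uint32_t sum = nnvm::pass::FindBestPath(reward) +
                 nnvm::pass::MXFindBestPath(reward);
  return sum > 0 ? 0 : 1;
}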
4 changes: 2 additions & 2 deletions src/imperative/cached_op.cc
@@ -473,7 +473,7 @@ bool CachedOp::SetForwardGraph(
storage[idx.entry_id(idx.outputs()[i])] = exec::kExternalStorageID;
}

- auto mem_plan = PlanMemory(
+ auto mem_plan = MXPlanMemory(
&g, std::move(storage), g.GetAttr<std::vector<uint32_t> >(AddPrefix(prefix, REF_COUNT)),
AddPrefix(prefix, STORAGE_PLAN));
g.attrs[AddPrefix(prefix, MEM_PLAN)] =
@@ -601,7 +601,7 @@ bool CachedOp::SetBackwardGraph(
for (const auto i : idx.input_nodes()) storage[idx.entry_id(i, 0)] = exec::kExternalStorageID;
for (const auto i : idx.outputs()) storage[idx.entry_id(i)] = exec::kExternalStorageID;

- auto mem_plan = PlanMemory(
+ auto mem_plan = MXPlanMemory(
&g, std::move(storage),
g.GetAttr<std::vector<uint32_t> >(AddPrefix(BACKWARD, REF_COUNT)),
AddPrefix(BACKWARD, STORAGE_PLAN),
2 changes: 1 addition & 1 deletion src/imperative/imperative_utils.h
@@ -830,7 +830,7 @@ inline std::vector<Context> PlaceDevice(const nnvm::IndexedGraph& idx) {
}


- inline MemoryPlanVector PlanMemory(
+ inline MemoryPlanVector MXPlanMemory(
nnvm::Graph* p_g,
nnvm::StorageVector&& storage,
const std::vector<uint32_t>& ref_count,
6 changes: 3 additions & 3 deletions src/nnvm/graph_algorithm.h
@@ -42,7 +42,7 @@ namespace pass {
* \param path the output path of nodes.
* \return the total reward of best path.
*/
- inline uint32_t FindBestPath(
+ inline uint32_t MXFindBestPath(
const IndexedGraph& graph,
const std::vector<uint32_t>& node_reward,
std::vector<uint32_t>* path) {
@@ -89,7 +89,7 @@ inline uint32_t FindBestPath(
* \param color the color index of each of the node.
* \return the total number of colors.
*/
- inline uint32_t ColorNodeGroup(
+ inline uint32_t MXColorNodeGroup(
const IndexedGraph &graph,
std::vector<uint32_t> node_importance,
uint32_t max_ncolor,
@@ -105,7 +105,7 @@ inline uint32_t ColorNodeGroup(
// All the nodes in the path cannot run in parallel.
for (cindex = 0; cindex < max_ncolor - 1; ++cindex) {
std::vector<uint32_t> path;
- uint32_t reward = FindBestPath(graph, node_importance, &path);
+ uint32_t reward = MXFindBestPath(graph, node_importance, &path);
if (reward == 0) break;
for (uint32_t nid : path) {
if (node_importance[nid] != 0) {
30 changes: 15 additions & 15 deletions src/nnvm/plan_memory.cc
@@ -38,7 +38,7 @@ namespace pass {
namespace {
using namespace nnvm::top;
// Return bytes of data flag.
- static int GetDTypeSize(int type_flag) {
+ static int MXGetDTypeSize(int type_flag) {
switch (type_flag) {
case kUint8:
case kInt8:
@@ -62,7 +62,7 @@ static int GetDTypeSize(int type_flag) {
}

// simple graph based allocator.
- class GraphAllocator {
+ class MXGraphAllocator {
public:
// storage id equals integer.
using StorageID = int;
@@ -131,7 +131,7 @@ class GraphAllocator {
}

// constructor
- explicit GraphAllocator(const IndexedGraph* idx, const size_t match_range) : idx_(idx) {
+ explicit MXGraphAllocator(const IndexedGraph* idx, const size_t match_range) : idx_(idx) {
this->Init(match_range, dmlc::GetEnv("NNVM_EXEC_NUM_TEMP", 1));
}

@@ -146,7 +146,7 @@ class GraphAllocator {
if ((*idx_)[nid].source->is_variable()) continue;
importance[nid] = 1;
}
- num_match_color_ = pass::ColorNodeGroup(
+ num_match_color_ = pass::MXColorNodeGroup(
*idx_, importance, num_match_color_, &node_color_);
}
}
@@ -190,12 +190,12 @@ class GraphAllocator {
/*
* Internal method to perform the memory allocation for a graph
* */
- size_t AllocMemory(const Graph& ret, const IndexedGraph& idx,
+ size_t MXAllocMemory(const Graph& ret, const IndexedGraph& idx,
const std::pair<uint32_t, uint32_t>& node_range,
StorageVector* storage_ptr,
std::vector<int>* storage_inplace_index_ptr,
const std::vector<uint32_t>& entry_ref_count,
- GraphAllocator* allocator) {
+ MXGraphAllocator* allocator) {
static auto& finplace_option = Op::GetAttr<FInplaceOption>("FInplaceOption");
static auto& finplace_identity = Op::GetAttr<FInplaceIdentity>("FInplaceIdentity");
static auto& fignore_inputs = Op::GetAttr<FIgnoreInputs>("FIgnoreInputs");
@@ -213,7 +213,7 @@ size_t AllocMemory(const Graph& ret, const IndexedGraph& idx,
device_vec = &(ret.GetAttr<DeviceVector>("device"));
}
size_t num_not_allocated = 0;
- std::vector<GraphAllocator::StorageID> storage_ref_count(idx.num_node_entries(), 0);
+ std::vector<MXGraphAllocator::StorageID> storage_ref_count(idx.num_node_entries(), 0);

for (uint32_t nid = node_range.first; nid < node_range.second; ++nid) {
const auto& inode = idx[nid];
@@ -247,13 +247,13 @@ size_t AllocMemory(const Graph& ret, const IndexedGraph& idx,
shape_vec[eid_out].Size() == shape_vec[eid_in].Size() &&
dtype_vec[eid_out] == dtype_vec[eid_in];
if (taken[kv.first] == false &&
- sid_out == GraphAllocator::kBadStorageID &&
+ sid_out == MXGraphAllocator::kBadStorageID &&
sid_in >= 0 &&
((storage_ref_count[sid_in] == 1 && !ignore_all_inputs) || real_identity) &&
entry_ref_count[eid_out] > 0 &&
shape_vec[eid_out].Size() == shape_vec[eid_in].Size() &&
(dtype_vec[eid_out] == dtype_vec[eid_in] ||
- GetDTypeSize(dtype_vec[eid_out]) == GetDTypeSize(dtype_vec[eid_in]))) {
+ MXGetDTypeSize(dtype_vec[eid_out]) == MXGetDTypeSize(dtype_vec[eid_in]))) {
// inplace optimization
taken[kv.first] = true;
storage[eid_out] = sid_in;
@@ -272,7 +272,7 @@ size_t AllocMemory(const Graph& ret, const IndexedGraph& idx,
for (uint32_t index = 0; index < inode.source->num_outputs(); ++index) {
uint32_t eid = idx.entry_id(nid, index);
// only request memory for kBadStorageID
- if (storage[eid] == GraphAllocator::kBadStorageID) {
+ if (storage[eid] == MXGraphAllocator::kBadStorageID) {
auto &eshape = shape_vec[eid];
size_t esize = ndim_is_known(shape_vec[eid]) ? eshape.Size() : 0;
eids.insert(std::make_pair(esize, eid));
@@ -317,7 +317,7 @@ size_t AllocMemory(const Graph& ret, const IndexedGraph& idx,
// use -2 to indicate that the node was never touched.
storage_inplace_index[eid] = -2;
}
- if (storage[eid] == GraphAllocator::kBadStorageID) {
+ if (storage[eid] == MXGraphAllocator::kBadStorageID) {
++num_not_allocated;
}
}
@@ -327,7 +327,7 @@ size_t AllocMemory(const Graph& ret, const IndexedGraph& idx,


// function to plan memory
- Graph PlanMemory(Graph ret) {
+ Graph MXPlanMemory(Graph ret) {
// setup ref counter
const IndexedGraph& idx = ret.indexed_graph();
static auto& fignore_inputs = Op::GetAttr<FIgnoreInputs>("FIgnoreInputs");
@@ -380,11 +380,11 @@ Graph PlanMemory(Graph ret) {
std::vector<int> storage_inplace_index(idx.num_node_entries(), -1);

// the allocator
- GraphAllocator allocator(&idx, match_range);
+ MXGraphAllocator allocator(&idx, match_range);

// number of entries that are not statically allocated.
size_t storage_num_not_allocated =
- AllocMemory(ret, idx, node_range, &storage_vec, &storage_inplace_index,
+ MXAllocMemory(ret, idx, node_range, &storage_vec, &storage_inplace_index,
ref_count, &allocator);
size_t storage_allocated_bytes = allocator.TotalAllocBytes();

@@ -406,7 +406,7 @@

NNVM_REGISTER_PASS(MXPlanMemory)
.describe("Plan the memory allocation of each node entries.")
- .set_body(PlanMemory)
+ .set_body(MXPlanMemory)
.set_change_graph(false)
.depend_graph_attr("dtype")
.depend_graph_attr("shape")
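
Note that the NNVM_REGISTER_PASS block above shows the pass was already registered under the string name MXPlanMemory; this commit only renames the C++ function bound via .set_body. For context, a hedged sketch of how a registered nnvm pass is typically invoked by name follows; ApplyPass is part of the public nnvm API, but the wrapper function shown is hypothetical and not part of this commit.

// Illustrative only: running the registered memory-planning pass by name.
#include <nnvm/graph.h>
#include <nnvm/pass.h>

nnvm::Graph PlanGraphMemory(nnvm::Graph g) {
  // The graph must already carry the "shape" and "dtype" attributes that
  // the registration above declares via depend_graph_attr.
  // ApplyPass looks up the pass registered as MXPlanMemory and runs its
  // body, which after this commit is the renamed MXPlanMemory function.
  return nnvm::ApplyPass(std::move(g), "MXPlanMemory");
}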
