This repository has been archived by the owner on Nov 17, 2023. It is now read-only.

Update NDArray with NodeEntry constructors and refine initializer lists
larroy committed Apr 11, 2019
1 parent 7b9a321 commit 4edc749
Showing 2 changed files with 34 additions and 22 deletions.
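This commit switches the NDArray constructors from brace-initializing the graph entry member (entry_({nullptr, 0, 0})) to calling the nnvm::NodeEntry constructors (entry_()), and reflows the longer initializer lists to one member per line. For orientation, the NodeEntry constructors the new code relies on look roughly like the sketch below; this is a simplified paraphrase with a stand-in Node type, not the verbatim definition from nnvm/node.h.

#include <cstdint>
#include <memory>

struct Node;                            // stand-in for nnvm::Node
using NodePtr = std::shared_ptr<Node>;  // matches nnvm's NodePtr alias

struct NodeEntry {                      // simplified paraphrase of nnvm::NodeEntry
  NodeEntry() : node(nullptr), index(0), version(0) {}
  NodeEntry(NodePtr node, uint32_t index, uint32_t version)
      : node(std::move(node)), index(index), version(version) {}

  NodePtr node;       // node that produces this entry
  uint32_t index;     // which output of that node
  uint32_t version;   // version counter, used for mutable inputs
};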
52 changes: 32 additions & 20 deletions include/mxnet/ndarray.h
@@ -94,8 +94,11 @@ class NDArray {
   NDArray(const mxnet::TShape &shape, Context ctx,
           bool delay_alloc = false, int dtype = mshadow::default_type_flag)
       : ptr_(std::make_shared<Chunk>(shape, ctx, delay_alloc, dtype)),
-        shape_(shape), dtype_(dtype), storage_type_(kDefaultStorage),
-        entry_({nullptr, 0, 0}) {
+        shape_(shape),
+        dtype_(dtype),
+        storage_type_(kDefaultStorage),
+        entry_()
+  {
   }
   /*! \brief constructor for NDArray with storage type
    */
@@ -109,11 +112,13 @@ class NDArray {
    * \param ctx context of NDArray
    * \param dtype data type of this ndarray
    */
-  explicit NDArray(Context ctx, int dtype = mshadow::default_type_flag) {
-    ptr_ = std::make_shared<Chunk>(mxnet::TShape(mshadow::Shape1(0)), ctx, true, dtype);
-    dtype_ = dtype;
-    storage_type_ = kDefaultStorage;
-    entry_ = {nullptr, 0, 0};
+  explicit NDArray(Context ctx, int dtype = mshadow::default_type_flag)
+      : ptr_(std::make_shared<Chunk>(mxnet::TShape(mshadow::Shape1(0)), ctx, true, dtype)),
+        shape_(),
+        dtype_(dtype),
+        storage_type_(kDefaultStorage),
+        entry_()
+  {
   }
   /*!
    * \brief constructing a static NDArray that shares data with TBlob
@@ -123,9 +128,11 @@ class NDArray {
    * \param dev_id the device id this tensor sits at
    */
   NDArray(const TBlob &data, int dev_id)
-      : ptr_(std::make_shared<Chunk>(data, dev_id)), shape_(data.shape_),
-        dtype_(data.type_flag_), storage_type_(kDefaultStorage),
-        entry_({nullptr, 0, 0}) {
+      : ptr_(std::make_shared<Chunk>(data, dev_id)),
+        shape_(data.shape_),
+        dtype_(data.type_flag_),
+        storage_type_(kDefaultStorage),
+        entry_() {
   }

   /*!
@@ -137,20 +144,22 @@ class NDArray {
    * \param deleter the function pointer of custom deleter
    */
   NDArray(const TBlob &data, int dev_id, const std::function<void()>& deleter)
-      : ptr_(new Chunk(data, dev_id),
-        [deleter](Chunk *p) {
-          deleter();  // call custom deleter
-          delete p;   // delete Chunk object
+      : ptr_(new Chunk(data, dev_id), [deleter](Chunk *p) {
+          deleter();  // call custom deleter
+          delete p;   // delete Chunk object
         }),
         shape_(data.shape_),
         dtype_(data.type_flag_), storage_type_(kDefaultStorage),
-        entry_({nullptr, 0, 0}) {
+        entry_() {
   }

   /*! \brief create ndarray from shared memory */
   NDArray(int shared_pid, int shared_id, const mxnet::TShape& shape, int dtype)
-      : ptr_(std::make_shared<Chunk>(shared_pid, shared_id, shape, dtype)), shape_(shape),
-        dtype_(dtype), storage_type_(kDefaultStorage), entry_({nullptr, 0, 0}) {
+      : ptr_(std::make_shared<Chunk>(shared_pid, shared_id, shape, dtype)),
+        shape_(shape),
+        dtype_(dtype),
+        storage_type_(kDefaultStorage),
+        entry_() {
   }

   /*!
@@ -165,8 +174,11 @@ class NDArray {
    */
   NDArray(const NDArrayStorageType stype, const mxnet::TShape &shape,
           const TBlob &data, const std::vector<TBlob> &aux_data, int dev_id)
-      : ptr_(std::make_shared<Chunk>(stype, data, aux_data, dev_id)), shape_(shape),
-        dtype_(data.type_flag_), storage_type_(stype), entry_({nullptr, 0, 0}) {
+      : ptr_(std::make_shared<Chunk>(stype, data, aux_data, dev_id)),
+        shape_(shape),
+        dtype_(data.type_flag_),
+        storage_type_(stype),
+        entry_() {
   }
   /*!
    * \brief initialize the NDArray, assuming it is not assigned a meaningful shape before
@@ -640,7 +652,7 @@ class NDArray {
    */
   NDArray Detach() const {
     NDArray ret(*this);
-    ret.entry_ = nnvm::NodeEntry{nullptr, 0, 0};
+    ret.entry_ = nnvm::NodeEntry();
     return ret;
   }

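The ndarray.h changes above are intended to be behavior-preserving: every constructor that used to list-initialize entry_ with {nullptr, 0, 0} now default-constructs it, and Detach() assigns a default-constructed nnvm::NodeEntry() instead of a braced temporary. A minimal, self-contained analogue of the before/after (hypothetical Entry and Holder types, not MXNet code):

#include <cstdint>
#include <memory>

struct Node;                            // stand-in for nnvm::Node

struct Entry {                          // stand-in for nnvm::NodeEntry
  Entry() : node(nullptr), index(0), version(0) {}
  Entry(std::shared_ptr<Node> n, uint32_t i, uint32_t v)
      : node(std::move(n)), index(i), version(v) {}
  std::shared_ptr<Node> node;
  uint32_t index;
  uint32_t version;
};

struct Holder {                         // stand-in for NDArray and its entry_ member
  // Before: Holder() : entry_({nullptr, 0, 0}) {}   // spells out every field
  Holder() : entry_() {}                // after: default-construct to the same state
  Entry entry_;
};

Both forms leave the entry with a null node and zero index/version; calling the constructor simply stops the NDArray code from depending on NodeEntry's member order.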
4 changes: 2 additions & 2 deletions src/operator/elemwise_op_common.h
@@ -203,7 +203,7 @@ struct ElemwiseGradUseOut {
     std::vector<nnvm::NodeEntry> heads;
     uint32_t n_out = n->num_outputs();
     for (uint32_t i = 0; i < n_out; ++i) {
-      heads.emplace_back(nnvm::NodeEntry{n, i, 0});
+      heads.emplace_back(n, i, 0);
     }
     return MakeNonlossGradNode(op_name, n, ograds, heads, n->attrs.dict);
   }
@@ -220,7 +220,7 @@ struct ElemwiseGradUseInOut {
     }
     uint32_t n_out = n->num_outputs();
     for (uint32_t i = 0; i < n_out; ++i) {
-      heads.emplace_back(nnvm::NodeEntry{n, i, 0});
+      heads.emplace_back(n, i, 0);
     }
     return MakeGradNode(op_name, n, heads, n->attrs.dict);
   }
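In these gradient helpers, heads.emplace_back(nnvm::NodeEntry{n, i, 0}) constructed a temporary entry and then moved it into the vector, whereas heads.emplace_back(n, i, 0) forwards the three arguments to the matching NodeEntry constructor so the element is built in place. A small self-contained illustration of the difference (hypothetical Entry type, standard library only):

#include <cstdint>
#include <memory>
#include <vector>

struct Node;                            // stand-in for nnvm::Node
using NodePtr = std::shared_ptr<Node>;

struct Entry {                          // stand-in for nnvm::NodeEntry
  Entry(NodePtr n, uint32_t i, uint32_t v)
      : node(std::move(n)), index(i), version(v) {}
  NodePtr node;
  uint32_t index;
  uint32_t version;
};

std::vector<Entry> make_heads(const NodePtr& n, uint32_t n_out) {
  std::vector<Entry> heads;
  heads.reserve(n_out);
  for (uint32_t i = 0; i < n_out; ++i) {
    // Before: heads.emplace_back(Entry{n, i, 0});   // builds a temporary, then moves it
    heads.emplace_back(n, i, 0);                     // after: constructed directly in place
  }
  return heads;
}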
