Skip to content
This repository has been archived by the owner on Nov 17, 2023. It is now read-only.

[MXNET-779]Add DLPack Transformation API #12047

Merged
merged 31 commits into from
Sep 22, 2018
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
31 commits
Select commit Hold shift + click to select a range
822706e
add dlpack convertor api
wkcn Aug 3, 2018
8aac3da
Merge branch 'master' of https://github.com/apache/incubator-mxnet in…
wkcn Aug 3, 2018
ab6fa85
add to_dlpack and from_dlpack for NDArray
wkcn Aug 6, 2018
8c6e9d2
fix dlpack deleter and add unittest for dlpack
wkcn Aug 6, 2018
9fdfa7d
Merge branch 'master' of https://github.com/apache/incubator-mxnet in…
wkcn Aug 6, 2018
1142787
update 3rdparty
wkcn Aug 6, 2018
16df8d5
fix for cpplint
wkcn Aug 6, 2018
bfcffa2
fix pylint and add destructor for dlpack
wkcn Aug 6, 2018
f5c2552
fix pylint in base.py
wkcn Aug 6, 2018
98b5d11
fix lint in base.py
wkcn Aug 6, 2018
7bdde8f
add document for DLPack transformation API
wkcn Aug 6, 2018
f225d27
add to_dlpack_for_read and to_dlpack_for_write
wkcn Aug 7, 2018
afc1518
fix lint for ndarray.py and fix typo in c_api.h
wkcn Aug 7, 2018
8b397fd
fix function name error in c_api
wkcn Aug 7, 2018
d48074a
update code indent in tensor_blob.h and c_api.cc, remove unused type …
wkcn Aug 7, 2018
58c5d87
use MXNDArrayToDLPack in c_api and add compactness check in TBlob
wkcn Aug 9, 2018
72edbf8
merge master and fix merge conflict
wkcn Aug 11, 2018
ef8ffcd
use python function as destructor of DLPack
wkcn Aug 11, 2018
afa1898
remove unused PyObjectHandle and update DLDataTypeTransform
wkcn Aug 11, 2018
a4d3aee
update from_dlpack code
wkcn Aug 11, 2018
493deb0
fix pylint in ndarray.py
wkcn Aug 11, 2018
adf36ef
rename dlpack after using it
wkcn Aug 12, 2018
26db4d0
merge master
wkcn Aug 13, 2018
dec838d
DLManagedTensor manages itself
wkcn Aug 22, 2018
850c3dc
add deleter for TBlob and Chunk in NDArray
wkcn Aug 22, 2018
fc99323
remove used code in python/mxnet/base.py
wkcn Aug 22, 2018
ffe60c6
retrigger CI
wkcn Aug 22, 2018
cbb17c3
add deleter for shared_ptr<Chunk>
wkcn Sep 10, 2018
e56be1f
Merge branch 'master' into DLPack-convertor-API
wkcn Sep 10, 2018
b1204bc
compilation okay
wkcn Sep 10, 2018
fe1387f
fix cpplint
wkcn Sep 10, 2018
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
36 changes: 36 additions & 0 deletions include/mxnet/c_api.h
Original file line number Diff line number Diff line change
Expand Up @@ -93,6 +93,8 @@ typedef void *CudaModuleHandle;
typedef void *CudaKernelHandle;
/*! \brief handle to a Profile object (domain, duration, counter, etc.) */
typedef void *ProfileHandle;
/*! \brief handle to a DLManagedTensor (DLPack exchange struct) */
typedef void *DLManagedTensorHandle;

typedef void (*ExecutorMonitorCallback)(const char*,
NDArrayHandle,
Expand Down Expand Up @@ -746,6 +748,40 @@ MXNET_DLL int MXNDArrayGetShape(NDArrayHandle handle,
*/
MXNET_DLL int MXNDArrayGetData(NDArrayHandle handle,
void **out_pdata);
/*!
 * \brief Create a reference view of an NDArray that is
 *  represented as a DLManagedTensor.
 *  Notice: MXNet uses asynchronous execution. Please call MXNDArrayWaitToRead or
 *  MXNDArrayWaitToWrite before calling MXNDArrayToDLPack.
 * \param handle the handle to the ndarray
 * \param out_dlpack pointer holder to get the pointer of the DLManagedTensor
 * \return 0 when success, -1 when failure happens
 */
MXNET_DLL int MXNDArrayToDLPack(NDArrayHandle handle,
                                DLManagedTensorHandle *out_dlpack);

/*!
 * \brief Create an NDArray backed by a dlpack tensor.
 *
 * This allows us to create an NDArray using the memory
 * allocated by an external deep learning framework
 * that is DLPack compatible.
 *
 * The memory is retained until the NDArray goes out of scope.
 *
 * \param dlpack the pointer of the input DLManagedTensor
 * \param out_handle pointer holder to get the pointer of the NDArray
 * \return 0 when success, -1 when failure happens
 */
MXNET_DLL int MXNDArrayFromDLPack(DLManagedTensorHandle dlpack,
                                  NDArrayHandle *out_handle);
/*!
 * \brief Delete a dlpack tensor (invokes the tensor's own deleter).
 * \param dlpack the pointer of the input DLManagedTensor
 * \return 0 when success, -1 when failure happens
 */
MXNET_DLL int MXNDArrayCallDLPackDeleter(DLManagedTensorHandle dlpack);

/*!
* \brief get the type of the data in NDArray
* \param handle the handle to the narray
Expand Down
40 changes: 40 additions & 0 deletions include/mxnet/ndarray.h
Original file line number Diff line number Diff line change
Expand Up @@ -116,6 +116,26 @@ class NDArray {
dtype_(data.type_flag_), storage_type_(kDefaultStorage),
entry_({nullptr, 0, 0}) {
}

/*!
 * \brief constructing a static NDArray that shares data with a TBlob and
 *  carries a custom deleter for the externally owned memory.
 *  Use with caution: allocate ONLY ONE NDArray for each TBlob, and
 *  make sure the memory region is available throughout the life of the NDArray.
 * \param data the memory content of static data
 * \param dev_id the device id this tensor sits at
 * \param deleter the function pointer of custom deleter, invoked when the
 *  last shared owner of the underlying Chunk is destroyed
 */
NDArray(const TBlob &data, int dev_id, const std::function<void()>& deleter)
    : ptr_(new Chunk(data, dev_id),
        [deleter](Chunk *p) {
           deleter();    // call custom deleter to release the external memory
           delete p;     // then delete the Chunk bookkeeping object itself
        }),
      shape_(data.shape_),
      dtype_(data.type_flag_), storage_type_(kDefaultStorage),
      entry_({nullptr, 0, 0}) {
}

/*! \brief create ndarray from shared memory */
NDArray(int shared_pid, int shared_id, const TShape& shape, int dtype)
: ptr_(std::make_shared<Chunk>(shared_pid, shared_id, shape, dtype)), shape_(shape),
Expand Down Expand Up @@ -523,6 +543,26 @@ class NDArray {
return ret;
}

/*!
 * \brief Create a reference view of this NDArray that is
 *  represented as a DLManagedTensor.
 * \return A DLManagedTensor
 */
DLManagedTensor* ToDLPack() const;

/*!
 * \brief Create an NDArray backed by a dlpack tensor.
 *
 * This allows us to create an NDArray using the memory
 * allocated by an external deep learning framework
 * that is DLPack compatible.
 *
 * The memory is retained until the NDArray goes out of scope.
 *
 * \param tensor the input DLManagedTensor whose memory is borrowed
 * \return The created NDArray view.
 */
static NDArray FromDLPack(const DLManagedTensor* tensor);

/*!
* \brief Update ndarray chunk storage handles using existing ndarray storage handles
* Also update the aux_handle, aux_shapes and aux_types.
Expand Down
65 changes: 64 additions & 1 deletion include/mxnet/tensor_blob.h
Original file line number Diff line number Diff line change
Expand Up @@ -104,6 +104,39 @@ class TBlob {
: dptr_(dptr), shape_(shape), type_flag_(type_flag) {
SetDLTensor(dev_mask, dev_id);
}
/*!
 * \brief constructor that constructs a TBlob viewing the memory of a DLTensor
 * \param dltensor the DLTensor to alias; must describe a compact
 *        (row-major, contiguous) tensor, otherwise this aborts via LOG(FATAL)
 */
explicit TBlob(const DLTensor &dltensor)
    : dptr_(dltensor.data),
      shape_(TShape(dltensor.shape, dltensor.shape + dltensor.ndim)),
      type_flag_(DLDataTypeTransform(dltensor.dtype)),
      dltensor_(dltensor) {
  // DLPack permits arbitrary strides, but TBlob only supports compact
  // layouts. A null strides pointer already means compact by DLPack
  // convention; otherwise every stride must equal the product of all
  // faster-varying dimensions.
  if (dltensor.strides != nullptr) {
    const int ndim = dltensor.ndim;
    const int64_t *shape = dltensor.shape;
    const int64_t *strides = dltensor.strides;
    int64_t expected = 1;  // expected stride of the current (innermost-first) dim
    bool compact = true;
    for (int i = ndim - 1; i >= 0; --i) {
      if (strides[i] != expected) {
        compact = false;
        break;
      }
      expected *= shape[i];
    }
    if (!compact) {
      LOG(FATAL) << "Unsupported DLPack because MXNet only support compact tensor now";
    }
  }
}
/*!
* \brief constructor from tensor
* \param src source tensor
Expand Down Expand Up @@ -336,14 +369,44 @@ class TBlob {
}
}
}
/*!
 * \brief Map a DLPack data type descriptor to the corresponding mshadow type flag.
 *  Only single-lane scalar types supported by mshadow are accepted; anything
 *  else aborts via LOG(FATAL).
 * \param dldata_type the DLPack type descriptor (code, bits, lanes)
 * \return the mshadow type flag (e.g. mshadow::kFloat32)
 */
static int DLDataTypeTransform(DLDataType dldata_type) {
  if (dldata_type.lanes != 1) {
    LOG(FATAL) << "Unsupported DLDataType whose lanes != 1";
  }
  const int bits = dldata_type.bits;
  if (dldata_type.code == kDLFloat) {
    if (bits == 16) return mshadow::kFloat16;
    if (bits == 32) return mshadow::kFloat32;
    if (bits == 64) return mshadow::kFloat64;
  } else if (dldata_type.code == kDLUInt) {
    if (bits == 8) return mshadow::kUint8;
  } else if (dldata_type.code == kDLInt) {
    if (bits == 8) return mshadow::kInt8;
    if (bits == 32) return mshadow::kInt32;
    if (bits == 64) return mshadow::kInt64;
  }
  // No match: report the full descriptor before aborting.
  LOG(FATAL) << "Unknown DLDataType{" << dldata_type.code
             << ", " << dldata_type.bits
             << ", " << dldata_type.lanes << "}";
  return mshadow::kFloat32;  // unreachable; silences missing-return warnings
}

/*!
 * \brief Fill the embedded DLTensor view (dltensor_) from this TBlob's
 *  pointer, shape, and dtype.
 * \param dev_mask the device mask (converted to a DLDeviceType)
 * \param dev_id the device id this tensor sits at
 */
inline void SetDLTensor(int dev_mask, int dev_id) {
  dltensor_.data = dptr_;
  dltensor_.ctx = DLContext{static_cast<DLDeviceType>(dev_mask), dev_id};
  dltensor_.ndim = shape_.ndim();
  dltensor_.dtype = DTypeTransform(type_flag_);
  dltensor_.shape = shape_.data();
  // Fix: the rendered diff left both the removed `= NULL` and the added
  // `= nullptr` assignment; only the nullptr form belongs in the code.
  // Null strides means compact row-major layout per the DLPack convention.
  dltensor_.strides = nullptr;
  dltensor_.byte_offset = 0;
}

Expand Down
4 changes: 4 additions & 0 deletions python/mxnet/base.py
Original file line number Diff line number Diff line change
Expand Up @@ -232,6 +232,7 @@ def _load_lib():
CudaModuleHandle = ctypes.c_void_p
CudaKernelHandle = ctypes.c_void_p
ProfileHandle = ctypes.c_void_p
# Opaque handle to a DLPack DLManagedTensor, used by the to_dlpack/from_dlpack API.
DLPackHandle = ctypes.c_void_p


#----------------------------
Expand Down Expand Up @@ -726,3 +727,6 @@ def write_all_str(module_file, module_all_list):
module_op_file.close()
write_all_str(module_internal_file, module_internal_all)
module_internal_file.close()

# Declare return types for the CPython capsule helpers used by the DLPack
# conversion API. ctypes assumes foreign functions return a C int unless told
# otherwise, which would corrupt these values: PyCapsule_New returns a Python
# object (the capsule) and PyCapsule_GetPointer returns a raw C pointer.
ctypes.pythonapi.PyCapsule_New.restype = ctypes.py_object
ctypes.pythonapi.PyCapsule_GetPointer.restype = ctypes.c_void_p
Loading