
init commit
Rohit Kumar Srivastava committed Jul 19, 2019
1 parent 6bdcef2 commit 2d31323
Showing 10 changed files with 166 additions and 55 deletions.
35 changes: 27 additions & 8 deletions include/mxnet/c_api.h
@@ -55,7 +55,9 @@ extern "C" {
 #endif
 
 /*! \brief manually define unsigned int */
-typedef unsigned int mx_uint;
+typedef uint32_t mx_uint;
+/*! \brief manually define 64-bit int */
+typedef int64_t mx_int64;
 /*! \brief manually define float */
 typedef float mx_float;
 /*! \brief data type to store dim size */
@@ -549,13 +551,20 @@ MXNET_DLL int MXNDArrayCreate(const mx_uint *shape,
  * \return 0 when success, -1 when failure happens
  */
 MXNET_DLL int MXNDArrayCreateEx(const mx_uint *shape,
                                 mx_uint ndim,
                                 int dev_type,
                                 int dev_id,
                                 int delay_alloc,
                                 int dtype,
                                 NDArrayHandle *out);
 
+MXNET_DLL int MXNDArrayCreateExInt64(const mx_int64 *shape,
+                                     mx_uint ndim,
+                                     int dev_type,
+                                     int dev_id,
+                                     int delay_alloc,
+                                     int dtype,
+                                     NDArrayHandle *out);
+
 /*!
  * \brief create an empty sparse NDArray with specified shape and data type
@@ -793,6 +802,11 @@ MXNET_DLL int MXNDArrayReshape64(NDArrayHandle handle,
 MXNET_DLL int MXNDArrayGetShape(NDArrayHandle handle,
                                 mx_uint *out_dim,
                                 const mx_uint **out_pdata);
+
+MXNET_DLL int MXNDArrayGetShapeInt64(NDArrayHandle handle,
+                                     mx_uint *out_dim,
+                                     const mx_int64 **out_pdata);
+
 /*!
  * \brief get the shape of the array
  * \param handle the handle to the narray
@@ -803,6 +817,11 @@ MXNET_DLL int MXNDArrayGetShape(NDArrayHandle handle,
 MXNET_DLL int MXNDArrayGetShapeEx(NDArrayHandle handle,
                                   int *out_dim,
                                   const int **out_pdata);
+
+MXNET_DLL int MXNDArrayGetShapeExInt64(NDArrayHandle handle,
+                                       int *out_dim,
+                                       const int64_t **out_pdata);
+
 /*!
  * \brief get the content of the data in NDArray
  * \param handle the handle to the ndarray
@@ -1456,7 +1475,7 @@ MXNET_DLL int MXSymbolListOutputs(SymbolHandle symbol,
  * \return 0 when success, -1 when failure happens
  */
 MXNET_DLL int MXSymbolGetNumOutputs(SymbolHandle symbol,
                                     mx_uint *output_count);
 
 /*!
  * \brief Get a symbol that contains all the internals.
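
A minimal ctypes sketch of how the new 64-bit creation entry point might be driven from Python (hypothetical usage; assumes a libmxnet build with large-tensor support, and the library name is a placeholder):

import ctypes

lib = ctypes.CDLL("libmxnet.so")              # placeholder path

shape = (ctypes.c_int64 * 2)(3, 4)            # const mx_int64 *shape
handle = ctypes.c_void_p()                    # NDArrayHandle
ret = lib.MXNDArrayCreateExInt64(
    shape,
    ctypes.c_uint(2),                         # mx_uint ndim
    ctypes.c_int(1),                          # dev_type: 1 = CPU
    ctypes.c_int(0),                          # dev_id
    ctypes.c_int(0),                          # delay_alloc
    ctypes.c_int(0),                          # dtype: 0 = float32
    ctypes.byref(handle))
assert ret == 0, "MXNDArrayCreateExInt64 failed"
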
3 changes: 2 additions & 1 deletion include/mxnet/c_predict_api.h
@@ -42,7 +42,8 @@ extern "C" {
 #endif
 
 /*! \brief manually define unsigned int */
-typedef unsigned int mx_uint;
+typedef int64_t mx_int64;
+typedef uint32_t mx_uint;
 /*! \brief manually define float */
 typedef float mx_float;
 /*! \brief handle to Predictor */
1 change: 0 additions & 1 deletion include/mxnet/tuple.h
@@ -366,7 +366,6 @@ class Tuple {
   }
 };
 
-
 /*! brief check if a shape's ndim is known. */
 inline bool ndim_is_known(const int ndim) {
   CHECK_GE(ndim, -1) << "shape ndim must be >= -1, while received " << ndim;
1 change: 1 addition & 0 deletions python/mxnet/base.py
@@ -215,6 +215,7 @@ def _load_lib():
 # type definitions
 mx_int = ctypes.c_int
 mx_uint = ctypes.c_uint
+mx_int64 = ctypes.c_int64
 mx_float = ctypes.c_float
 mx_float_p = ctypes.POINTER(mx_float)
 mx_real_t = _np.float32
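
A quick sketch of how this alias lines up with the C side (illustrative only):

import ctypes

mx_int64 = ctypes.c_int64            # mirrors the mx_int64 typedef in c_api.h

# Packing a Python shape tuple into a C int64 array, the way the ndarray
# module does via c_array_buf(mx_int64, native_array('q', shape)):
shape = (1024, 2048)
c_shape = (mx_int64 * len(shape))(*shape)
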
45 changes: 32 additions & 13 deletions python/mxnet/ndarray/ndarray.py
@@ -35,8 +35,9 @@
 import numpy as np
 from ..base import _LIB, numeric_types, integer_types
 from ..base import c_str, c_array, c_array_buf, c_handle_array, mx_real_t
-from ..base import mx_uint, NDArrayHandle, check_call, DLPackHandle, mx_int
+from ..base import mx_uint, NDArrayHandle, check_call, DLPackHandle, mx_int, mx_int64
 from ..base import ctypes2buffer
+from ..runtime import Features
 from ..context import Context, current_context
 from . import _internal
 from . import op
@@ -131,15 +132,26 @@ def _new_alloc_handle(shape, ctx, delay_alloc, dtype=mx_real_t):
         A new empty `NDArray` handle.
     """
     hdl = NDArrayHandle()
-    check_call(_LIB.MXNDArrayCreateEx(
-        c_array_buf(mx_uint, native_array('I', shape)),
-        mx_uint(len(shape)),
-        ctypes.c_int(ctx.device_typeid),
-        ctypes.c_int(ctx.device_id),
-        ctypes.c_int(int(delay_alloc)),
-        ctypes.c_int(int(_DTYPE_NP_TO_MX[np.dtype(dtype).type])),
-        ctypes.byref(hdl)))
+    if Features().is_enabled('INT64_TENSOR_SIZE'):
+        check_call(_LIB.MXNDArrayCreateExInt64(
+            c_array_buf(mx_int64, native_array('q', shape)),
+            mx_int64(len(shape)),
+            ctypes.c_int(ctx.device_typeid),
+            ctypes.c_int(ctx.device_id),
+            ctypes.c_int(int(delay_alloc)),
+            ctypes.c_int(int(_DTYPE_NP_TO_MX[np.dtype(dtype).type])),
+            ctypes.byref(hdl)))
+    else:
+        check_call(_LIB.MXNDArrayCreateEx(
+            c_array_buf(mx_uint, native_array('I', shape)),
+            mx_uint(len(shape)),
+            ctypes.c_int(ctx.device_typeid),
+            ctypes.c_int(ctx.device_id),
+            ctypes.c_int(int(delay_alloc)),
+            ctypes.c_int(int(_DTYPE_NP_TO_MX[np.dtype(dtype).type])),
+            ctypes.byref(hdl)))
     return hdl
 
 
 def _new_from_shared_mem(shared_pid, shared_id, shape, dtype):
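
The runtime feature check used above can also be called directly; a small sketch (the flag is present only on builds compiled with int64 tensor support):

from mxnet.runtime import Features

features = Features()
# True only when libmxnet was built with INT64_TENSOR_SIZE enabled.
print(features.is_enabled('INT64_TENSOR_SIZE'))
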
@@ -1847,9 +1859,16 @@ def shape(self):
         (2L, 3L, 4L)
         """
         ndim = mx_int()
-        pdata = ctypes.POINTER(mx_int)()
-        check_call(_LIB.MXNDArrayGetShapeEx(
-            self.handle, ctypes.byref(ndim), ctypes.byref(pdata)))
+        if Features().is_enabled('INT64_TENSOR_SIZE'):
+            pdata = ctypes.POINTER(mx_int64)()
+            check_call(_LIB.MXNDArrayGetShapeExInt64(
+                self.handle, ctypes.byref(ndim), ctypes.byref(pdata)))
+        else:
+            pdata = ctypes.POINTER(mx_int)()
+            check_call(_LIB.MXNDArrayGetShapeEx(
+                self.handle, ctypes.byref(ndim), ctypes.byref(pdata)))
         if ndim.value == -1:
             return None
         else:
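
Either branch ends in an ordinary Python tuple; a small usage sketch:

import mxnet as mx

a = mx.nd.zeros((3, 4))
print(a.shape)          # (3, 4) -- identical on int32 and int64 builds

# On an INT64_TENSOR_SIZE build, dimensions past 2**31 - 1 become
# representable; allocating one needs several GB of RAM, so this is
# illustrative only:
# big = mx.nd.zeros((2**31 + 1,))
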
56 changes: 50 additions & 6 deletions src/c_api/c_api.cc
@@ -179,13 +179,29 @@ int MXNDArrayCreate(const mx_uint *shape,
   API_END();
 }
 
+int MXNDArrayCreateExInt64(const int64_t *shape,
+                           mx_uint ndim,
+                           int dev_type,
+                           int dev_id,
+                           int delay_alloc,
+                           int dtype,
+                           NDArrayHandle *out) {
+  API_BEGIN();
+  *out = new NDArray(
+      mxnet::TShape(shape, shape + ndim),
+      Context::Create(static_cast<Context::DeviceType>(dev_type), dev_id),
+      delay_alloc != 0,
+      dtype);
+  API_END();
+}
+
 int MXNDArrayCreateEx(const mx_uint *shape,
                       mx_uint ndim,
                       int dev_type,
                       int dev_id,
                       int delay_alloc,
                       int dtype,
                       NDArrayHandle *out) {
   API_BEGIN();
   *out = new NDArray(
       mxnet::TShape(shape, shape + ndim),
@@ -513,6 +529,34 @@ int MXNDArrayGetShape(NDArrayHandle handle,
   API_END();
 }
 
+int MXNDArrayGetShapeExInt64(NDArrayHandle handle,
+                             int *out_dim,
+                             const int64_t **out_pdata) {
+  MXAPIThreadLocalEntry *ret = MXAPIThreadLocalStore::Get();
+  API_BEGIN();
+  NDArray *arr = static_cast<NDArray*>(handle);
+  if (!arr->is_none()) {
+    mxnet::TShape s = arr->shape();
+    if (!Imperative::Get()->is_np_shape()) {
+      common::ConvertToLegacyShape(&s);
+    }
+    *out_dim = s.ndim();
+    if (s.ndim() >= 0) {
+      std::vector<int64_t> &buffer = ret->arg_shape_buffer_ex_int64;
+      buffer.resize(s.ndim());
+      mxnet::ShapeTypeCast(s.begin(), s.end(), buffer.data());
+      *out_pdata = buffer.data();
+    }
+  } else {
+    if (Imperative::Get()->is_np_shape()) {
+      *out_dim = -1;
+    } else {
+      *out_dim = 0;
+    }
+  }
+  API_END();
+}
+
 int MXNDArrayGetShapeEx(NDArrayHandle handle,
                         int *out_dim,
                         const int **out_pdata) {
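
A matching ctypes sketch for reading the shape back through the new entry point (hypothetical; reuses the lib and handle variables from the creation sketch above):

import ctypes

ndim = ctypes.c_int()
pdata = ctypes.POINTER(ctypes.c_int64)()
ret = lib.MXNDArrayGetShapeExInt64(handle, ctypes.byref(ndim), ctypes.byref(pdata))
assert ret == 0
shape = tuple(pdata[i] for i in range(ndim.value))   # e.g. (3, 4)
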
28 changes: 28 additions & 0 deletions src/c_api/c_api_common.h
@@ -81,10 +81,14 @@ struct MXAPIThreadLocalEntry {
   std::vector<const mx_uint*> arg_shape_data, out_shape_data, aux_shape_data;
   /*! \brief result holder for returning shape pointer */
   std::vector<const int*> arg_shape_data_ex, out_shape_data_ex, aux_shape_data_ex;
+  std::vector<const int64_t*> arg_shape_data_ex_int64, out_shape_data_ex_int64,
+                              aux_shape_data_ex_int64;
   /*! \brief uint32_t buffer for returning shape pointer */
   std::vector<uint32_t> arg_shape_buffer, out_shape_buffer, aux_shape_buffer;
   /*! \brief uint32_t buffer for returning shape pointer */
   std::vector<int> arg_shape_buffer_ex, out_shape_buffer_ex, aux_shape_buffer_ex;
+  std::vector<int64_t> arg_shape_buffer_ex_int64, out_shape_buffer_ex_int64,
+                       aux_shape_buffer_ex_int64;
   /*! \brief bool buffer */
   std::vector<bool> save_inputs, save_outputs;
   // DEPRECATED. Use SetupShapeArrayReturnWithBufferEx instead.
@@ -130,6 +134,30 @@ struct MXAPIThreadLocalEntry {
       }
     }
   }
+
+  inline static void SetupShapeArrayReturnWithBufferExInt64(
+      const mxnet::ShapeVector &shapes,
+      std::vector<int> *ndim,
+      std::vector<const int64_t*> *data,
+      std::vector<int64_t> *buffer) {
+    ndim->resize(shapes.size());
+    data->resize(shapes.size());
+    size_t size = 0;
+    for (const auto& s : shapes) {
+      if (s.ndim() > 0) {
+        size += s.ndim();
+      }
+    }
+    buffer->resize(size);
+    int64_t *ptr = buffer->data();
+    for (size_t i = 0; i < shapes.size(); ++i) {
+      ndim->at(i) = shapes[i].ndim();
+      data->at(i) = ptr;
+      if (shapes[i].ndim() > 0) {
+        ptr = mxnet::ShapeTypeCast(shapes[i].begin(), shapes[i].end(), ptr);
+      }
+    }
+  }
 };
 
 // define the threadlocal store.
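
The helper flattens every shape into one contiguous buffer and hands out interior pointers into it; a Python sketch of the same layout (illustrative only, not MXNet code):

def pack_shapes(shapes):
    """Flatten a list of shapes into one buffer plus per-shape offsets,
    mirroring SetupShapeArrayReturnWithBufferExInt64."""
    ndim = [len(s) for s in shapes]
    offsets, buffer = [], []
    for s in shapes:
        offsets.append(len(buffer))   # where this shape's dims begin
        buffer.extend(s)
    return ndim, offsets, buffer

ndim, offsets, buffer = pack_shapes([(3, 4), (5,), (2, 2, 2)])
# ndim == [2, 1, 3], offsets == [0, 2, 3], buffer == [3, 4, 5, 2, 2, 2]
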
42 changes: 21 additions & 21 deletions src/operator/tensor/matrix_op-inl.h
@@ -647,9 +647,9 @@ void SliceEx(const nnvm::NodeAttrs& attrs,
 
 template<int ndim>
 inline void GetIndexRange(const mxnet::TShape& dshape,
-                          const mxnet::Tuple<dmlc::optional<int>>& param_begin,
-                          const mxnet::Tuple<dmlc::optional<int>>& param_end,
-                          const mxnet::Tuple<dmlc::optional<int>>& param_step,
+                          const mxnet::Tuple<dmlc::optional<index_t>>& param_begin,
+                          const mxnet::Tuple<dmlc::optional<index_t>>& param_end,
+                          const mxnet::Tuple<dmlc::optional<index_t>>& param_step,
                           common::StaticArray<index_t, ndim>* begin,
                           common::StaticArray<index_t, ndim>* end,
                           common::StaticArray<index_t, ndim>* step) {
@@ -1010,8 +1010,8 @@ void SliceAssignOpForward(const nnvm::NodeAttrs& attrs,
 
 struct SliceAssignScalarParam : public dmlc::Parameter<SliceAssignScalarParam> {
   double scalar;
-  mxnet::Tuple<dmlc::optional<int>> begin, end;
-  mxnet::Tuple<dmlc::optional<int>> step;
+  mxnet::Tuple<dmlc::optional<index_t>> begin, end;
+  mxnet::Tuple<dmlc::optional<index_t>> step;
   DMLC_DECLARE_PARAMETER(SliceAssignScalarParam) {
     DMLC_DECLARE_FIELD(scalar)
     .set_default(0)
@@ -1021,7 +1021,7 @@ struct SliceAssignScalarParam : public dmlc::Parameter<SliceAssignScalarParam> {
     DMLC_DECLARE_FIELD(end)
     .describe("ending indices for the slice operation, supports negative indices.");
     DMLC_DECLARE_FIELD(step)
-    .set_default(mxnet::Tuple<dmlc::optional<int>>())
+    .set_default(mxnet::Tuple<dmlc::optional<index_t>>())
     .describe("step for the slice operation, supports negative values.");
   }
 };
@@ -1323,12 +1323,12 @@ inline bool SliceLikeShape(const nnvm::NodeAttrs& attrs,
 inline void SliceLikeInferRanges(const mxnet::TShape& dshape,
                                  const mxnet::TShape& fshape,
                                  const mxnet::Tuple<int>& axes,
-                                 mxnet::Tuple<dmlc::optional<int>>* param_begin,
-                                 mxnet::Tuple<dmlc::optional<int>>* param_end,
-                                 mxnet::Tuple<dmlc::optional<int>>* param_step) {
-  std::vector<dmlc::optional<int>> pb(dshape.ndim());
-  std::vector<dmlc::optional<int>> pe(dshape.ndim());
-  std::vector<dmlc::optional<int>> ps(dshape.ndim());
+                                 mxnet::Tuple<dmlc::optional<index_t>>* param_begin,
+                                 mxnet::Tuple<dmlc::optional<index_t>>* param_end,
+                                 mxnet::Tuple<dmlc::optional<index_t>>* param_step) {
+  std::vector<dmlc::optional<index_t>> pb(dshape.ndim());
+  std::vector<dmlc::optional<index_t>> pe(dshape.ndim());
+  std::vector<dmlc::optional<index_t>> ps(dshape.ndim());
   if (axes.ndim() == 0) {
     for (int i = 0; i < dshape.ndim(); ++i) {
       pb[i] = 0;
@@ -1352,9 +1352,9 @@ inline void SliceLikeInferRanges(const mxnet::TShape& dshape,
       ps[axis] = 1;
     }
   }
-  *param_begin = mxnet::Tuple<dmlc::optional<int>>(pb.begin(), pb.end());
-  *param_end = mxnet::Tuple<dmlc::optional<int>>(pe.begin(), pe.end());
-  *param_step = mxnet::Tuple<dmlc::optional<int>>(ps.begin(), ps.end());
+  *param_begin = mxnet::Tuple<dmlc::optional<index_t>>(pb.begin(), pb.end());
+  *param_end = mxnet::Tuple<dmlc::optional<index_t>>(pe.begin(), pe.end());
+  *param_step = mxnet::Tuple<dmlc::optional<index_t>>(ps.begin(), ps.end());
 }
 
 template<typename xpu>
@@ -1373,9 +1373,9 @@ void SliceLikeForward(const nnvm::NodeAttrs& attrs,
   const TBlob& out = outputs[0];
   const mxnet::TShape& ishape = data.shape_;
   const mxnet::TShape& from_shape = inputs[1].shape_;
-  mxnet::Tuple<dmlc::optional<int>> param_begin;
-  mxnet::Tuple<dmlc::optional<int>> param_end;
-  mxnet::Tuple<dmlc::optional<int>> param_step;
+  mxnet::Tuple<dmlc::optional<index_t>> param_begin;
+  mxnet::Tuple<dmlc::optional<index_t>> param_end;
+  mxnet::Tuple<dmlc::optional<index_t>> param_step;
   SliceLikeInferRanges(ishape, from_shape, param.axes, &param_begin, &param_end, &param_step);
 
   MXNET_NDIM_SWITCH(data.ndim(), ndim, {
@@ -1421,9 +1421,9 @@ void SliceLikeBackward(const nnvm::NodeAttrs& attrs,
 
   const mxnet::TShape& ishape = ograd.shape_;
   const mxnet::TShape& from_shape = outputs[1].shape_;
-  mxnet::Tuple<dmlc::optional<int>> param_begin;
-  mxnet::Tuple<dmlc::optional<int>> param_end;
-  mxnet::Tuple<dmlc::optional<int>> param_step;
+  mxnet::Tuple<dmlc::optional<index_t>> param_begin;
+  mxnet::Tuple<dmlc::optional<index_t>> param_end;
+  mxnet::Tuple<dmlc::optional<index_t>> param_step;
   SliceLikeInferRanges(ishape, from_shape, param.axes, &param_begin, &param_end, &param_step);
 
   MXNET_NDIM_SWITCH(ograd.ndim(), ndim, {
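
With index_t ranges carried end to end through SliceLikeInferRanges, slice_like behaves the same on 32-bit and 64-bit builds; a small usage sketch:

import mxnet as mx

x = mx.nd.arange(24).reshape((4, 6))
y = mx.nd.zeros((2, 3))
z = mx.nd.slice_like(x, y)    # take the leading (2, 3) block of x
print(z.shape)                # (2, 3)
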
6 changes: 3 additions & 3 deletions src/operator/tensor/slice-inl.h
@@ -34,15 +34,15 @@ namespace mxnet {
 namespace op {
 
 struct SliceParam : public dmlc::Parameter<SliceParam> {
-  mxnet::Tuple<dmlc::optional<int>> begin, end;
-  mxnet::Tuple<dmlc::optional<int>> step;
+  mxnet::Tuple<dmlc::optional<index_t>> begin, end;
+  mxnet::Tuple<dmlc::optional<index_t>> step;
   DMLC_DECLARE_PARAMETER(SliceParam) {
     DMLC_DECLARE_FIELD(begin)
     .describe("starting indices for the slice operation, supports negative indices.");
     DMLC_DECLARE_FIELD(end)
     .describe("ending indices for the slice operation, supports negative indices.");
     DMLC_DECLARE_FIELD(step)
-    .set_default(mxnet::Tuple<dmlc::optional<int>>())
+    .set_default(mxnet::Tuple<dmlc::optional<index_t>>())
     .describe("step for the slice operation, supports negative values.");
   }
   bool operator==(const SliceParam& other) const {
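
With begin, end, and step stored as index_t, slice boundaries past 2**31 - 1 become expressible; a sketch (the large case needs roughly 9 GB of RAM, so it is commented out):

import mxnet as mx

a = mx.nd.arange(0, 10)
print(a[2:8:2].asnumpy())     # [2. 4. 6.] -- dispatches to the slice op

# On an INT64_TENSOR_SIZE build with enough memory:
# big = mx.nd.zeros((2**31 + 16,))
# tail = mx.nd.slice(big, begin=(2**31,), end=(2**31 + 16,))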