This repository has been archived by the owner on Nov 17, 2023. It is now read-only.

[MXNET-1185] [WIP] Support large array in several operators #13191

Closed
wants to merge 46 commits (branch bugfix/large-array)
The diff below shows changes from 11 of the 46 commits.
Commits
All 46 commits are by apeforest.

a572198  Support large integer in operators (Nov 6, 2018)
e37a06b  fix large array in sum (Nov 7, 2018)
e48b274  Fix large array issue in slice operation (Nov 7, 2018)
b183c3f  fix bug in shape (Nov 8, 2018)
fcebf5a  fix getitem with large index (Nov 8, 2018)
3c7557b  fix bug in slice operator (Nov 8, 2018)
904f09b  fix bug in random uniform op (Nov 8, 2018)
08bd8ab  add nightly test (Nov 8, 2018)
244f386  fix lint error (Nov 9, 2018)
3ecd257  fix compilation error on gpu (Nov 9, 2018)
c70afe8  fix gpu compilation (Nov 9, 2018)
ffcd175  fix build issue (Nov 9, 2018)
dbe0e6c  fix windows build error (Nov 9, 2018)
0680184  fix build issue in windows (Nov 9, 2018)
8fda02a  fix omp build issue (Nov 9, 2018)
87cd144  fix cpp-package build error (Nov 9, 2018)
7afc7a8  fix mkldnn build (Nov 9, 2018)
862be24  fix an array size bound (Nov 10, 2018)
22213fa  add constants in tests (Nov 10, 2018)
cbaa553  fix sparse array (Nov 13, 2018)
7eca035  fix unit test (Nov 13, 2018)
1b48d4a  fix unit test (Nov 14, 2018)
cb2ee1e  fix R and scala package build (Nov 14, 2018)
08471f2  Fix build error in scala, julia, perl (Nov 14, 2018)
e5a3b32  fix a typo (Nov 14, 2018)
629a7c5  fix R-package scala-package compilation error (Nov 14, 2018)
2dd990a  fix scala unit test (Nov 14, 2018)
5b0cd3a  fix python2 unit test (Nov 14, 2018)
43ba3aa  fix scala unit test (Nov 15, 2018)
5286f63  fix scala unit test (Nov 15, 2018)
7247e6b  fix scala build (Nov 16, 2018)
f8839b3  fix python unit test (Nov 16, 2018)
e1cd1cd  update scala-package to fix unittest (Nov 17, 2018)
e0f4e2d  Merge remote-tracking branch 'upstream/master' into bugfix/large-array (Nov 17, 2018)
e0fe05c  fix scala unit test (Nov 19, 2018)
024a0ce  fix array typecode for python 2 and python 3 (Nov 19, 2018)
1f3361b  lint it (Nov 19, 2018)
23579e1  Merge remote-tracking branch 'upstream/master' into bugfix/large-array (Nov 19, 2018)
1cd9b88  lint it again (Nov 20, 2018)
335e896  fix python include error (Nov 20, 2018)
a08c79e  fix unit test (Nov 20, 2018)
69703fc  lint me in (Nov 20, 2018)
01952c5  fix python unit test in python2 windows (Nov 20, 2018)
a68bd97  fix perl-package unit test (Nov 20, 2018)
c1b14d1  fix perl package (Nov 20, 2018)
a3daa9b  Merge remote-tracking branch 'upstream/master' into bugfix/large-array (Nov 27, 2018)
32 changes: 16 additions & 16 deletions include/mxnet/c_api.h
@@ -487,7 +487,7 @@ MXNET_DLL int MXNDArrayCreateNone(NDArrayHandle *out);
* \param out the returning handle
* \return 0 when success, -1 when failure happens
*/
-MXNET_DLL int MXNDArrayCreate(const mx_uint *shape,
+MXNET_DLL int MXNDArrayCreate(const dim_t *shape,
mx_uint ndim,
int dev_type,
int dev_id,
@@ -506,7 +506,7 @@ MXNET_DLL int MXNDArrayCreate(const mx_uint *shape,
* \param out the returning handle
* \return 0 when success, -1 when failure happens
*/
-MXNET_DLL int MXNDArrayCreateEx(const mx_uint *shape,
+MXNET_DLL int MXNDArrayCreateEx(const dim_t *shape,
mx_uint ndim,
int dev_type,
int dev_id,
@@ -533,7 +533,7 @@ MXNET_DLL int MXNDArrayCreateEx(const mx_uint *shape,
* \return 0 when success, -1 when failure happens
*/
MXNET_DLL int MXNDArrayCreateSparseEx(int storage_type,
-const mx_uint *shape,
+const dim_t *shape,
mx_uint ndim,
int dev_type,
int dev_id,
@@ -693,8 +693,8 @@ MXNET_DLL int MXNDArrayFree(NDArrayHandle handle);
* \return 0 when success, -1 when failure happens
*/
MXNET_DLL int MXNDArraySlice(NDArrayHandle handle,
-mx_uint slice_begin,
-mx_uint slice_end,
+dim_t slice_begin,
+dim_t slice_end,
NDArrayHandle *out);

/*!
@@ -705,7 +705,7 @@ MXNET_DLL int MXNDArraySlice(NDArrayHandle handle,
* \return 0 when success, -1 when failure happens
*/
MXNET_DLL int MXNDArrayAt(NDArrayHandle handle,
-mx_uint idx,
+dim_t idx,
NDArrayHandle *out);

/*!
@@ -748,8 +748,8 @@ MXNET_DLL int MXNDArrayReshape64(NDArrayHandle handle,
* \return 0 when success, -1 when failure happens
*/
MXNET_DLL int MXNDArrayGetShape(NDArrayHandle handle,
-mx_uint *out_dim,
-const mx_uint **out_pdata);
+dim_t *out_dim,
+const dim_t **out_pdata);
/*!
* \brief get the content of the data in NDArray
* \param handle the handle to the ndarray
@@ -1466,16 +1466,16 @@ MXNET_DLL int MXSymbolInferShape(SymbolHandle sym,
mx_uint num_args,
const char** keys,
const mx_uint *arg_ind_ptr,
-const mx_uint *arg_shape_data,
+const dim_t *arg_shape_data,
mx_uint *in_shape_size,
const mx_uint **in_shape_ndim,
-const mx_uint ***in_shape_data,
+const dim_t ***in_shape_data,
mx_uint *out_shape_size,
const mx_uint **out_shape_ndim,
-const mx_uint ***out_shape_data,
+const dim_t ***out_shape_data,
mx_uint *aux_shape_size,
const mx_uint **aux_shape_ndim,
-const mx_uint ***aux_shape_data,
+const dim_t ***aux_shape_data,
int *complete);
/*!
* \brief partially infer shape of unknown input shapes given the known one.
@@ -1505,16 +1505,16 @@ MXNET_DLL int MXSymbolInferShapePartial(SymbolHandle sym,
mx_uint num_args,
const char** keys,
const mx_uint *arg_ind_ptr,
-const mx_uint *arg_shape_data,
+const dim_t *arg_shape_data,
mx_uint *in_shape_size,
const mx_uint **in_shape_ndim,
-const mx_uint ***in_shape_data,
+const dim_t ***in_shape_data,
mx_uint *out_shape_size,
const mx_uint **out_shape_ndim,
-const mx_uint ***out_shape_data,
+const dim_t ***out_shape_data,
mx_uint *aux_shape_size,
const mx_uint **aux_shape_ndim,
-const mx_uint ***aux_shape_data,
+const dim_t ***aux_shape_data,
int *complete);

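Throughout this header, shape and index arguments widen from `mx_uint` (32-bit unsigned) to `dim_t` (a 64-bit integer in MXNet). A quick ctypes check, independent of MXNet, illustrates why the 32-bit type cannot describe an axis with more than 2**32 - 1 elements. This is a sketch for illustration only; it assumes `dim_t` maps to a 64-bit type such as `ctypes.c_int64`.

```python
import ctypes

# A dimension just past the 32-bit range silently wraps in mx_uint (c_uint)...
big_dim = 2**32 + 5
print(ctypes.c_uint(big_dim).value)   # 5 -- truncated, wrong shape
# ...but survives intact in a 64-bit type, which is what dim_t provides.
print(ctypes.c_int64(big_dim).value)  # 4294967301 -- exact
```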
2 changes: 1 addition & 1 deletion include/mxnet/ndarray.h
@@ -464,7 +464,7 @@ class NDArray {
/*!
* \brief Copy from src.data()/aux_data(i) to this->data()/aux_data(j)
*/
-void SyncCopyFromNDArray(const NDArray &src, int i = -1, int j = -1);
+void SyncCopyFromNDArray(const NDArray &src, index_t i = -1, index_t j = -1);

/*!
* \brief Do a synchronized copy to a contiguous CPU memory region.
1 change: 1 addition & 0 deletions python/mxnet/base.py
@@ -215,6 +215,7 @@ def _load_lib():
# type definitions
mx_uint = ctypes.c_uint
mx_float = ctypes.c_float
+mx_long = ctypes.c_longlong
Review comment (Contributor): should this be longlong or just long?

Reply (Contributor Author): It should be longlong. c_long is the same as c_int in python ctypes.

mx_float_p = ctypes.POINTER(mx_float)
mx_real_t = np.float32
NDArrayHandle = ctypes.c_void_p
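The exchange above hinges on platform integer widths: `ctypes.c_long` follows the platform's C `long`, which is 4 bytes on Windows and on 32-bit systems (the same as `c_int`), while `c_longlong` is 8 bytes everywhere. A small check, plain Python with nothing MXNet-specific, makes the difference concrete:

```python
import ctypes

# c_longlong is always 64-bit, so mx_long can carry dimensions beyond 2**32
# on every platform; c_long would shrink to 32 bits on Windows builds.
for t in (ctypes.c_int, ctypes.c_long, ctypes.c_longlong):
    print(t.__name__, ctypes.sizeof(t))
# Typical output on 64-bit Linux:   c_int 4, c_long 8, c_longlong 8
# Typical output on 64-bit Windows: c_int 4, c_long 4, c_longlong 8
```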
10 changes: 5 additions & 5 deletions python/mxnet/ndarray/ndarray.py
@@ -35,7 +35,7 @@
import numpy as np
from ..base import _LIB, numeric_types, integer_types
from ..base import c_str, c_array, c_array_buf, c_handle_array, mx_real_t
-from ..base import mx_uint, NDArrayHandle, check_call, DLPackHandle
+from ..base import mx_uint, mx_long, NDArrayHandle, check_call, DLPackHandle
from ..base import ctypes2buffer
from ..context import Context, current_context
from . import _internal
@@ -131,7 +131,7 @@ def _new_alloc_handle(shape, ctx, delay_alloc, dtype=mx_real_t):
"""
hdl = NDArrayHandle()
check_call(_LIB.MXNDArrayCreateEx(
-c_array_buf(mx_uint, native_array('I', shape)),
+c_array_buf(mx_long, native_array('I', shape)),
mx_uint(len(shape)),
ctypes.c_int(ctx.device_typeid),
ctypes.c_int(ctx.device_id),
@@ -902,7 +902,7 @@ def _slice(self, start, stop):
start, stop, _ = _get_index_range(start, stop, self.shape[0])

check_call(_LIB.MXNDArraySlice(
-self.handle, mx_uint(start), mx_uint(stop), ctypes.byref(handle)))
+self.handle, mx_long(start), mx_long(stop), ctypes.byref(handle)))
return NDArray(handle=handle, writable=self.writable)

def _at(self, idx):
@@ -936,7 +936,7 @@ def _at(self, idx):
raise IndexError('index %d is out of bounds for axis 0 with size %d'
% (idx-length, length))
check_call(_LIB.MXNDArrayAt(
-self.handle, mx_uint(idx), ctypes.byref(handle)))
+self.handle, mx_long(idx), ctypes.byref(handle)))
return NDArray(handle=handle, writable=self.writable)

def reshape(self, *shape, **kwargs):
@@ -1834,7 +1834,7 @@ def shape(self):
(2L, 3L, 4L)
"""
ndim = mx_uint()
-pdata = ctypes.POINTER(mx_uint)()
+pdata = ctypes.POINTER(mx_long)()
check_call(_LIB.MXNDArrayGetShape(
self.handle, ctypes.byref(ndim), ctypes.byref(pdata)))
return tuple(pdata[:ndim.value]) # pylint: disable=invalid-slice-index
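With `_new_alloc_handle`, `_slice`, `_at`, and `shape` all routed through 64-bit values, front-end code can create and index arrays whose first axis exceeds 2**32 - 1. A rough sketch of the kind of nightly test this enables follows; it is illustrative only: `LARGE_X` is a made-up constant, the array needs roughly 17 GB of RAM as float32, and it assumes a build that includes this patch.

```python
import mxnet as mx

LARGE_X = 2**32 + 2                  # hypothetical size, past the mx_uint limit
a = mx.nd.zeros(shape=(LARGE_X,))    # shape passes through MXNDArrayCreateEx
assert a.shape[0] == LARGE_X         # shape round-trips via MXNDArrayGetShape
b = a[LARGE_X - 1]                   # integer index goes through MXNDArrayAt
c = a[LARGE_X - 10:LARGE_X]          # slice bounds go through MXNDArraySlice
```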
20 changes: 10 additions & 10 deletions src/c_api/c_api.cc
@@ -151,7 +151,7 @@ int MXNDArrayCreateNone(NDArrayHandle *out) {
API_END();
}

-int MXNDArrayCreate(const mx_uint *shape,
+int MXNDArrayCreate(const dim_t *shape,
mx_uint ndim,
int dev_type,
int dev_id,
@@ -165,7 +165,7 @@ int MXNDArrayCreate(const mx_uint *shape,
API_END();
}

-int MXNDArrayCreateEx(const mx_uint *shape,
+int MXNDArrayCreateEx(const dim_t *shape,
mx_uint ndim,
int dev_type,
int dev_id,
@@ -182,7 +182,7 @@ int MXNDArrayCreateEx(const mx_uint *shape,
}

int MXNDArrayCreateSparseEx(int storage_type,
-const mx_uint *shape,
+const dim_t *shape,
mx_uint ndim,
int dev_type,
int dev_id,
@@ -266,7 +266,7 @@ int MXNDArraySyncCopyToCPU(NDArrayHandle handle,
*/
int MXNDArraySyncCopyFromNDArray(NDArrayHandle handle_dst,
const NDArrayHandle handle_src,
-const int i) {
+const dim_t i) {
API_BEGIN();
NDArray* dst = static_cast<NDArray*>(handle_dst);
NDArray* src = static_cast<NDArray*>(handle_src);
@@ -394,8 +394,8 @@ int MXNDArrayFree(NDArrayHandle handle) {
}

int MXNDArraySlice(NDArrayHandle handle,
-mx_uint slice_begin,
-mx_uint slice_end,
+dim_t slice_begin,
+dim_t slice_end,
NDArrayHandle *out) {
NDArray *ptr = new NDArray();
API_BEGIN();
@@ -406,7 +406,7 @@ int MXNDArraySlice(NDArrayHandle handle,
}

int MXNDArrayAt(NDArrayHandle handle,
-mx_uint idx,
+dim_t idx,
NDArrayHandle *out) {
NDArray *ptr = new NDArray();
API_BEGIN();
@@ -481,15 +481,15 @@ int MXNDArrayGetStorageType(NDArrayHandle handle,
}

int MXNDArrayGetShape(NDArrayHandle handle,
-mx_uint *out_dim,
-const mx_uint **out_pdata) {
+dim_t *out_dim,
+const dim_t **out_pdata) {
MXAPIThreadLocalEntry *ret = MXAPIThreadLocalStore::Get();
API_BEGIN();
NDArray *arr = static_cast<NDArray*>(handle);
if (!arr->is_none()) {
const TShape &s = arr->shape();
*out_dim = s.ndim();
-std::vector<uint32_t>& buffer = ret->arg_shape_buffer;
+std::vector<dim_t>& buffer = ret->arg_shape_buffer;
buffer.resize(s.ndim());
nnvm::ShapeTypeCast(s.begin(), s.end(), buffer.data());
*out_pdata = buffer.data();
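Because `MXNDArrayGetShape` now reports dimensions through `dim_t` pointers, a raw ctypes caller must match the 64-bit width on both out-parameters; note that `out_dim` itself is now `dim_t*` rather than `mx_uint*`. A minimal sketch, assuming `dim_t` is `int64_t` and a build with this patch:

```python
import ctypes
import mxnet as mx
from mxnet.base import _LIB, check_call

a = mx.nd.ones((3, 4))
ndim = ctypes.c_int64()                   # matches the new dim_t *out_dim
pdata = ctypes.POINTER(ctypes.c_int64)()  # matches const dim_t **out_pdata
check_call(_LIB.MXNDArrayGetShape(
    a.handle, ctypes.byref(ndim), ctypes.byref(pdata)))
print([pdata[i] for i in range(ndim.value)])  # [3, 4]
```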
10 changes: 5 additions & 5 deletions src/c_api/c_api_common.h
@@ -84,23 +84,23 @@ struct MXAPIThreadLocalEntry {
/*! \brief result holder for returning shape dimensions */
std::vector<mx_uint> arg_shape_ndim, out_shape_ndim, aux_shape_ndim;
/*! \brief result holder for returning shape pointer */
-std::vector<const mx_uint*> arg_shape_data, out_shape_data, aux_shape_data;
+std::vector<const dim_t*> arg_shape_data, out_shape_data, aux_shape_data;
/*! \brief uint32_t buffer for returning shape pointer */
-std::vector<uint32_t> arg_shape_buffer, out_shape_buffer, aux_shape_buffer;
+std::vector<dim_t> arg_shape_buffer, out_shape_buffer, aux_shape_buffer;
/*! \brief bool buffer */
std::vector<bool> save_inputs, save_outputs;
// helper function to setup return value of shape array
inline static void SetupShapeArrayReturnWithBuffer(
const std::vector<TShape> &shapes,
std::vector<mx_uint> *ndim,
-std::vector<const mx_uint*> *data,
-std::vector<uint32_t> *buffer) {
+std::vector<const dim_t*> *data,
+std::vector<dim_t> *buffer) {
ndim->resize(shapes.size());
data->resize(shapes.size());
size_t size = 0;
for (const auto& s : shapes) size += s.ndim();
buffer->resize(size);
-uint32_t *ptr = buffer->data();
+dim_t *ptr = buffer->data();
for (size_t i = 0; i < shapes.size(); ++i) {
ndim->at(i) = shapes[i].ndim();
data->at(i) = ptr;
16 changes: 8 additions & 8 deletions src/c_api/c_api_symbolic.cc
@@ -505,16 +505,16 @@ int MXSymbolInferShape(SymbolHandle sym,
mx_uint num_args,
const char** keys,
const mx_uint *arg_ind_ptr,
-const mx_uint *arg_shape_data,
+const dim_t *arg_shape_data,
mx_uint *in_shape_size,
const mx_uint **in_shape_ndim,
-const mx_uint ***in_shape_data,
+const dim_t ***in_shape_data,
mx_uint *out_shape_size,
const mx_uint **out_shape_ndim,
-const mx_uint ***out_shape_data,
+const dim_t ***out_shape_data,
mx_uint *aux_shape_size,
const mx_uint **aux_shape_ndim,
-const mx_uint ***aux_shape_data,
+const dim_t ***aux_shape_data,
int *complete) {
nnvm::Symbol *s = static_cast<nnvm::Symbol*>(sym);
MXAPIThreadLocalEntry *ret = MXAPIThreadLocalStore::Get();
@@ -572,16 +572,16 @@ int MXSymbolInferShapePartial(SymbolHandle sym,
mx_uint num_args,
const char** keys,
const mx_uint *arg_ind_ptr,
-const mx_uint *arg_shape_data,
+const dim_t *arg_shape_data,
mx_uint *in_shape_size,
const mx_uint **in_shape_ndim,
-const mx_uint ***in_shape_data,
+const dim_t ***in_shape_data,
mx_uint *out_shape_size,
const mx_uint **out_shape_ndim,
-const mx_uint ***out_shape_data,
+const dim_t ***out_shape_data,
mx_uint *aux_shape_size,
const mx_uint **aux_shape_ndim,
-const mx_uint ***aux_shape_data,
+const dim_t ***aux_shape_data,
int *complete) {
int succ;
*complete = 1;
2 changes: 1 addition & 1 deletion src/ndarray/ndarray.cc
@@ -1849,7 +1849,7 @@ void NDArray::SyncCopyFromCPU(const void *data, size_t size) const {
/*!
* \brief Copy src.data()/aux_data(i) to dst->data()/aux_data(j).
*/
-void NDArray::SyncCopyFromNDArray(const NDArray& src, int i, int j) {
+void NDArray::SyncCopyFromNDArray(const NDArray& src, index_t i, index_t j) {
if (i >= 0) {
CHECK_NE(src.storage_type(), kDefaultStorage);
} else {
8 changes: 4 additions & 4 deletions src/operator/elemwise_op_common.h
@@ -100,7 +100,7 @@ inline bool ElemwiseStorageAttr(const nnvm::NodeAttrs& attrs,
* \tparam rsp whether row sparse stype is supported
* \tparam csr whether csr stype is supported
*/
-template<int n_in, int n_out, bool cpu_only, bool rsp, bool csr>
+template<index_t n_in, index_t n_out, bool cpu_only, bool rsp, bool csr>
inline bool ElemwiseStorageType(const nnvm::NodeAttrs& attrs,
const int dev_mask,
DispatchMode* dispatch_mode,
@@ -115,7 +115,7 @@ inline bool ElemwiseStorageType(const nnvm::NodeAttrs& attrs,
template<typename AttrType, bool (*is_none)(const AttrType&),
bool (*assign)(AttrType*, const AttrType&), bool reverse_infer,
std::string (*attr_string)(const AttrType&),
-int n_in = -1, int n_out = -1>
+index_t n_in = -1, index_t n_out = -1>
inline bool ElemwiseAttr(const nnvm::NodeAttrs& attrs,
std::vector<AttrType> *in_attrs,
std::vector<AttrType> *out_attrs,
@@ -154,7 +154,7 @@ inline bool ElemwiseAttr(const nnvm::NodeAttrs& attrs,
return true;
}

-template<int n_in, int n_out>
+template<index_t n_in, index_t n_out>
inline bool ElemwiseShape(const nnvm::NodeAttrs& attrs,
std::vector<TShape> *in_attrs,
std::vector<TShape> *out_attrs) {
@@ -168,7 +168,7 @@ inline bool ElemwiseShape(const nnvm::NodeAttrs& attrs,
attrs, in_attrs, out_attrs, TShape());
}

-template<int n_in, int n_out>
+template<index_t n_in, index_t n_out>
inline bool ElemwiseType(const nnvm::NodeAttrs& attrs,
std::vector<int> *in_attrs,
std::vector<int> *out_attrs) {