Skip to content
This repository has been archived by the owner on Nov 17, 2023. It is now read-only.

Commit

Permalink
adding tests and comments. Removed int64 check from linspace
Browse files Browse the repository at this point in the history
  • Loading branch information
Rohit Kumar Srivastava committed Oct 26, 2019
1 parent 155088e commit c9c8bb4
Show file tree
Hide file tree
Showing 3 changed files with 58 additions and 5 deletions.
2 changes: 2 additions & 0 deletions python/mxnet/ndarray/ndarray.py
Original file line number Diff line number Diff line change
Expand Up @@ -155,6 +155,8 @@ def _new_alloc_handle(shape, ctx, delay_alloc, dtype=mx_real_t):
ctypes.c_int(int(_DTYPE_NP_TO_MX[np.dtype(dtype).type])),
ctypes.byref(hdl)))
else:
# When the shape is larger than uint32 can represent, an overflow error occurs on the Python side itself.
# It needs to be caught here since the call doesn't even reach the backend.
size = 1
for idx in shape:
size = size * idx
Expand Down
5 changes: 0 additions & 5 deletions src/operator/tensor/init_op.h
Original file line number Diff line number Diff line change
Expand Up @@ -646,11 +646,6 @@ inline bool LinspaceShape(const nnvm::NodeAttrs& attrs,
CHECK_GE(param.num, 0)
<< "Number of sequence should be non-negative, received " << param.num;
mxnet::TShape shape = mxnet::TShape({static_cast<nnvm::dim_t>(param.num)});
if (!features::is_enabled(features::INT64_TENSOR_SIZE)) {
CHECK_LT(shape.Size(), (int64_t{1} << 31) - 1) <<
"[LinspaceShape] Size of tensor you are trying to allocate is larger than "
"2^32 elements. Please build with flag USE_INT64_TENSOR_SIZE=1";
}
SHAPE_ASSIGN_CHECK(*out_attrs, 0, shape);
return true;
}
Expand Down
56 changes: 56 additions & 0 deletions tests/python/unittest/test_operator.py
Original file line number Diff line number Diff line change
Expand Up @@ -9279,6 +9279,62 @@ def test_min_max_inf():
assert_array_equal(max_data_np, max_data_mx.asnumpy())


def test_large_tensor_disabled_err_msg():
    """Verify that creating tensors with more than 2**32 elements raises an
    error when MXNet is built without USE_INT64_TENSOR_SIZE.

    Each helper triggers one allocation path (array/ones/zeros/full/arange
    and the random samplers) and asserts the expected exception is raised.
    """
    # > 2**32, so the element count overflows the default 32-bit tensor size.
    LARGE_X = 4300000000
    shape = (2, LARGE_X)

    def check_nd_array():
        # Overflow is detected on the Python side before reaching the backend.
        x = np.arange(0, LARGE_X)
        assertRaises(MXNetError, mx.nd.array, x)

    def check_nd_ones():
        assertRaises(MXNetError, mx.nd.ones, shape)

    def check_nd_zeros():
        assertRaises(MXNetError, mx.nd.zeros, shape)

    def check_nd_full():
        val = 1
        assertRaises(Exception, mx.nd.full, shape, val)

    def check_nd_arange():
        start = 0
        stop = LARGE_X
        assertRaises(Exception, mx.nd.arange, start, stop)

    def check_nd_random():
        def check_random_exp():
            lam = 4
            assertRaises(MXNetError, mx.nd.random_exponential, lam, shape)

        def check_random_gamma():
            alpha = 9
            beta = 0.5
            assertRaises(MXNetError, mx.nd.random_gamma, alpha, beta, shape)

        def check_random_normal():
            loc = 0
            scale = 1
            assertRaises(MXNetError, mx.nd.random_normal, loc, scale, shape)

        def check_random_poisson():
            # Bug fix: original referenced an undefined `alpha`; poisson takes lam.
            lam = 4
            assertRaises(MXNetError, mx.nd.random_poisson, lam, shape)

        def check_random_randint():
            low = 0
            high = 1000000
            assertRaises(MXNetError, mx.nd.random_randint, low, high, shape)

        def check_random_uniform():
            # Bug fix: original referenced undefined `alpha`/`beta` and had
            # the typo `hight`; uniform takes (low, high).
            low = 0
            high = 1
            assertRaises(MXNetError, mx.nd.random_uniform, low, high, shape)

        check_random_exp()
        check_random_gamma()
        check_random_normal()
        check_random_poisson()
        check_random_randint()
        check_random_uniform()

    # Bug fix: the original defined all the helpers but never called them,
    # so the test silently checked nothing.
    check_nd_array()
    check_nd_ones()
    check_nd_zeros()
    check_nd_full()
    check_nd_arange()
    check_nd_random()


# Allow running this test module directly with the nose test runner.
if __name__ == '__main__':
    import nose
    nose.runmodule()

0 comments on commit c9c8bb4

Please sign in to comment.