From c8f28e04f47ed60d0078617650234f31bfcad65d Mon Sep 17 00:00:00 2001
From: Rohit Kumar Srivastava
Date: Fri, 25 Oct 2019 23:28:18 +0000
Subject: [PATCH] adding tests and comments. Removed int64 check from linspace

---
 python/mxnet/ndarray/ndarray.py        |  2 +
 src/operator/tensor/init_op.h          |  5 ---
 tests/python/unittest/test_operator.py | 72 ++++++++++++++++++++++++++++++++++
 3 files changed, 74 insertions(+), 5 deletions(-)

diff --git a/python/mxnet/ndarray/ndarray.py b/python/mxnet/ndarray/ndarray.py
index f72029d194bf..c95236c0e10a 100644
--- a/python/mxnet/ndarray/ndarray.py
+++ b/python/mxnet/ndarray/ndarray.py
@@ -155,6 +155,8 @@ def _new_alloc_handle(shape, ctx, delay_alloc, dtype=mx_real_t):
             ctypes.c_int(int(_DTYPE_NP_TO_MX[np.dtype(dtype).type])),
             ctypes.byref(hdl)))
     else:
+        # When shape is larger than uint32 then there is an overflow error at python end itself.
+        # It needs to be caught here since the call doesn't even reach backend.
         size = 1
         for idx in shape:
             size = size * idx
diff --git a/src/operator/tensor/init_op.h b/src/operator/tensor/init_op.h
index ac7be8366155..a0139f7fde2d 100644
--- a/src/operator/tensor/init_op.h
+++ b/src/operator/tensor/init_op.h
@@ -646,11 +646,6 @@ inline bool LinspaceShape(const nnvm::NodeAttrs& attrs,
   CHECK_GE(param.num, 0)
     << "Number of sequence should be non-negative, received " << param.num;
   mxnet::TShape shape = mxnet::TShape({static_cast<nnvm::dim_t>(param.num)});
-  if (!features::is_enabled(features::INT64_TENSOR_SIZE)) {
-    CHECK_LT(shape.Size(), (int64_t{1} << 31) - 1) <<
-      "[LinspaceShape] Size of tensor you are trying to allocate is larger than "
-      "2^32 elements. Please build with flag USE_INT64_TENSOR_SIZE=1";
-  }
   SHAPE_ASSIGN_CHECK(*out_attrs, 0, shape);
   return true;
 }
diff --git a/tests/python/unittest/test_operator.py b/tests/python/unittest/test_operator.py
index 075816fdc6de..33f739bd10fc 100644
--- a/tests/python/unittest/test_operator.py
+++ b/tests/python/unittest/test_operator.py
@@ -9279,6 +9279,78 @@ def test_min_max_inf():
     assert_array_equal(max_data_np, max_data_mx.asnumpy())
 
 
+def test_large_tensor_disabled_err_msg():
+    """Ops must raise when a tensor exceeds 2**32 elements on builds
+    compiled without USE_INT64_TENSOR_SIZE=1."""
+    LARGE_X = 4300000000
+    MEDIUM_X = 1000000000
+    SMALL_Y = 1
+    shape = (2, LARGE_X)
+
+    def check_nd_array():
+        x = np.arange(0, LARGE_X)
+        assertRaises(MXNetError, mx.nd.array, x)
+
+    def check_nd_ones():
+        assertRaises(MXNetError, mx.nd.ones, shape)
+
+    def check_nd_zeros():
+        assertRaises(MXNetError, mx.nd.zeros, shape)
+
+    def check_nd_full():
+        val = 1
+        assertRaises(Exception, mx.nd.full, shape, val)
+
+    def check_nd_arange():
+        start = 0
+        stop = LARGE_X
+        assertRaises(Exception, mx.nd.arange, start, stop)
+
+    def check_nd_random():
+        shape = (2, LARGE_X)
+        def check_random_exp():
+            lam = 4
+            assertRaises(MXNetError, mx.nd.random_exponential, lam, shape)
+
+        def check_random_gamma():
+            alpha = 9
+            beta = 0.5
+            assertRaises(MXNetError, mx.nd.random_gamma, alpha, beta, shape)
+
+        def check_random_normal():
+            loc = 0
+            scale = 1
+            assertRaises(MXNetError, mx.nd.random_normal, loc, scale, shape)
+
+        def check_random_poisson():
+            lam = 4
+            assertRaises(MXNetError, mx.nd.random_poisson, lam, shape)
+
+        def check_random_randint():
+            low = 0
+            high = 1000000
+            assertRaises(MXNetError, mx.nd.random_randint, low, high, shape)
+
+        def check_random_uniform():
+            low = 0
+            high = 1
+            assertRaises(MXNetError, mx.nd.random_uniform, low, high, shape)
+
+        check_random_exp()
+        check_random_gamma()
+        check_random_normal()
+        check_random_poisson()
+        check_random_randint()
+        check_random_uniform()
+
+    check_nd_array()
+    check_nd_ones()
+    check_nd_zeros()
+    check_nd_full()
+    check_nd_arange()
+    check_nd_random()
+
+
 if __name__ == '__main__':
     import nose
     nose.runmodule()