From 1684abe2b7cceccfc16c78c5e4fa2f11e0577797 Mon Sep 17 00:00:00 2001 From: ChaiBapchya Date: Sat, 28 Sep 2019 01:19:43 +0000 Subject: [PATCH 01/13] fix activation --- tests/nightly/test_large_array.py | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/tests/nightly/test_large_array.py b/tests/nightly/test_large_array.py index 99856f770d5c..5d7279e79d0a 100644 --- a/tests/nightly/test_large_array.py +++ b/tests/nightly/test_large_array.py @@ -745,32 +745,32 @@ def test_dropout(): def test_activation(): - a = mx.nd.ones((LARGE_X, SMALL_Y)) + x = mx.nd.ones((LARGE_X, SMALL_Y)) test_x = -2 - a[-1, -1] = test_x + x[-1, -1] = test_x # Hyperbolic tangent (tanh) # y = (exp(x)-exp(-x))/(exp(x)+exp(-x)) - a = mx.nd.Activation(a, act_type="tanh") - tanh_x = (np.exp(test_x)-np.exp(-test_x))/(np.exp(test_x)+np.exp(-test_x)) - assert a[-1][-1] == tanh_x + y = mx.nd.Activation(x, act_type="tanh") + tanh_x = ((np.exp(test_x)-np.exp(-test_x))/(np.exp(test_x)+np.exp(-test_x)))#.astype('float32') + assert y[-1][-1] == np.float32(tanh_x)#.astype('float32') # Rectified Linear Unit (relu) # y = max(x,0) - a = mx.nd.Activation(a, act_type="relu") - assert a[-1][-1] == 0 + y = mx.nd.Activation(x, act_type="relu") + assert y[-1][-1] == 0 # Sigmoid # y = 1/(1+exp(-x)) - a = mx.nd.Activation(a, act_type="sigmoid") - sigmoid_x = 1/(1+math.exp(-test_x)) - assert a[-1][-1] == sigmoid_x + y = mx.nd.Activation(x, act_type="sigmoid") + sigmoid_x = (1/(1+math.exp(-test_x)))#.astype('float32') + assert y[-1][-1] == np.float32(sigmoid_x) # Soft Sign # y = x/(1+abs(x)) - a = mx.nd.Activation(a, act_type="softsign") - softsign_x = test_x/(1+abs(test_x)) - assert a[-1][-1] == softsign_x + y = mx.nd.Activation(x, act_type="softsign") + softsign_x = (test_x/(1+abs(test_x)))#.astype('float32') + assert y[-1][-1] == np.float32(softsign_x) # TODO: correctness of batchnorm From e54f41066c65cc296f330bbc3ea43360fcdc0f56 Mon Sep 17 00:00:00 2001 From: ChaiBapchya Date: Tue, 1 Oct 2019 16:45:05 -0700 Subject: [PATCH 02/13] remove comments --- tests/nightly/test_large_array.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/nightly/test_large_array.py b/tests/nightly/test_large_array.py index 5d7279e79d0a..4ebbb3e83297 100644 --- a/tests/nightly/test_large_array.py +++ b/tests/nightly/test_large_array.py @@ -752,8 +752,8 @@ def test_activation(): # Hyperbolic tangent (tanh) # y = (exp(x)-exp(-x))/(exp(x)+exp(-x)) y = mx.nd.Activation(x, act_type="tanh") - tanh_x = ((np.exp(test_x)-np.exp(-test_x))/(np.exp(test_x)+np.exp(-test_x)))#.astype('float32') - assert y[-1][-1] == np.float32(tanh_x)#.astype('float32') + tanh_x = ((np.exp(test_x)-np.exp(-test_x))/(np.exp(test_x)+np.exp(-test_x))) + assert y[-1][-1] == np.float32(tanh_x) # Rectified Linear Unit (relu) # y = max(x,0) @@ -763,13 +763,13 @@ def test_activation(): # Sigmoid # y = 1/(1+exp(-x)) y = mx.nd.Activation(x, act_type="sigmoid") - sigmoid_x = (1/(1+math.exp(-test_x)))#.astype('float32') + sigmoid_x = (1/(1+math.exp(-test_x))) assert y[-1][-1] == np.float32(sigmoid_x) # Soft Sign # y = x/(1+abs(x)) y = mx.nd.Activation(x, act_type="softsign") - softsign_x = (test_x/(1+abs(test_x)))#.astype('float32') + softsign_x = (test_x/(1+abs(test_x))) assert y[-1][-1] == np.float32(softsign_x) From 575bf5a54a4d5b394f6c59f6c0554af4271bef73 Mon Sep 17 00:00:00 2001 From: ChaiBapchya Date: Wed, 2 Oct 2019 11:25:21 -0700 Subject: [PATCH 03/13] fix copy_to --- tests/nightly/test_large_array.py | 3 +-- 1 file changed, 1
insertion(+), 2 deletions(-) diff --git a/tests/nightly/test_large_array.py b/tests/nightly/test_large_array.py index 4ebbb3e83297..9e33d44414ba 100644 --- a/tests/nightly/test_large_array.py +++ b/tests/nightly/test_large_array.py @@ -924,8 +924,7 @@ def test_copy_to(): b = nd.array(np.zeros((SMALL_Y, LARGE_X))) c = a.copyto(b) assert c is b - print(b) - assert b[0][-1] == LARGE_X-1 + assert b[-1][-1] == SMALL_Y-1 def test_zeros_like(): From cc5daf88e010d6dfdb7ffdb7c015cb3d8cad09f6 Mon Sep 17 00:00:00 2001 From: ChaiBapchya Date: Thu, 3 Oct 2019 17:38:37 -0700 Subject: [PATCH 04/13] fix lint, remove redundant function, fix shape sizes for random functions --- tests/nightly/test_large_array.py | 76 +++++++++++++------------------ 1 file changed, 31 insertions(+), 45 deletions(-) diff --git a/tests/nightly/test_large_array.py b/tests/nightly/test_large_array.py index 9e33d44414ba..f134704485f0 100644 --- a/tests/nightly/test_large_array.py +++ b/tests/nightly/test_large_array.py @@ -84,20 +84,20 @@ def test_ndarray_random_randint(): @with_seed() def test_ndarray_random_exponential(): - scale_array = nd.random.uniform(shape=(MEDIUM_X, SMALL_Y)) + scale_array = nd.random.uniform(shape=(MEDIUM_X, SMALL_X)) a = nd.random.exponential(scale=scale_array, shape=(SMALL_X, SMALL_Y)) assert a[-1][0][0][0] >= 0 - assert a.shape == (MEDIUM_X, SMALL_Y, SMALL_X, SMALL_Y) + assert a.shape == (MEDIUM_X, SMALL_X, SMALL_X, SMALL_Y) @with_seed() def test_ndarray_random_gamma(): - alpha_array = nd.random.uniform(shape=(MEDIUM_X, SMALL_Y)) - beta_array = nd.random.uniform(shape=(MEDIUM_X, SMALL_Y)) + alpha_array = nd.random.uniform(shape=(MEDIUM_X, SMALL_X)) + beta_array = nd.random.uniform(shape=(MEDIUM_X, SMALL_X)) a = nd.random.gamma(alpha=alpha_array, beta=beta_array, shape=(SMALL_X, SMALL_Y)) assert a[-1][0][0][0] >= 0 - assert a.shape == (MEDIUM_X, SMALL_Y, SMALL_X, SMALL_Y) + assert a.shape == (MEDIUM_X, SMALL_X, SMALL_X, SMALL_Y) @with_seed() @@ -108,50 +108,50 @@ def test_ndarray_random_multinomial(): assert a[-1] >= 0 assert a.shape == (LARGE_X,) # test for NDArray multi-dimension shape - a = nd.random.multinomial(probs, shape=(SMALL_X, SMALL_Y)) + a = nd.random.multinomial(probs, shape=(2, SMALL_Y)) assert a[-1][0][0] >= 0 - assert a.shape == (LARGE_X, SMALL_X, SMALL_Y) + assert a.shape == (LARGE_X, 2, SMALL_Y) # test log_likelihood output shape - a = nd.random.multinomial(probs, shape=(SMALL_X, SMALL_Y), get_prob=True) - assert a[-1][0][0] >= 0 - assert a[0].shape == (LARGE_X, SMALL_X, SMALL_Y) and a[0].shape == a[1].shape + a = nd.random.multinomial(probs, shape=(2, SMALL_Y), get_prob=True) + assert a[0][0][0][0] >= 0 + assert a[0].shape == (LARGE_X, 2, SMALL_Y) and a[0].shape == a[1].shape @with_seed() def test_ndarray_random_generalized_negative_binomial(): - alpha_array = nd.random.uniform(shape=(MEDIUM_X, SMALL_Y)) - mu_array = nd.random.uniform(shape=(MEDIUM_X, SMALL_Y)) + alpha_array = nd.random.uniform(shape=(MEDIUM_X, SMALL_X)) + mu_array = nd.random.uniform(shape=(MEDIUM_X, SMALL_X)) a = nd.random.generalized_negative_binomial(mu=mu_array, alpha=alpha_array, shape=(SMALL_X, SMALL_Y)) assert a[-1][0][0][0] >= 0 - assert a.shape == (MEDIUM_X, SMALL_Y, SMALL_X, SMALL_Y) + assert a.shape == (MEDIUM_X, SMALL_X, SMALL_X, SMALL_Y) @with_seed() def test_ndarray_random_negative_binomial(): - k_array = nd.random.uniform(shape=(MEDIUM_X, SMALL_Y)) - p_array = nd.random.uniform(shape=(MEDIUM_X, SMALL_Y)) + k_array = nd.random.uniform(shape=(MEDIUM_X, SMALL_X)) + p_array = 
nd.random.uniform(shape=(MEDIUM_X, SMALL_X)) a = nd.random.negative_binomial(k=k_array, p=p_array, shape=(SMALL_X, SMALL_Y)) assert a[-1][0][0][0] >= 0 - assert a.shape == (MEDIUM_X, SMALL_Y, SMALL_X, SMALL_Y) + assert a.shape == (MEDIUM_X, SMALL_X, SMALL_X, SMALL_Y) @with_seed() def test_ndarray_random_normal(): - scale_array = nd.random.uniform(shape=(MEDIUM_X, SMALL_Y)) - loc_array = nd.random.uniform(shape=(MEDIUM_X, SMALL_Y)) + scale_array = nd.random.uniform(shape=(MEDIUM_X, SMALL_X)) + loc_array = nd.random.uniform(shape=(MEDIUM_X, SMALL_X)) a = nd.random.normal(loc=loc_array, scale=scale_array, shape=(SMALL_X, SMALL_Y)) - assert a.shape == (MEDIUM_X, SMALL_Y, SMALL_X, SMALL_Y) + assert a.shape == (MEDIUM_X, SMALL_X, SMALL_X, SMALL_Y) @with_seed() def test_ndarray_random_poisson(): - lambda_array = nd.random.uniform(shape=(MEDIUM_X, SMALL_Y)) + lambda_array = nd.random.uniform(shape=(MEDIUM_X, SMALL_X)) a = nd.random.poisson(lam=lambda_array, shape=(SMALL_X, SMALL_Y)) assert a[-1][0][0][0] >= 0 - assert a.shape == (MEDIUM_X, SMALL_Y, SMALL_X, SMALL_Y) + assert a.shape == (MEDIUM_X, SMALL_X, SMALL_X, SMALL_Y) @with_seed() @@ -269,6 +269,7 @@ def test_slice_assign(): def test_expand_dims(): a = nd.ones(shape=(LARGE_X, SMALL_Y)) res = nd.expand_dims(a, axis=1) + assert res[0][0][0] == 1 assert res.shape == (a.shape[0], 1, a.shape[1]) @@ -561,7 +562,7 @@ def test_sequence_last(): # test if returns last sequence b = nd.SequenceLast(a) - assert_almost_equal(b.asnumpy(), a[-1].asnumpy()) # only checks for (2,SMALL_Y) tensor + assert_almost_equal(b.asnumpy(), a[-1].asnumpy()) # only checks for (2, SMALL_Y) tensor assert b.shape == (2, SMALL_Y) # test with sequence length @@ -956,24 +957,17 @@ def test_flatten(): assert b.shape == (LARGE_X//2, SMALL_Y*2) -def test_expand_dims(): - a = nd.array(np.ones((SMALL_Y, LARGE_X))) - b = nd.expand_dims(a, axis=1) - nd.waitall() - assert b.shape == (SMALL_Y, 1, LARGE_X) - - def test_concat(): a = nd.array(np.ones((SMALL_Y, LARGE_X))) b = nd.array(np.zeros((SMALL_Y, LARGE_X))) - c = nd.concat(a,b, dim=0) + c = nd.concat(a, b, dim=0) assert c.shape == (b.shape[0]*2, LARGE_X) def test_stack(): a = nd.array(np.ones((SMALL_Y, LARGE_X))) b = nd.array(np.zeros((SMALL_Y, LARGE_X))) - c = nd.stack(a,b, axis=1) + c = nd.stack(a, b, axis=1) assert c.shape == (b.shape[0], 2, LARGE_X) @@ -1018,7 +1012,7 @@ def test_max(): def test_norm(): a = np.array(np.full((1, LARGE_X), 3)) b = np.array(np.full((1, LARGE_X), 4)) - c = nd.array(np.concatenate((a,b), axis=0)) + c = nd.array(np.concatenate((a, b), axis=0)) d = nd.norm(c, ord=2, axis=0) e = nd.norm(c, ord=1, axis=0) assert d.shape[0] == LARGE_X @@ -1030,7 +1024,7 @@ def test_norm(): def test_argmax(): a = np.ones((SMALL_Y, LARGE_X)) b = np.zeros((SMALL_Y, LARGE_X)) - c = nd.array(np.concatenate((a,b), axis=0)) + c = nd.array(np.concatenate((a, b), axis=0)) d = nd.argmax(c, axis=0) assert d.shape[0] == LARGE_X assert d[-1] == d[0] == 0 @@ -1039,12 +1033,13 @@ def test_argmax(): def test_relu(): def frelu(x): return np.maximum(x, 0.0) + def frelu_grad(x): return 1.0 * (x > 0.0) shape = (SMALL_Y, LARGE_X) x = mx.symbol.Variable("x") y = mx.sym.relu(x) - xa = np.random.uniform(low=-1.0,high=1.0,size=shape) + xa = np.random.uniform(low=-1.0, high=1.0, size=shape) eps = 1e-4 xa[abs(xa) < eps] = 1.0 ya = frelu(xa) @@ -1058,7 +1053,7 @@ def fsigmoid(a): shape = (SMALL_Y, LARGE_X) x = mx.symbol.Variable("x") y = mx.sym.sigmoid(x) - xa = np.random.uniform(low=-1.0,high=1.0,size=shape) + xa = np.random.uniform(low=-1.0,
high=1.0, size=shape) ya = fsigmoid(xa) check_symbolic_forward(y, [xa], [ya]) @@ -1115,15 +1110,6 @@ def test_idiv(): assert c[0][-1] == 2 -def test_imod(): - a = nd.array(np.array(np.full((SMALL_Y, LARGE_X), 3))) - b = nd.array(np.array(np.full((SMALL_Y, LARGE_X), 2))) - c = a - c %= b - assert c.shape == a.shape - assert c[0][-1] == 1 - - def test_eq(): a = nd.array(np.array(np.full((SMALL_Y, LARGE_X), 3))) b = nd.array(np.array(np.full((SMALL_Y, LARGE_X), 3))) @@ -1197,7 +1183,7 @@ def test_slice_axis(): def test_one_hot(): - #default dtype of ndarray is float32 which cannot index elements over 2^32 + # default dtype of ndarray is float32 which cannot index elements over 2^32 a = nd.array([1, (VLARGE_X - 1)], dtype=np.int64) b = nd.one_hot(a, VLARGE_X) b[0][1] == 1 From db628d7d1ca9d8eedd6e5b028f6a12539bd66a24 Mon Sep 17 00:00:00 2001 From: ChaiBapchya Date: Thu, 3 Oct 2019 23:57:07 -0700 Subject: [PATCH 05/13] fix sigmoid issue --- tests/nightly/test_large_array.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/nightly/test_large_array.py b/tests/nightly/test_large_array.py index f134704485f0..ceebb298fe2d 100644 --- a/tests/nightly/test_large_array.py +++ b/tests/nightly/test_large_array.py @@ -765,7 +765,7 @@ def test_activation(): # y = 1/(1+exp(-x)) y = mx.nd.Activation(x, act_type="sigmoid") sigmoid_x = (1/(1+math.exp(-test_x))) - assert y[-1][-1] == np.float32(sigmoid_x) + assert_almost_equal(y[-1][-1].asnumpy(), np.float32(sigmoid_x), atol=1e-3, rtol=1e-3) # Soft Sign # y = x/(1+abs(x)) From 2c8a08e500d3a47e1da446c649f94f1d550d2c12 Mon Sep 17 00:00:00 2001 From: ChaiBapchya Date: Fri, 4 Oct 2019 00:15:14 -0700 Subject: [PATCH 06/13] fix leaky relu --- tests/nightly/test_large_array.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/nightly/test_large_array.py b/tests/nightly/test_large_array.py index ceebb298fe2d..2b207c90dc38 100644 --- a/tests/nightly/test_large_array.py +++ b/tests/nightly/test_large_array.py @@ -638,23 +638,23 @@ def test_leaky_relu(): def test_leaky(): res = mx.nd.LeakyReLU(a, act_type="leaky", slope=0.3) - assert res[-1][-1].asnumpy() == 0.3*a[-1][-1].asnumpy() + assert_almost_equal(res[-1][-1].asnumpy(), 0.3*a[-1][-1].asnumpy(), atol=1e-3, rtol=1e-3) def test_elu(): res = mx.nd.LeakyReLU(a, act_type="elu", slope=0.3) - assert res[-1][-1].asnumpy() == 0.3*(np.exp(a[-1][-1].asnumpy())-1) + assert_almost_equal(res[-1][-1].asnumpy(), 0.3*(np.exp(a[-1][-1].asnumpy())-1), atol=1e-3, rtol=1e-3) def test_selu(): lam = 1.0507009873554804934193349852946 alpha = 1.6732632423543772848170429916717 res = mx.nd.LeakyReLU(a, act_type="selu") - assert res[-1][-1].asnumpy() == (lam * alpha * (np.exp(a[-1][-1].asnumpy())-1)) + assert_almost_equal(res[-1][-1].asnumpy(), (lam * alpha * (np.exp(a[-1][-1].asnumpy())-1)), atol=1e-3, rtol=1e-3) def test_rrelu(): lower = 0.125 upper = 0.333999991 res = mx.nd.LeakyReLU(a, act_type="rrelu") - assert res[-1][-1].asnumpy() == (lower + upper) / 2 * a[-1][-1].asnumpy() + assert_almost_equal(res[0][-1][-1].asnumpy(), (lower + upper) / 2 * a[-1][-1].asnumpy(), atol=1e-3, rtol=1e-3) test_leaky() test_elu() From b57b8843e134a90b758a80fbdd7b4df3187017df Mon Sep 17 00:00:00 2001 From: ChaiBapchya Date: Fri, 4 Oct 2019 10:50:26 -0700 Subject: [PATCH 07/13] fix random shuffle --- tests/nightly/test_large_array.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/nightly/test_large_array.py b/tests/nightly/test_large_array.py index 2b207c90dc38..a6ace5a97106
100644 --- a/tests/nightly/test_large_array.py +++ b/tests/nightly/test_large_array.py @@ -165,7 +165,7 @@ def test_ndarray_random_randn(): @with_seed() def test_ndarray_random_shuffle(): a = nd.ones(shape=(LARGE_X, SMALL_Y)) - a[-1] == 3 # assign 3 to entire last row + a[-1] = 3 # assign 3 to entire last row a = nd.random.shuffle(a) # slice first column from shuffled array # pass LARGE_X values to numpy instead of LARGE_X*SMALL_Y @@ -175,7 +175,7 @@ def test_ndarray_random_shuffle(): assert len(unique_a) == 2 # only 2 unique values assert unique_a[0] == 1 # first unique value is 1 assert unique_a[1] == 3 # second unique value is 3 - assert a.shape[0] == (LARGE_X, SMALL_Y) + assert a.shape == (LARGE_X, SMALL_Y) def test_ndarray_empty(): From b448a70f9f478224fd50e302c77467934e494e14 Mon Sep 17 00:00:00 2001 From: ChaiBapchya Date: Fri, 4 Oct 2019 13:23:00 -0700 Subject: [PATCH 08/13] fix pooling --- tests/nightly/test_large_array.py | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/tests/nightly/test_large_array.py b/tests/nightly/test_large_array.py index a6ace5a97106..099084bbe530 100644 --- a/tests/nightly/test_large_array.py +++ b/tests/nightly/test_large_array.py @@ -663,31 +663,31 @@ def test_rrelu(): def test_pooling(): - a = mx.nd.ones((MEDIUM_X, MEDIUM_X, SMALL_Y, SMALL_Y)) + a = mx.nd.ones((MEDIUM_X, 200, SMALL_Y, SMALL_Y)) def test_avg_pooling(): res = mx.nd.Pooling(a, kernel=(5, 5), pool_type='avg') - assert res[-1][-1][-1][-1] == 1.0000001 - assert res.shape == SMALL_Y - 5 + 1 + assert_almost_equal(res[-1][-1][-1][-1].asnumpy(), 1.0000001, atol=1e-3, rtol=1e-3) + assert res.shape[-1] == SMALL_Y - 5 + 1 def test_max_pooling(): res = mx.nd.Pooling(a, kernel=(5, 5), pool_type='max') - assert res[-1][-1][-1][-1] == 1. - assert res.shape == SMALL_Y - 5 + 1 + assert_almost_equal(res[-1][-1][-1][-1].asnumpy(), 1., atol=1e-3, rtol=1e-3) + assert res.shape[-1] == SMALL_Y - 5 + 1 def test_sum_pooling(): res = mx.nd.Pooling(a, kernel=(5, 5), pool_type='sum') - assert res[-1][-1][-1][-1] == 25 - assert res.shape == SMALL_Y - 5 + 1 + assert_almost_equal(res[-1][-1][-1][-1].asnumpy(), 25, atol=1e-3, rtol=1e-3) + assert res.shape[-1] == SMALL_Y - 5 + 1 def test_lp_pooling(): res = mx.nd.Pooling(a, kernel=(5, 5), pool_type='lp', p_value=2) - assert res[-1][-1][-1][-1] == 5. - assert res.shape == SMALL_Y - 5 + 1 + assert_almost_equal(res[-1][-1][-1][-1].asnumpy(), 5., atol=1e-3, rtol=1e-3) + assert res.shape[-1] == SMALL_Y - 5 + 1 res = mx.nd.Pooling(a, kernel=(5, 5), pool_type='lp', p_value=1) - assert res[-1][-1][-1][-1] == 25. 
- assert res.shape == SMALL_Y - 5 + 1 + assert_almost_equal(res[-1][-1][-1][-1].asnumpy(), 25., atol=1e-3, rtol=1e-3) + assert res.shape[-1] == SMALL_Y - 5 + 1 test_avg_pooling() test_max_pooling() From 087f20a851ad515cfa6f8a54a490ca51ac7651a7 Mon Sep 17 00:00:00 2001 From: ChaiBapchya Date: Fri, 4 Oct 2019 13:57:05 -0700 Subject: [PATCH 09/13] fix dropout --- tests/nightly/test_large_array.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/nightly/test_large_array.py b/tests/nightly/test_large_array.py index 099084bbe530..580db98935d6 100644 --- a/tests/nightly/test_large_array.py +++ b/tests/nightly/test_large_array.py @@ -742,7 +742,8 @@ def test_dropout(): exe = y.simple_bind(ctx=default_context(), data=shape) exe.arg_arrays[0][:] = 1 out = exe.forward(is_train=True) - assert out.shape == out.shape + nd.waitall() + assert out[0].shape == shape def test_activation(): From 3a4025fb73c5dfe0771809b70c3a3117a0e70237 Mon Sep 17 00:00:00 2001 From: ChaiBapchya Date: Wed, 9 Oct 2019 13:31:19 -0700 Subject: [PATCH 10/13] fix index copy --- src/operator/contrib/index_copy-inl.h | 2 +- src/operator/contrib/index_copy.cc | 4 ++-- tests/nightly/test_large_array.py | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/src/operator/contrib/index_copy-inl.h b/src/operator/contrib/index_copy-inl.h index 9f78f0593ed1..35bfcd0e77b6 100644 --- a/src/operator/contrib/index_copy-inl.h +++ b/src/operator/contrib/index_copy-inl.h @@ -71,7 +71,7 @@ inline bool IndexCopyShape(const nnvm::NodeAttrs& attrs, CHECK_EQ(in_attrs->at(0)[i], in_attrs->at(2)[i]); } } - // The the length of the fitrst dim of copied tensor + // The length of the first dim // must equal to the size of index vector CHECK_EQ(in_attrs->at(1)[0], in_attrs->at(2)[0]); SHAPE_ASSIGN_CHECK(*out_attrs, 0, in_attrs->at(0)); diff --git a/src/operator/contrib/index_copy.cc b/src/operator/contrib/index_copy.cc index f272a8860d85..9a071c04b51c 100644 --- a/src/operator/contrib/index_copy.cc +++ b/src/operator/contrib/index_copy.cc @@ -28,12 +28,12 @@ namespace op { struct index_copy_fwd_cpu { template<typename DType, typename IType> - static void Map(int i, + static void Map(index_t i, const DType* new_tensor, const IType* idx, DType* out_tensor, int dim_size) { - DType* out_ptr = out_tensor + static_cast<int>(idx[i]) * dim_size; + DType* out_ptr = out_tensor + static_cast<index_t>(idx[i]) * dim_size; const DType* new_ptr = new_tensor + i * dim_size; std::memcpy(out_ptr, new_ptr, sizeof(DType) * dim_size); } diff --git a/tests/nightly/test_large_array.py b/tests/nightly/test_large_array.py index 580db98935d6..5674f9826510 100644 --- a/tests/nightly/test_large_array.py +++ b/tests/nightly/test_large_array.py @@ -601,7 +601,7 @@ def test_softmax_cross_entropy(): def test_index_copy(): x = mx.nd.zeros((LARGE_X, SMALL_Y)) t = mx.nd.arange(1, SMALL_Y + 1).reshape((1, SMALL_Y)) - index = mx.nd.array([LARGE_X - 1]) + index = mx.nd.array([LARGE_X - 1], dtype="int64") x = mx.nd.contrib.index_copy(x, index, t) assert x[-1][-1] == t[0][-1] From 50b40f993d960e4bbf363e838064732e02c205a7 Mon Sep 17 00:00:00 2001 From: ChaiBapchya Date: Thu, 10 Oct 2019 13:49:00 -0700 Subject: [PATCH 11/13] add teardown and fix lint --- tests/nightly/test_large_array.py | 109 +++++++++++++++++++++++++++++ tests/nightly/test_large_vector.py | 91 ++++++++++++++++++++---- 2 files changed, 187 insertions(+), 13 deletions(-) diff --git a/tests/nightly/test_large_array.py b/tests/nightly/test_large_array.py index 5674f9826510..b77bb5b367dd 100644 ---
a/tests/nightly/test_large_array.py +++ b/tests/nightly/test_large_array.py @@ -22,6 +22,7 @@ from mxnet.test_utils import rand_ndarray, assert_almost_equal, rand_coord_2d, default_context, check_symbolic_forward, create_2d_tensor from mxnet import gluon, nd from tests.python.unittest.common import with_seed, teardown +from nose.tools import with_setup # dimension constants MEDIUM_X = 10000 @@ -32,6 +33,7 @@ LARGE_SIZE = LARGE_X * SMALL_Y +@with_setup(teardown) def test_gluon_embedding(): m = gluon.nn.Embedding(SMALL_Y, MEDIUM_X) m.initialize() @@ -41,6 +43,7 @@ def test_gluon_embedding(): assert b.asnumpy().size == LARGE_SIZE +@with_setup(teardown) def test_ndarray_zeros(): a = nd.zeros(shape=(LARGE_X, SMALL_Y)) assert a[-1][0] == 0 @@ -48,12 +51,14 @@ def test_ndarray_zeros(): assert a.size == LARGE_SIZE +@with_setup(teardown) def test_ndarray_ones(): a = nd.ones(shape=(LARGE_X, SMALL_Y)) assert a[-1][0] == 1 assert nd.sum(a).asnumpy() == LARGE_SIZE +@with_setup(teardown) def test_ndarray_convert(): a = nd.zeros(shape=(LARGE_X, SMALL_Y)) b = a.astype(np.int32) @@ -62,12 +67,14 @@ def test_ndarray_convert(): assert isinstance(b, mx.nd.sparse.RowSparseNDArray) +@with_setup(teardown) @with_seed() def test_ndarray_random_uniform(): a = nd.random.uniform(shape=(LARGE_X, SMALL_Y)) assert a[-1][0] != 0 +@with_setup(teardown) @with_seed() def test_ndarray_random_randint(): a = nd.random.randint(100, 10000, shape=(LARGE_X, SMALL_Y)) @@ -82,6 +89,7 @@ def test_ndarray_random_randint(): assert a[-1][0].dtype == np.int64 +@with_setup(teardown) @with_seed() def test_ndarray_random_exponential(): scale_array = nd.random.uniform(shape=(MEDIUM_X, SMALL_X)) @@ -90,6 +98,7 @@ def test_ndarray_random_exponential(): assert a.shape == (MEDIUM_X, SMALL_X, SMALL_X, SMALL_Y) +@with_setup(teardown) @with_seed() def test_ndarray_random_gamma(): alpha_array = nd.random.uniform(shape=(MEDIUM_X, SMALL_X)) @@ -100,6 +109,7 @@ def test_ndarray_random_gamma(): assert a.shape == (MEDIUM_X, SMALL_X, SMALL_X, SMALL_Y) +@with_setup(teardown) @with_seed() def test_ndarray_random_multinomial(): # test 1 shape dimension @@ -117,6 +127,7 @@ def test_ndarray_random_multinomial(): assert a[0].shape == (LARGE_X, 2, SMALL_Y) and a[0].shape == a[1].shape +@with_setup(teardown) @with_seed() def test_ndarray_random_generalized_negative_binomial(): alpha_array = nd.random.uniform(shape=(MEDIUM_X, SMALL_X)) @@ -127,6 +138,7 @@ def test_ndarray_random_generalized_negative_binomial(): assert a.shape == (MEDIUM_X, SMALL_X, SMALL_X, SMALL_Y) +@with_setup(teardown) @with_seed() def test_ndarray_random_negative_binomial(): k_array = nd.random.uniform(shape=(MEDIUM_X, SMALL_X)) @@ -137,6 +149,7 @@ def test_ndarray_random_negative_binomial(): assert a.shape == (MEDIUM_X, SMALL_X, SMALL_X, SMALL_Y) +@with_setup(teardown) @with_seed() def test_ndarray_random_normal(): scale_array = nd.random.uniform(shape=(MEDIUM_X, SMALL_X)) @@ -146,6 +159,7 @@ def test_ndarray_random_normal(): assert a.shape == (MEDIUM_X, SMALL_X, SMALL_X, SMALL_Y) +@with_setup(teardown) @with_seed() def test_ndarray_random_poisson(): lambda_array = nd.random.uniform(shape=(MEDIUM_X, SMALL_X)) @@ -154,6 +168,7 @@ def test_ndarray_random_poisson(): assert a.shape == (MEDIUM_X, SMALL_X, SMALL_X, SMALL_Y) +@with_setup(teardown) @with_seed() def test_ndarray_random_randn(): a = nd.random.randn(LARGE_X, SMALL_Y) @@ -162,6 +177,7 @@ def test_ndarray_random_randn(): # Add check for (x,y,m,n) where x,y shape of loc,scale and m,n input shape +@with_setup(teardown) @with_seed() def 
test_ndarray_random_shuffle(): a = nd.ones(shape=(LARGE_X, SMALL_Y)) @@ -178,11 +194,13 @@ def test_ndarray_random_shuffle(): assert a.shape == (LARGE_X, SMALL_Y) +@with_setup(teardown) def test_ndarray_empty(): a = nd.empty((LARGE_X, SMALL_Y)) assert a.shape == (LARGE_X, SMALL_Y) +@with_setup(teardown) def test_elementwise(): a = nd.ones(shape=(LARGE_X, SMALL_Y)) b = nd.ones(shape=(LARGE_X, SMALL_Y)) @@ -194,11 +212,13 @@ def test_elementwise(): assert np.sum(res[-1].asnumpy() == 2) == a.shape[1] +@with_setup(teardown) def test_reduce(): a = nd.ones(shape=(LARGE_X, SMALL_Y)) assert nd.sum(a).asnumpy() == a.shape[0] * a.shape[1] +@with_setup(teardown) def test_dot(): a = nd.ones(shape=(LARGE_X, SMALL_Y)) b = nd.ones(shape=(SMALL_Y, SMALL_Y)) @@ -206,6 +226,7 @@ def test_dot(): assert np.sum(res[-1].asnumpy() == SMALL_Y) == b.shape[1] +@with_setup(teardown) def test_FullyConnected(): a = nd.ones(shape=(LARGE_X, SMALL_Y)) b = nd.ones(shape=(SMALL_Y, SMALL_Y)) @@ -213,6 +234,7 @@ def test_FullyConnected(): assert np.sum(res[-1].asnumpy() == SMALL_Y) == b.shape[1] +@with_setup(teardown) def test_broadcast(): a = nd.ones(shape=(LARGE_X, SMALL_Y)) b = nd.arange(0, LARGE_X).reshape(LARGE_X, 1) @@ -222,12 +244,14 @@ def test_broadcast(): assert np.sum(res[-1].asnumpy() == LARGE_X) == a.shape[1] +@with_setup(teardown) def test_clip(): a = nd.arange(0, LARGE_X * SMALL_Y).reshape(LARGE_X, SMALL_Y) res = nd.clip(a, a_min=100, a_max=1000) assert np.sum(res[-1].asnumpy() == 1000) == a.shape[1] +@with_setup(teardown) def test_split(): a = nd.arange(0, LARGE_X * SMALL_Y).reshape(LARGE_X, SMALL_Y) outs = nd.split(a, num_outputs=SMALL_Y, axis=1) @@ -235,18 +259,21 @@ def test_split(): assert result == a.shape[1] +@with_setup(teardown) def test_argmin(): a = nd.arange(0, LARGE_X * SMALL_Y).reshape(LARGE_X, SMALL_Y) idx = mx.nd.argmin(a, axis=0) assert idx.shape[0] == SMALL_Y +@with_setup(teardown) def test_tile(): a = nd.arange(0, LARGE_X).reshape(LARGE_X, 1) b = nd.tile(a, reps=(1, SMALL_Y)) assert np.sum(b[-1].asnumpy() == LARGE_X) == b.shape[1] +@with_setup(teardown) def test_take(): a = nd.ones(shape=(LARGE_X, SMALL_Y)) idx = nd.arange(LARGE_X - 1000, LARGE_X) @@ -254,18 +281,21 @@ def test_take(): assert np.sum(res[-1].asnumpy() == 1) == res.shape[1] +@with_setup(teardown) def test_slice(): a = nd.ones(shape=(LARGE_X, SMALL_Y)) res = nd.slice(a, begin=(LARGE_X-1000, 1), end=(LARGE_X, SMALL_Y)) assert np.sum(res[-1].asnumpy() == 1) == res.shape[1] +@with_setup(teardown) def test_slice_assign(): a = nd.ones(shape=(LARGE_X, SMALL_Y)) a[LARGE_X-1:LARGE_X] = 1000 assert np.sum(a[-1].asnumpy() == 1000) == a.shape[1] +@with_setup(teardown) def test_expand_dims(): a = nd.ones(shape=(LARGE_X, SMALL_Y)) res = nd.expand_dims(a, axis=1) @@ -273,6 +303,7 @@ def test_expand_dims(): assert res.shape == (a.shape[0], 1, a.shape[1]) +@with_setup(teardown) def test_squeeze(): a = nd.ones(shape=(LARGE_X, SMALL_Y)) data = nd.expand_dims(a, axis=1) @@ -280,6 +311,7 @@ def test_squeeze(): assert res.shape == a.shape +@with_setup(teardown) def test_broadcast_div(): a = nd.ones(shape=(LARGE_X, SMALL_Y)) b = nd.ones(shape=(LARGE_X, 1)) * 2 @@ -287,6 +319,7 @@ def test_broadcast_div(): assert np.sum(res[-1].asnumpy() == 0.5) == a.shape[1] +@with_setup(teardown) def test_Dense(ctx=mx.cpu(0)): data = mx.nd.ones(shape=(50*1000*1000, 100)) linear = gluon.nn.Dense(100) @@ -295,6 +328,7 @@ def test_Dense(ctx=mx.cpu(0)): assert res.shape == (50000000, 100) +@with_setup(teardown) def test_where(): a = nd.ones(shape=(LARGE_X, SMALL_Y)) b 
= nd.arange(0, LARGE_X * SMALL_Y).reshape(LARGE_X, SMALL_Y) @@ -305,6 +339,7 @@ def test_where(): assert np.sum(res[0].asnumpy() == 1) == 10 +@with_setup(teardown) def test_pick(): a = mx.nd.ones(shape=(256 * 35, 1024 * 1024)) b = mx.nd.ones(shape=(256 * 35, )) @@ -312,6 +347,7 @@ def test_pick(): assert res.shape == b.shape +@with_setup(teardown) def test_depthtospace(): def numpy_depth_to_space(x, blocksize): b, c, h, w = x.shape[0], x.shape[1], x.shape[2], x.shape[3] @@ -330,6 +366,7 @@ def numpy_depth_to_space(x, blocksize): assert_almost_equal(output.asnumpy(), expected, atol=1e-3, rtol=1e-3) +@with_setup(teardown) def test_spacetodepth(): def numpy_space_to_depth(x, blocksize): b, c, h, w = x.shape[0], x.shape[1], x.shape[2], x.shape[3] @@ -348,6 +385,7 @@ def numpy_space_to_depth(x, blocksize): assert_almost_equal(output.asnumpy(), expected, atol=1e-3, rtol=1e-3) +@with_setup(teardown) @with_seed() def test_diag(): a_np = np.random.random((LARGE_X, SMALL_Y)).astype(np.float32) @@ -373,6 +411,7 @@ def test_diag(): assert_almost_equal(r.asnumpy(), np.diag(a_np, k=k)) +@with_setup(teardown) @with_seed() def test_ravel_multi_index(): x1, y1 = rand_coord_2d((LARGE_X - 100), LARGE_X, 10, SMALL_Y) @@ -385,6 +424,7 @@ def test_ravel_multi_index(): assert np.sum(1 for i in range(idx.size) if idx[i] == idx_numpy[i]) == 3 +@with_setup(teardown) @with_seed() def test_unravel_index(): x1, y1 = rand_coord_2d((LARGE_X - 100), LARGE_X, 10, SMALL_Y) @@ -397,6 +437,7 @@ def test_unravel_index(): assert (indices_2d.asnumpy() == np.array(original_2d_indices)).all() +@with_setup(teardown) def test_transpose(): b = create_2d_tensor(rows=LARGE_X, columns=SMALL_Y) t = b.T @@ -404,6 +445,7 @@ def test_transpose(): assert t.shape == (SMALL_Y, LARGE_X) +@with_setup(teardown) def test_swapaxes(): b = create_2d_tensor(rows=LARGE_X, columns=SMALL_Y) t = nd.swapaxes(b, dim1=0, dim2=1) @@ -411,6 +453,7 @@ def test_swapaxes(): assert t.shape == (SMALL_Y, LARGE_X) +@with_setup(teardown) def test_flip(): b = create_2d_tensor(rows=LARGE_X, columns=SMALL_Y) t = nd.flip(b, axis=0) @@ -418,6 +461,7 @@ def test_flip(): assert t.shape == (LARGE_X, SMALL_Y) +@with_setup(teardown) def test_softmax(): input_data = mx.nd.ones((SMALL_Y, LARGE_X)) true_output = np.full((SMALL_Y, LARGE_X), (1 / SMALL_Y)) @@ -425,6 +469,7 @@ def test_softmax(): assert_almost_equal(output.asnumpy(), true_output, rtol=1e-5, atol=1e-5) +@with_setup(teardown) def test_argsort(): b = create_2d_tensor(rows=LARGE_X, columns=SMALL_Y) s = nd.argsort(b, axis=0, is_ascend=False, dtype=np.int64) @@ -432,6 +477,7 @@ def test_argsort(): assert (s[0].asnumpy() == (LARGE_X - 1)).all() +@with_setup(teardown) def test_sort(): b = create_2d_tensor(rows=LARGE_X, columns=SMALL_Y) s = nd.sort(b, axis=0, is_ascend=False) @@ -440,6 +486,7 @@ def test_sort(): assert np.sum(s[0].asnumpy() == 0).all() +@with_setup(teardown) def test_topk(): b = create_2d_tensor(rows=LARGE_X, columns=SMALL_Y) k = nd.topk(b, k=10, axis=0, dtype=np.int64) @@ -452,6 +499,7 @@ def test_topk(): assert l.sum() == np.sum(np.arange(0, SMALL_Y)) +@with_setup(teardown) def test_exponent_logarithm_operators(): a = 2*nd.ones(shape=(LARGE_X, SMALL_Y)) # exponent @@ -485,6 +533,7 @@ def test_exponent_logarithm_operators(): assert result.shape == a.shape +@with_setup(teardown) def test_power_operators(): a = 2*nd.ones(shape=(LARGE_X, SMALL_Y)) # sqrt @@ -518,6 +567,7 @@ def test_power_operators(): assert result.shape == a.shape +@with_setup(teardown) def test_sequence_mask(): # Sequence Mask input 
[max_sequence_length, batch_size, other_feature_dims] # test with input batch_size = 2 @@ -541,6 +591,7 @@ def test_sequence_mask(): assert b[-1][-1][-1] == -1 +@with_setup(teardown) def test_sequence_reverse(): a = nd.arange(0, LARGE_X * SMALL_Y * 2).reshape(LARGE_X, 2, SMALL_Y) # test as reverse operator @@ -557,6 +608,7 @@ def test_sequence_reverse(): assert b.shape == a.shape +@with_setup(teardown) def test_sequence_last(): a = nd.arange(0, LARGE_X * SMALL_Y * 2).reshape(LARGE_X, 2, SMALL_Y) @@ -574,6 +626,7 @@ def test_sequence_last(): assert b[0][-1] == a[1][0][-1] +@with_setup(teardown) def test_softmax_cross_entropy(): # dtype of input data, mxnet cross entropy set explicitly to float64 # numpy implicitly takes care of double precision @@ -598,6 +651,7 @@ def test_softmax_cross_entropy(): true_softmax_cross_entropy, rtol=1e-3, atol=1e-5) +@with_setup(teardown) def test_index_copy(): x = mx.nd.zeros((LARGE_X, SMALL_Y)) t = mx.nd.arange(1, SMALL_Y + 1).reshape((1, SMALL_Y)) @@ -607,6 +661,7 @@ def test_index_copy(): assert x[-1][-1] == t[0][-1] +@with_setup(teardown) def testSoftmaxOutput(): x = mx.sym.Variable('x') label = mx.sym.Variable('label') @@ -633,6 +688,7 @@ def testSoftmaxOutput(): # TODO: correctness of prelu (currently flaky) +@with_setup(teardown) def test_leaky_relu(): a = -1*mx.nd.ones((LARGE_X, SMALL_Y)) @@ -662,6 +718,7 @@ def test_rrelu(): test_rrelu() +@with_setup(teardown) def test_pooling(): a = mx.nd.ones((MEDIUM_X, 200, SMALL_Y, SMALL_Y)) @@ -695,6 +752,7 @@ def test_lp_pooling(): test_lp_pooling() +@with_setup(teardown) def test_layer_norm(): dtype = np.float32 forward_check_eps = 1E-3 @@ -735,6 +793,7 @@ def npy_layer_norm(data, gamma, beta, axis=1, eps=1E-5): # TODO: correctness of dropout # currently only test for dropout to work # since testing for correctness involves flakiness issue #14288 +@with_setup(teardown) def test_dropout(): shape = (LARGE_X, SMALL_Y) x = mx.sym.var('data') @@ -746,6 +805,7 @@ def test_dropout(): assert out[0].shape == shape +@with_setup(teardown) def test_activation(): x = mx.nd.ones((LARGE_X, SMALL_Y)) test_x = -2 @@ -778,6 +838,7 @@ def test_activation(): # TODO: correctness of batchnorm # in future, we could test if mean, var of output # matches target output's mean, var +@with_setup(teardown) def test_batchnorm(): shape = (LARGE_X, SMALL_Y) axis = 1 # default @@ -794,6 +855,7 @@ def test_batchnorm(): assert output.shape == shape +@with_setup(teardown) def test_add(): a = nd.ones(shape=(LARGE_X, SMALL_Y)) b = nd.ones(shape=(LARGE_X, SMALL_Y)) @@ -803,6 +865,7 @@ def test_add(): assert c.shape == a.shape +@with_setup(teardown) def test_sub(): a = 3*nd.ones(shape=(LARGE_X, SMALL_Y)) b = nd.ones(shape=(LARGE_X, SMALL_Y)) @@ -812,6 +875,7 @@ def test_sub(): assert c.shape == a.shape +@with_setup(teardown) def test_rsub(): a = 3*nd.ones(shape=(LARGE_X, SMALL_Y)) b = nd.ones(shape=(LARGE_X, SMALL_Y)) @@ -821,6 +885,7 @@ def test_rsub(): assert c.shape == a.shape +@with_setup(teardown) def test_neg(): a = nd.ones(shape=(LARGE_X, SMALL_Y)) c = a @@ -829,6 +894,7 @@ def test_neg(): assert c.shape == a.shape +@with_setup(teardown) def test_mul(): a = 2*nd.ones(shape=(LARGE_X, SMALL_Y)) b = 3*nd.ones(shape=(LARGE_X, SMALL_Y)) @@ -838,6 +904,7 @@ def test_mul(): assert c.shape == a.shape +@with_setup(teardown) def test_div(): a = 2*nd.ones(shape=(LARGE_X, SMALL_Y)) b = 3*nd.ones(shape=(LARGE_X, SMALL_Y)) @@ -847,6 +914,7 @@ def test_div(): assert c.shape == a.shape +@with_setup(teardown) def test_rdiv(): a = 2*nd.ones(shape=(LARGE_X, 
SMALL_Y)) b = 3*nd.ones(shape=(LARGE_X, SMALL_Y)) @@ -856,6 +924,7 @@ def test_rdiv(): assert c.shape == a.shape +@with_setup(teardown) def test_mod(): a = 2*nd.ones(shape=(LARGE_X, SMALL_Y)) b = 3*nd.ones(shape=(LARGE_X, SMALL_Y)) @@ -865,6 +934,7 @@ def test_mod(): assert c.shape == a.shape +@with_setup(teardown) def test_rmod(): a = 2*nd.ones(shape=(LARGE_X, SMALL_Y)) b = 3*nd.ones(shape=(LARGE_X, SMALL_Y)) @@ -874,6 +944,7 @@ def test_rmod(): assert c.shape == a.shape +@with_setup(teardown) def test_imod(): a = 2*nd.ones(shape=(LARGE_X, SMALL_Y)) b = 3*nd.ones(shape=(LARGE_X, SMALL_Y)) @@ -883,6 +954,7 @@ def test_imod(): assert c.shape == a.shape +@with_setup(teardown) def test_pow(): a = 2*nd.ones(shape=(LARGE_X, SMALL_Y)) b = 3*nd.ones(shape=(LARGE_X, SMALL_Y)) @@ -892,6 +964,7 @@ def test_pow(): assert c.shape == a.shape +@with_setup(teardown) def test_rpow(): a = 2*nd.ones(shape=(LARGE_X, SMALL_Y)) b = 3*nd.ones(shape=(LARGE_X, SMALL_Y)) @@ -901,18 +974,21 @@ def test_rpow(): assert c.shape == a.shape +@with_setup(teardown) def test_shape(): b = create_2d_tensor(rows=SMALL_Y, columns=LARGE_X) mx.nd.waitall() assert b.shape == (SMALL_Y, LARGE_X) +@with_setup(teardown) def test_size(): b = create_2d_tensor(rows=SMALL_Y, columns=LARGE_X) mx.nd.waitall() assert b.size == LARGE_SIZE +@with_setup(teardown) def test_copy(): a = nd.ones((SMALL_Y, LARGE_X)) b = a.copy() @@ -921,6 +997,7 @@ def test_copy(): assert b.size == LARGE_SIZE +@with_setup(teardown) def test_copy_to(): a = create_2d_tensor(rows=SMALL_Y, columns=LARGE_X) b = nd.array(np.zeros((SMALL_Y, LARGE_X))) @@ -929,6 +1006,7 @@ def test_copy_to(): assert b[-1][-1] == SMALL_Y-1 +@with_setup(teardown) def test_zeros_like(): a = nd.array(np.ones((SMALL_Y, LARGE_X))) b = nd.zeros_like(a) @@ -936,6 +1014,7 @@ def test_zeros_like(): assert b.shape == a.shape +@with_setup(teardown) def test_ones_like(): a = nd.array(np.zeros((SMALL_Y, LARGE_X))) b = nd.ones_like(a) @@ -943,6 +1022,7 @@ def test_ones_like(): assert b.shape == a.shape +@with_setup(teardown) def test_reshape_like(): a = nd.array(np.zeros((SMALL_Y, LARGE_X))) b = nd.array(np.zeros((SMALL_Y//2, LARGE_X*2))) @@ -950,6 +1030,7 @@ def test_reshape_like(): assert c.shape == (SMALL_Y//2, LARGE_X*2) +@with_setup(teardown) def test_flatten(): a = create_2d_tensor(rows=LARGE_X, columns=SMALL_Y).reshape((LARGE_X//2, 2, SMALL_Y)) b = nd.flatten(a) @@ -958,6 +1039,7 @@ def test_flatten(): assert b.shape == (LARGE_X//2, SMALL_Y*2) +@with_setup(teardown) def test_concat(): a = nd.array(np.ones((SMALL_Y, LARGE_X))) b = nd.array(np.zeros((SMALL_Y, LARGE_X))) @@ -965,6 +1047,7 @@ def test_concat(): assert c.shape == (b.shape[0]*2, LARGE_X) +@with_setup(teardown) def test_stack(): a = nd.array(np.ones((SMALL_Y, LARGE_X))) b = nd.array(np.zeros((SMALL_Y, LARGE_X))) @@ -972,30 +1055,35 @@ def test_stack(): assert c.shape == (b.shape[0], 2, LARGE_X) +@with_setup(teardown) def test_broadcast_axes(): a = create_2d_tensor(rows=1, columns=LARGE_X) b = nd.broadcast_axis(a, axis=[0], size=2) assert b.shape == (a.shape[0]*2, a.shape[1]) +@with_setup(teardown) def test_sum(): a = nd.array(np.ones((SMALL_Y, LARGE_X))) b = nd.sum(a, axis=1) assert b.shape[0] == SMALL_Y +@with_setup(teardown) def test_prod(): a = nd.array(np.ones((SMALL_Y, LARGE_X))) b = nd.prod(a, axis=1) assert b.shape[0] == SMALL_Y +@with_setup(teardown) def test_mean(): a = create_2d_tensor(rows=SMALL_Y, columns=LARGE_X) b = nd.mean(a, axis=0) assert b[0] == (SMALL_Y/2-1) +@with_setup(teardown) def test_min(): a = 
create_2d_tensor(rows=SMALL_Y, columns=LARGE_X) b = nd.min(a, axis=0) @@ -1003,6 +1091,7 @@ def test_min(): assert b[-1] == 0 +@with_setup(teardown) def test_max(): a = create_2d_tensor(rows=SMALL_Y, columns=LARGE_X) b = nd.max(a, axis=0) @@ -1010,6 +1099,7 @@ def test_max(): assert b[-1] == (SMALL_Y-1) +@with_setup(teardown) def test_norm(): a = np.array(np.full((1, LARGE_X), 3)) b = np.array(np.full((1, LARGE_X), 4)) @@ -1022,6 +1112,7 @@ def test_norm(): assert e[-1] == 7 +@with_setup(teardown) def test_argmax(): a = np.ones((SMALL_Y, LARGE_X)) b = np.zeros((SMALL_Y, LARGE_X)) @@ -1031,6 +1122,7 @@ def test_argmax(): assert d[-1] == d[0] == 0 +@with_setup(teardown) def test_relu(): def frelu(x): return np.maximum(x, 0.0) @@ -1048,6 +1140,7 @@ def frelu_grad(x): check_symbolic_forward(y, [xa], [ya]) +@with_setup(teardown) def test_sigmoid(): def fsigmoid(a): return np.divide(1.0, (1.0 + np.exp(-a))) @@ -1059,6 +1152,7 @@ def fsigmoid(a): check_symbolic_forward(y, [xa], [ya]) +@with_setup(teardown) def np_softmax(x, axis=-1, temperature=1.0): x = x - np.max(x, axis=axis, keepdims=True) x = np.exp(x/temperature) @@ -1066,6 +1160,7 @@ def np_softmax(x, axis=-1, temperature=1.0): return x +@with_setup(teardown) def test_log_softmax(): ndim = 2 shape = (SMALL_Y, LARGE_X) @@ -1075,6 +1170,7 @@ def test_log_softmax(): check_symbolic_forward(sym, [data], [np.log(np_softmax(data, axis=axis)+1e-20)]) +@with_setup(teardown) def test_iadd(): a = nd.array(np.ones((SMALL_Y, LARGE_X))) b = nd.array(np.ones((SMALL_Y, LARGE_X))) @@ -1084,6 +1180,7 @@ def test_iadd(): assert c[0][-1] == 2 +@with_setup(teardown) def test_isub(): a = nd.array(np.array(np.full((SMALL_Y, LARGE_X), 3))) b = nd.array(np.ones((SMALL_Y, LARGE_X))) @@ -1093,6 +1190,7 @@ def test_isub(): assert c[0][-1] == 2 +@with_setup(teardown) def test_imul(): a = nd.array(np.array(np.full((SMALL_Y, LARGE_X), 3))) b = nd.array(np.ones((SMALL_Y, LARGE_X))) @@ -1102,6 +1200,7 @@ def test_imul(): assert c[0][-1] == 3 +@with_setup(teardown) def test_idiv(): a = nd.array(np.array(np.full((SMALL_Y, LARGE_X), 4))) b = nd.array(np.array(np.full((SMALL_Y, LARGE_X), 2))) @@ -1111,6 +1210,7 @@ def test_idiv(): assert c[0][-1] == 2 +@with_setup(teardown) def test_eq(): a = nd.array(np.array(np.full((SMALL_Y, LARGE_X), 3))) b = nd.array(np.array(np.full((SMALL_Y, LARGE_X), 3))) @@ -1118,6 +1218,7 @@ def test_eq(): assert np.sum(c[0].asnumpy() == 1).all() +@with_setup(teardown) def test_neq(): a = nd.array(np.array(np.full((SMALL_Y, LARGE_X), 2))) b = nd.array(np.array(np.full((SMALL_Y, LARGE_X), 3))) @@ -1125,6 +1226,7 @@ def test_neq(): assert np.sum(c[0].asnumpy() == 1).all() +@with_setup(teardown) def test_lt(): a = nd.array(np.array(np.full((SMALL_Y, LARGE_X), 2))) b = nd.array(np.array(np.full((SMALL_Y, LARGE_X), 3))) @@ -1132,6 +1234,7 @@ def test_lt(): assert np.sum(d[0].asnumpy() == 1).all() +@with_setup(teardown) def test_lte(): a = nd.array(np.array(np.full((SMALL_Y, LARGE_X), 2))) b = nd.array(np.array(np.full((SMALL_Y, LARGE_X), 3))) @@ -1142,6 +1245,7 @@ def test_lte(): assert np.sum(e[0].asnumpy() == 1).all() +@with_setup(teardown) def test_gt(): a = nd.array(np.array(np.full((SMALL_Y, LARGE_X), 3))) b = nd.array(np.array(np.full((SMALL_Y, LARGE_X), 2))) @@ -1149,6 +1253,7 @@ def test_gt(): assert np.sum(d[0].asnumpy() == 1).all() +@with_setup(teardown) def test_gte(): a = nd.array(np.array(np.full((SMALL_Y, LARGE_X), 3))) b = nd.array(np.array(np.full((SMALL_Y, LARGE_X), 2))) @@ -1159,6 +1264,7 @@ def test_gte(): assert np.sum(e[0].asnumpy() 
== 1).all() +@with_setup(teardown) def test_slice_like(): a = create_2d_tensor(rows=SMALL_Y, columns=LARGE_X) b = nd.array(np.ones((SMALL_Y//2, LARGE_X//2))) @@ -1173,6 +1279,7 @@ def test_slice_like(): assert e[-1][-1] == (SMALL_Y-1) +@with_setup(teardown) def test_slice_axis(): a = create_2d_tensor(rows=SMALL_Y, columns=LARGE_X) c = nd.slice_axis(a, axis=0, begin=0, end=SMALL_Y//2) @@ -1183,6 +1290,7 @@ def test_slice_axis(): assert d[-1][-1] == (SMALL_Y-1) +@with_setup(teardown) def test_one_hot(): # default dtype of ndarray is float32 which cannot index elements over 2^32 a = nd.array([1, (VLARGE_X - 1)], dtype=np.int64) @@ -1191,6 +1299,7 @@ def test_one_hot(): b[1][-1] == 1 +@with_setup(teardown) def test_full(): a = nd.full((SMALL_Y, LARGE_X), 3) assert a.shape == (SMALL_Y, LARGE_X) diff --git a/tests/nightly/test_large_vector.py b/tests/nightly/test_large_vector.py index 169f5244d784..f857d8ae4933 100644 --- a/tests/nightly/test_large_vector.py +++ b/tests/nightly/test_large_vector.py @@ -22,12 +22,14 @@ from mxnet.test_utils import rand_ndarray, assert_almost_equal, rand_coord_2d, create_vector from mxnet import gluon, nd from tests.python.unittest.common import with_seed, teardown +from nose.tools import with_setup # dimension constants LARGE_X = 4300000000 MEDIUM_X = 1000000000 +@with_setup(teardown) def test_slice(): a = nd.ones(LARGE_X) res = nd.slice(a, begin=(LARGE_X - MEDIUM_X), end=LARGE_X) @@ -35,6 +37,7 @@ def test_slice(): assert res[0] == 1 +@with_setup(teardown) def test_ndarray_zeros(): a = nd.zeros(shape=LARGE_X) assert a[-1] == 0 @@ -42,18 +45,21 @@ def test_ndarray_zeros(): assert a.size == LARGE_X +@with_setup(teardown) def test_ndarray_ones(): a = nd.ones(shape=LARGE_X) assert a[-1] == 1 assert nd.sum(a) == LARGE_X +@with_setup(teardown) @with_seed() def test_ndarray_random_uniform(): a = nd.random.uniform(shape=LARGE_X) assert a[-1] != 0 +@with_setup(teardown) @with_seed() def test_ndarray_random_randint(): # check if randint can generate value greater than 2**32 (large) @@ -64,11 +70,13 @@ def test_ndarray_random_randint(): assert (a >= low).all() and (a < high).all() +@with_setup(teardown) def test_ndarray_empty(): a = nd.empty(LARGE_X) assert a.shape == (LARGE_X,) +@with_setup(teardown) def test_elementwise(): a = nd.ones(shape=LARGE_X) b = nd.ones(shape=LARGE_X) @@ -80,12 +88,14 @@ def test_elementwise(): assert res[-1].asnumpy() == 3 +@with_setup(teardown) def test_clip(): a = create_vector(LARGE_X) res = nd.clip(a, a_min=100, a_max=1000) assert res[-1] == 1000 +@with_setup(teardown) def test_argmin(): a = create_vector(LARGE_X, dtype=np.float32) assert a[0] == 0 @@ -94,6 +104,7 @@ def test_argmin(): assert idx.shape[0] == 1 +@with_setup(teardown) def test_take(): a = nd.ones(shape=LARGE_X) idx = nd.arange(LARGE_X - 1000, LARGE_X) @@ -101,12 +112,14 @@ def test_take(): assert np.sum(res.asnumpy() == 1) == res.shape[0] +@with_setup(teardown) def test_slice_assign(): a = nd.ones(shape=LARGE_X) a[LARGE_X-1:LARGE_X] = 1000 assert np.sum(a[-1].asnumpy() == 1000) == 1 +@with_setup(teardown) def test_expand_dims(): a = nd.ones(shape=LARGE_X) res = nd.expand_dims(a, axis=0) @@ -114,6 +127,7 @@ def test_expand_dims(): assert res.shape == (1, a.shape[0]) +@with_setup(teardown) def test_squeeze(): a = nd.ones(shape=LARGE_X) data = nd.expand_dims(a, axis=0) @@ -122,6 +136,7 @@ def test_squeeze(): assert res.shape == a.shape +@with_setup(teardown) def test_broadcast_div(): a = nd.ones(shape=LARGE_X) b = nd.ones(shape=LARGE_X) * 2 @@ -129,6 +144,7 @@ def 
test_broadcast_div(): assert np.sum(res.asnumpy() == 0.5) == a.shape[0] +@with_setup(teardown) def test_Dense(ctx=mx.cpu(0)): data = mx.nd.ones(shape=LARGE_X) linear = gluon.nn.Dense(2) @@ -137,12 +153,14 @@ def test_Dense(ctx=mx.cpu(0)): assert res.shape == (LARGE_X, 2) +@with_setup(teardown) def test_argsort(): a = create_vector(size=LARGE_X) s = nd.argsort(a, axis=0, is_ascend=False, dtype=np.int64) assert s[0] == (LARGE_X - 1) +@with_setup(teardown) def test_sort(): a = create_vector(size=LARGE_X) @@ -158,6 +176,7 @@ def test_ascend(x): test_ascend(a) +@with_setup(teardown) def test_topk(): a = create_vector(size=LARGE_X) ind = nd.topk(a, k=10, axis=0, dtype=np.int64) @@ -168,13 +187,15 @@ def test_topk(): val = nd.topk(a, k=1, axis=0, dtype=np.int64, ret_typ="value") assert val == (LARGE_X - 1) - + +@with_setup(teardown) def test_mean(): a = nd.arange(-LARGE_X // 2, LARGE_X // 2 + 1, dtype=np.int64) b = nd.mean(a, axis=0) assert b == 0 +@with_setup(teardown) @with_seed() def test_ndarray_random_exponential(): a = nd.random.exponential(shape=LARGE_X) @@ -182,6 +203,7 @@ def test_ndarray_random_exponential(): assert a.shape[0] == LARGE_X +@with_setup(teardown) @with_seed() def test_ndarray_random_gamma(): a = nd.random.gamma(shape=LARGE_X) @@ -189,6 +211,7 @@ def test_ndarray_random_gamma(): assert a.shape[0] == LARGE_X +@with_setup(teardown) @with_seed() def test_ndarray_random_generalized_negative_binomial(): a = nd.random.generalized_negative_binomial(shape=LARGE_X) @@ -196,6 +219,7 @@ def test_ndarray_random_generalized_negative_binomial(): assert a.shape[0] == LARGE_X +@with_setup(teardown) @with_seed() def test_ndarray_random_multinomial(): a = nd.random.multinomial(nd.random.uniform(shape=LARGE_X)) @@ -203,6 +227,7 @@ def test_ndarray_random_multinomial(): assert a.shape[0] == 1 +@with_setup(teardown) @with_seed() def test_ndarray_random_negative_binomial(): a = nd.random.negative_binomial(shape=LARGE_X) @@ -210,12 +235,14 @@ def test_ndarray_random_negative_binomial(): assert a.shape[0] == LARGE_X +@with_setup(teardown) @with_seed() def test_ndarray_random_normal(): a = nd.random.normal(shape=LARGE_X) assert a.shape[0] == LARGE_X +@with_setup(teardown) @with_seed() def test_ndarray_random_poisson(): a = nd.random.poisson(shape=LARGE_X) @@ -223,12 +250,14 @@ def test_ndarray_random_poisson(): assert a.shape[0] == LARGE_X +@with_setup(teardown) @with_seed() def test_ndarray_random_randn(): a = nd.random.randn(LARGE_X) assert a.shape[0] == LARGE_X +@with_setup(teardown) @with_seed() def test_ndarray_random_shuffle(): a = nd.ones(shape=LARGE_X) @@ -241,6 +270,7 @@ def test_ndarray_random_shuffle(): assert a.shape[0] == LARGE_X +@with_setup(teardown) def test_exponent_logarithm_operators(): a = 2*nd.ones(shape=LARGE_X) # exponent @@ -274,6 +304,7 @@ def test_exponent_logarithm_operators(): assert result.shape == a.shape +@with_setup(teardown) def test_power_operators(): a = 2*nd.ones(shape=LARGE_X) # sqrt @@ -307,6 +338,7 @@ def test_power_operators(): assert result.shape == a.shape +@with_setup(teardown) def test_sequence_mask(): # Sequence Mask input [max_sequence_length, batch_size] # test with input batch_size = 2 @@ -330,6 +362,7 @@ def test_sequence_mask(): assert b[-1][-1] == -1 +@with_setup(teardown) def test_sequence_reverse(): a = nd.arange(0, LARGE_X * 2).reshape(LARGE_X, 2) # test as reverse operator @@ -345,6 +378,7 @@ def test_sequence_reverse(): assert b.shape == a.shape +@with_setup(teardown) def test_sequence_last(): a = nd.arange(0, LARGE_X * 2).reshape(LARGE_X, 2) 
@@ -366,6 +400,7 @@ def test_sequence_last(): # TODO: correctness of layernorm # numpy implementation for large vector is flaky +@with_setup(teardown) def test_layer_norm(): axis = 0 eps = 1E-5 @@ -381,6 +416,7 @@ def test_layer_norm(): # TODO: correctness of batchnorm # in future, we could test if mean, var of output # matches target output's mean, var +@with_setup(teardown) def test_batchnorm(): shape = LARGE_X axis = 0 # since vector @@ -396,6 +432,7 @@ def test_batchnorm(): assert output.shape == (shape,) +@with_setup(teardown) def test_add(): a = nd.ones(shape=LARGE_X) b = nd.ones(shape=LARGE_X) @@ -405,6 +442,7 @@ def test_add(): assert c.shape == a.shape +@with_setup(teardown) def test_sub(): a = 3*nd.ones(shape=LARGE_X) b = nd.ones(shape=LARGE_X) @@ -414,6 +452,7 @@ def test_sub(): assert c.shape == a.shape +@with_setup(teardown) def test_rsub(): a = 3*nd.ones(shape=LARGE_X) b = nd.ones(shape=LARGE_X) @@ -423,6 +462,7 @@ def test_rsub(): assert c.shape == a.shape +@with_setup(teardown) def test_neg(): a = nd.ones(shape=LARGE_X) c = a @@ -431,6 +471,7 @@ def test_neg(): assert c.shape == a.shape +@with_setup(teardown) def test_mul(): a = 2*nd.ones(shape=LARGE_X) b = 3*nd.ones(shape=LARGE_X) @@ -440,6 +481,7 @@ def test_mul(): assert c.shape == a.shape +@with_setup(teardown) def test_div(): a = 2*nd.ones(shape=LARGE_X) b = 3*nd.ones(shape=LARGE_X) @@ -449,6 +491,7 @@ def test_div(): assert c.shape == a.shape +@with_setup(teardown) def test_rdiv(): a = 2*nd.ones(shape=LARGE_X) b = 3*nd.ones(shape=LARGE_X) @@ -458,6 +501,7 @@ def test_rdiv(): assert c.shape == a.shape +@with_setup(teardown) def test_mod(): a = 2*nd.ones(shape=LARGE_X) b = 3*nd.ones(shape=LARGE_X) @@ -467,6 +511,7 @@ def test_mod(): assert c.shape == a.shape +@with_setup(teardown) def test_rmod(): a = 2*nd.ones(shape=LARGE_X) b = 3*nd.ones(shape=LARGE_X) @@ -476,6 +521,7 @@ def test_rmod(): assert c.shape == a.shape +@with_setup(teardown) def test_imod(): a = 2*nd.ones(shape=LARGE_X) b = 3*nd.ones(shape=LARGE_X) @@ -485,6 +531,7 @@ def test_imod(): assert c.shape == a.shape +@with_setup(teardown) def test_pow(): a = 2*nd.ones(shape=LARGE_X) b = 3*nd.ones(shape=LARGE_X) @@ -494,6 +541,7 @@ def test_pow(): assert c.shape == a.shape +@with_setup(teardown) def test_rpow(): a = 2*nd.ones(shape=LARGE_X) b = 3*nd.ones(shape=LARGE_X) @@ -503,20 +551,23 @@ def test_rpow(): assert c.shape == a.shape +@with_setup(teardown) def test_shape(): b = create_vector(size=LARGE_X) - #explicit wait_to_read() + # explicit wait_to_read() assert b[0] == 0 assert b.shape[0] == LARGE_X +@with_setup(teardown) def test_size(): b = create_vector(size=LARGE_X) - #explicit wait_to_read() + # explicit wait_to_read() assert b[0] == 0 assert b.size == LARGE_X +@with_setup(teardown) def test_copy(): a = nd.ones(LARGE_X) b = a.copy() @@ -525,6 +576,7 @@ def test_copy(): assert b.size == LARGE_X +@with_setup(teardown) def test_copy_to(): a = create_vector(size=LARGE_X) # keeping dtype same as input uses parallel copy which is much faster @@ -535,6 +587,7 @@ def test_copy_to(): assert b[0] == 0 +@with_setup(teardown) def test_zeros_like(): a = nd.ones(LARGE_X) b = nd.zeros_like(a) @@ -542,6 +595,7 @@ def test_zeros_like(): assert b.shape == a.shape +@with_setup(teardown) def test_ones_like(): a = nd.zeros(LARGE_X) b = nd.ones_like(a) @@ -549,27 +603,31 @@ def test_ones_like(): assert b.shape == a.shape +@with_setup(teardown) def test_concat(): a = nd.ones(LARGE_X) b = nd.zeros(LARGE_X) - c = nd.concat(a,b, dim=0) + c = nd.concat(a, b, dim=0) assert c[0][0] 
== 1 assert c[-1][-1] == 0 assert c.shape[0] == (2 * LARGE_X) +@with_setup(teardown) def test_sum(): a = nd.ones(LARGE_X) b = nd.sum(a, axis=0) assert b[0] == LARGE_X +@with_setup(teardown) def test_prod(): a = nd.ones(LARGE_X) b = nd.prod(a, axis=0) assert b[0] == 1 +@with_setup(teardown) def test_min(): a = create_vector(size=LARGE_X) b = nd.min(a, axis=0) @@ -577,12 +635,14 @@ def test_min(): assert b[-1] == 0 +@with_setup(teardown) def test_max(): a = create_vector(size=LARGE_X) b = nd.max(a, axis=0) assert b[0] == (LARGE_X - 1) +@with_setup(teardown) def test_argmax(): a = nd.ones(LARGE_X) b = nd.zeros(LARGE_X) @@ -592,6 +652,7 @@ def test_argmax(): assert d == 0 +@with_setup(teardown) def np_softmax(x, axis=-1, temperature=1.0): x = x - np.max(x, axis=axis, keepdims=True) x = np.exp(x/temperature) @@ -599,6 +660,7 @@ def np_softmax(x, axis=-1, temperature=1.0): return x +@with_setup(teardown) def test_iadd(): a = nd.ones(LARGE_X) b = nd.ones(LARGE_X) @@ -608,6 +670,7 @@ def test_iadd(): assert c[-1] == 2 +@with_setup(teardown) def test_isub(): a = nd.full(LARGE_X, 3) b = nd.ones(LARGE_X) @@ -617,6 +680,7 @@ def test_isub(): assert c[-1] == 2 +@with_setup(teardown) def test_imul(): a = nd.full(LARGE_X, 3) b = nd.ones(LARGE_X) @@ -626,6 +690,7 @@ def test_imul(): assert c[-1] == 3 +@with_setup(teardown) def test_idiv(): a = nd.full(LARGE_X, 4) b = nd.full(LARGE_X, 2) @@ -635,15 +700,7 @@ def test_idiv(): assert c[-1] == 2 -def test_imod(): - a = nd.full(LARGE_X, 3) - b = nd.full(LARGE_X, 2) - c = a - c %= b - assert c.shape == a.shape - assert c[0][-1] == 1 - - +@with_setup(teardown) def test_eq(): a = nd.full(LARGE_X, 3) b = nd.full(LARGE_X, 3) @@ -651,6 +708,7 @@ def test_eq(): assert (c.asnumpy() == 1).all() +@with_setup(teardown) def test_neq(): a = nd.full(LARGE_X, 2) b = nd.full(LARGE_X, 3) @@ -658,6 +716,7 @@ def test_neq(): assert (c.asnumpy() == 1).all() +@with_setup(teardown) def test_lt(): a = nd.full(LARGE_X, 2) b = nd.full(LARGE_X, 3) @@ -665,6 +724,7 @@ def test_lt(): assert (d.asnumpy() == 1).all() +@with_setup(teardown) def test_lte(): a = nd.full(LARGE_X, 2) b = nd.full(LARGE_X, 3) @@ -675,6 +735,7 @@ def test_lte(): assert (d.asnumpy() == 1).all() +@with_setup(teardown) def test_gt(): a = nd.full(LARGE_X, 3) b = nd.full(LARGE_X, 2) @@ -682,6 +743,7 @@ def test_gt(): assert (d.asnumpy() == 1).all() +@with_setup(teardown) def test_gte(): a = nd.full(LARGE_X, 3) b = nd.full(LARGE_X, 2) @@ -692,6 +754,7 @@ def test_gte(): assert (d.asnumpy() == 1).all() +@with_setup(teardown) def test_slice_like(): a = create_vector(size=LARGE_X) b = nd.ones(LARGE_X//2) @@ -701,6 +764,7 @@ def test_slice_like(): assert c[-1] == (LARGE_X // 2 - 1) +@with_setup(teardown) def test_slice_axis(): a = create_vector(size=LARGE_X) med = LARGE_X // 2 @@ -709,6 +773,7 @@ def test_slice_axis(): assert c[-1][0] == (med - 1) +@with_setup(teardown) def test_full(): a = nd.full(LARGE_X, 3) assert a.shape[0] == LARGE_X From c48f70f2e71c7d4b5b6cc3bf4ff6a37710d74d9a Mon Sep 17 00:00:00 2001 From: ChaiBapchya Date: Fri, 11 Oct 2019 12:13:50 -0700 Subject: [PATCH 12/13] post test cleanup --- tests/nightly/test_large_array.py | 218 ++++++++++++++--------------- tests/nightly/test_large_vector.py | 148 ++++++++++---------- tests/python/unittest/common.py | 21 +++ 3 files changed, 204 insertions(+), 183 deletions(-) diff --git a/tests/nightly/test_large_array.py b/tests/nightly/test_large_array.py index b77bb5b367dd..748d6e49fabc 100644 --- a/tests/nightly/test_large_array.py +++ 
b/tests/nightly/test_large_array.py @@ -21,7 +21,7 @@ from mxnet.test_utils import rand_ndarray, assert_almost_equal, rand_coord_2d, default_context, check_symbolic_forward, create_2d_tensor from mxnet import gluon, nd -from tests.python.unittest.common import with_seed, teardown +from tests.python.unittest.common import with_seed, with_post_test_cleanup from nose.tools import with_setup # dimension constants @@ -33,7 +33,7 @@ LARGE_SIZE = LARGE_X * SMALL_Y -@with_setup(teardown) +@with_post_test_cleanup() def test_gluon_embedding(): m = gluon.nn.Embedding(SMALL_Y, MEDIUM_X) m.initialize() @@ -43,7 +43,7 @@ def test_gluon_embedding(): assert b.asnumpy().size == LARGE_SIZE -@with_setup(teardown) +@with_post_test_cleanup() def test_ndarray_zeros(): a = nd.zeros(shape=(LARGE_X, SMALL_Y)) assert a[-1][0] == 0 @@ -51,14 +51,14 @@ def test_ndarray_zeros(): assert a.size == LARGE_SIZE -@with_setup(teardown) +@with_post_test_cleanup() def test_ndarray_ones(): a = nd.ones(shape=(LARGE_X, SMALL_Y)) assert a[-1][0] == 1 assert nd.sum(a).asnumpy() == LARGE_SIZE -@with_setup(teardown) +@with_post_test_cleanup() def test_ndarray_convert(): a = nd.zeros(shape=(LARGE_X, SMALL_Y)) b = a.astype(np.int32) @@ -67,14 +67,14 @@ def test_ndarray_convert(): assert isinstance(b, mx.nd.sparse.RowSparseNDArray) -@with_setup(teardown) +@with_post_test_cleanup() @with_seed() def test_ndarray_random_uniform(): a = nd.random.uniform(shape=(LARGE_X, SMALL_Y)) assert a[-1][0] != 0 -@with_setup(teardown) +@with_post_test_cleanup() @with_seed() def test_ndarray_random_randint(): a = nd.random.randint(100, 10000, shape=(LARGE_X, SMALL_Y)) @@ -89,7 +89,7 @@ def test_ndarray_random_randint(): assert a[-1][0].dtype == np.int64 -@with_setup(teardown) +@with_post_test_cleanup() @with_seed() def test_ndarray_random_exponential(): scale_array = nd.random.uniform(shape=(MEDIUM_X, SMALL_X)) @@ -98,7 +98,7 @@ def test_ndarray_random_exponential(): assert a.shape == (MEDIUM_X, SMALL_X, SMALL_X, SMALL_Y) -@with_setup(teardown) +@with_post_test_cleanup() @with_seed() def test_ndarray_random_gamma(): alpha_array = nd.random.uniform(shape=(MEDIUM_X, SMALL_X)) @@ -109,7 +109,7 @@ def test_ndarray_random_gamma(): assert a.shape == (MEDIUM_X, SMALL_X, SMALL_X, SMALL_Y) -@with_setup(teardown) +@with_post_test_cleanup() @with_seed() def test_ndarray_random_multinomial(): # test 1 shape dimension @@ -127,7 +127,7 @@ def test_ndarray_random_multinomial(): assert a[0].shape == (LARGE_X, 2, SMALL_Y) and a[0].shape == a[1].shape -@with_setup(teardown) +@with_post_test_cleanup() @with_seed() def test_ndarray_random_generalized_negative_binomial(): alpha_array = nd.random.uniform(shape=(MEDIUM_X, SMALL_X)) @@ -138,7 +138,7 @@ def test_ndarray_random_generalized_negative_binomial(): assert a.shape == (MEDIUM_X, SMALL_X, SMALL_X, SMALL_Y) -@with_setup(teardown) +@with_post_test_cleanup() @with_seed() def test_ndarray_random_negative_binomial(): k_array = nd.random.uniform(shape=(MEDIUM_X, SMALL_X)) @@ -149,7 +149,7 @@ def test_ndarray_random_negative_binomial(): assert a.shape == (MEDIUM_X, SMALL_X, SMALL_X, SMALL_Y) -@with_setup(teardown) +@with_post_test_cleanup() @with_seed() def test_ndarray_random_normal(): scale_array = nd.random.uniform(shape=(MEDIUM_X, SMALL_X)) @@ -159,7 +159,7 @@ def test_ndarray_random_normal(): assert a.shape == (MEDIUM_X, SMALL_X, SMALL_X, SMALL_Y) -@with_setup(teardown) +@with_post_test_cleanup() @with_seed() def test_ndarray_random_poisson(): lambda_array = nd.random.uniform(shape=(MEDIUM_X, SMALL_X)) @@ -168,7 +168,7 @@ 
def test_ndarray_random_poisson(): assert a.shape == (MEDIUM_X, SMALL_X, SMALL_X, SMALL_Y) -@with_setup(teardown) +@with_post_test_cleanup() @with_seed() def test_ndarray_random_randn(): a = nd.random.randn(LARGE_X, SMALL_Y) @@ -177,7 +177,7 @@ def test_ndarray_random_randn(): # Add check for (x,y,m,n) where x,y shape of loc,scale and m,n input shape -@with_setup(teardown) +@with_post_test_cleanup() @with_seed() def test_ndarray_random_shuffle(): a = nd.ones(shape=(LARGE_X, SMALL_Y)) @@ -194,13 +194,13 @@ def test_ndarray_random_shuffle(): assert a.shape == (LARGE_X, SMALL_Y) -@with_setup(teardown) +@with_post_test_cleanup() def test_ndarray_empty(): a = nd.empty((LARGE_X, SMALL_Y)) assert a.shape == (LARGE_X, SMALL_Y) -@with_setup(teardown) +@with_post_test_cleanup() def test_elementwise(): a = nd.ones(shape=(LARGE_X, SMALL_Y)) b = nd.ones(shape=(LARGE_X, SMALL_Y)) @@ -212,13 +212,13 @@ def test_elementwise(): assert np.sum(res[-1].asnumpy() == 2) == a.shape[1] -@with_setup(teardown) +@with_post_test_cleanup() def test_reduce(): a = nd.ones(shape=(LARGE_X, SMALL_Y)) assert nd.sum(a).asnumpy() == a.shape[0] * a.shape[1] -@with_setup(teardown) +@with_post_test_cleanup() def test_dot(): a = nd.ones(shape=(LARGE_X, SMALL_Y)) b = nd.ones(shape=(SMALL_Y, SMALL_Y)) @@ -226,7 +226,7 @@ def test_dot(): assert np.sum(res[-1].asnumpy() == SMALL_Y) == b.shape[1] -@with_setup(teardown) +@with_post_test_cleanup() def test_FullyConnected(): a = nd.ones(shape=(LARGE_X, SMALL_Y)) b = nd.ones(shape=(SMALL_Y, SMALL_Y)) @@ -234,7 +234,7 @@ def test_FullyConnected(): assert np.sum(res[-1].asnumpy() == SMALL_Y) == b.shape[1] -@with_setup(teardown) +@with_post_test_cleanup() def test_broadcast(): a = nd.ones(shape=(LARGE_X, SMALL_Y)) b = nd.arange(0, LARGE_X).reshape(LARGE_X, 1) @@ -244,14 +244,14 @@ def test_broadcast(): assert np.sum(res[-1].asnumpy() == LARGE_X) == a.shape[1] -@with_setup(teardown) +@with_post_test_cleanup() def test_clip(): a = nd.arange(0, LARGE_X * SMALL_Y).reshape(LARGE_X, SMALL_Y) res = nd.clip(a, a_min=100, a_max=1000) assert np.sum(res[-1].asnumpy() == 1000) == a.shape[1] -@with_setup(teardown) +@with_post_test_cleanup() def test_split(): a = nd.arange(0, LARGE_X * SMALL_Y).reshape(LARGE_X, SMALL_Y) outs = nd.split(a, num_outputs=SMALL_Y, axis=1) @@ -259,21 +259,21 @@ def test_split(): assert result == a.shape[1] -@with_setup(teardown) +@with_post_test_cleanup() def test_argmin(): a = nd.arange(0, LARGE_X * SMALL_Y).reshape(LARGE_X, SMALL_Y) idx = mx.nd.argmin(a, axis=0) assert idx.shape[0] == SMALL_Y -@with_setup(teardown) +@with_post_test_cleanup() def test_tile(): a = nd.arange(0, LARGE_X).reshape(LARGE_X, 1) b = nd.tile(a, reps=(1, SMALL_Y)) assert np.sum(b[-1].asnumpy() == LARGE_X) == b.shape[1] -@with_setup(teardown) +@with_post_test_cleanup() def test_take(): a = nd.ones(shape=(LARGE_X, SMALL_Y)) idx = nd.arange(LARGE_X - 1000, LARGE_X) @@ -281,21 +281,21 @@ def test_take(): assert np.sum(res[-1].asnumpy() == 1) == res.shape[1] -@with_setup(teardown) +@with_post_test_cleanup() def test_slice(): a = nd.ones(shape=(LARGE_X, SMALL_Y)) res = nd.slice(a, begin=(LARGE_X-1000, 1), end=(LARGE_X, SMALL_Y)) assert np.sum(res[-1].asnumpy() == 1) == res.shape[1] -@with_setup(teardown) +@with_post_test_cleanup() def test_slice_assign(): a = nd.ones(shape=(LARGE_X, SMALL_Y)) a[LARGE_X-1:LARGE_X] = 1000 assert np.sum(a[-1].asnumpy() == 1000) == a.shape[1] -@with_setup(teardown) +@with_post_test_cleanup() def test_expand_dims(): a = nd.ones(shape=(LARGE_X, SMALL_Y)) res = nd.expand_dims(a, 
axis=1) @@ -303,7 +303,7 @@ def test_expand_dims(): assert res.shape == (a.shape[0], 1, a.shape[1]) -@with_setup(teardown) +@with_post_test_cleanup() def test_squeeze(): a = nd.ones(shape=(LARGE_X, SMALL_Y)) data = nd.expand_dims(a, axis=1) @@ -311,7 +311,7 @@ def test_squeeze(): assert res.shape == a.shape -@with_setup(teardown) +@with_post_test_cleanup() def test_broadcast_div(): a = nd.ones(shape=(LARGE_X, SMALL_Y)) b = nd.ones(shape=(LARGE_X, 1)) * 2 @@ -319,7 +319,7 @@ def test_broadcast_div(): assert np.sum(res[-1].asnumpy() == 0.5) == a.shape[1] -@with_setup(teardown) +@with_post_test_cleanup() def test_Dense(ctx=mx.cpu(0)): data = mx.nd.ones(shape=(50*1000*1000, 100)) linear = gluon.nn.Dense(100) @@ -328,7 +328,7 @@ def test_Dense(ctx=mx.cpu(0)): assert res.shape == (50000000, 100) -@with_setup(teardown) +@with_post_test_cleanup() def test_where(): a = nd.ones(shape=(LARGE_X, SMALL_Y)) b = nd.arange(0, LARGE_X * SMALL_Y).reshape(LARGE_X, SMALL_Y) @@ -339,7 +339,7 @@ def test_where(): assert np.sum(res[0].asnumpy() == 1) == 10 -@with_setup(teardown) +@with_post_test_cleanup() def test_pick(): a = mx.nd.ones(shape=(256 * 35, 1024 * 1024)) b = mx.nd.ones(shape=(256 * 35, )) @@ -347,7 +347,7 @@ def test_pick(): assert res.shape == b.shape -@with_setup(teardown) +@with_post_test_cleanup() def test_depthtospace(): def numpy_depth_to_space(x, blocksize): b, c, h, w = x.shape[0], x.shape[1], x.shape[2], x.shape[3] @@ -366,7 +366,7 @@ def numpy_depth_to_space(x, blocksize): assert_almost_equal(output.asnumpy(), expected, atol=1e-3, rtol=1e-3) -@with_setup(teardown) +@with_post_test_cleanup() def test_spacetodepth(): def numpy_space_to_depth(x, blocksize): b, c, h, w = x.shape[0], x.shape[1], x.shape[2], x.shape[3] @@ -385,7 +385,7 @@ def numpy_space_to_depth(x, blocksize): assert_almost_equal(output.asnumpy(), expected, atol=1e-3, rtol=1e-3) -@with_setup(teardown) +@with_post_test_cleanup() @with_seed() def test_diag(): a_np = np.random.random((LARGE_X, SMALL_Y)).astype(np.float32) @@ -411,7 +411,7 @@ def test_diag(): assert_almost_equal(r.asnumpy(), np.diag(a_np, k=k)) -@with_setup(teardown) +@with_post_test_cleanup() @with_seed() def test_ravel_multi_index(): x1, y1 = rand_coord_2d((LARGE_X - 100), LARGE_X, 10, SMALL_Y) @@ -424,7 +424,7 @@ def test_ravel_multi_index(): assert np.sum(1 for i in range(idx.size) if idx[i] == idx_numpy[i]) == 3 -@with_setup(teardown) +@with_post_test_cleanup() @with_seed() def test_unravel_index(): x1, y1 = rand_coord_2d((LARGE_X - 100), LARGE_X, 10, SMALL_Y) @@ -437,7 +437,7 @@ def test_unravel_index(): assert (indices_2d.asnumpy() == np.array(original_2d_indices)).all() -@with_setup(teardown) +@with_post_test_cleanup() def test_transpose(): b = create_2d_tensor(rows=LARGE_X, columns=SMALL_Y) t = b.T @@ -445,7 +445,7 @@ def test_transpose(): assert t.shape == (SMALL_Y, LARGE_X) -@with_setup(teardown) +@with_post_test_cleanup() def test_swapaxes(): b = create_2d_tensor(rows=LARGE_X, columns=SMALL_Y) t = nd.swapaxes(b, dim1=0, dim2=1) @@ -453,7 +453,7 @@ def test_swapaxes(): assert t.shape == (SMALL_Y, LARGE_X) -@with_setup(teardown) +@with_post_test_cleanup() def test_flip(): b = create_2d_tensor(rows=LARGE_X, columns=SMALL_Y) t = nd.flip(b, axis=0) @@ -461,7 +461,7 @@ def test_flip(): assert t.shape == (LARGE_X, SMALL_Y) -@with_setup(teardown) +@with_post_test_cleanup() def test_softmax(): input_data = mx.nd.ones((SMALL_Y, LARGE_X)) true_output = np.full((SMALL_Y, LARGE_X), (1 / SMALL_Y)) @@ -469,7 +469,7 @@ def test_softmax(): 
assert_almost_equal(output.asnumpy(), true_output, rtol=1e-5, atol=1e-5) -@with_setup(teardown) +@with_post_test_cleanup() def test_argsort(): b = create_2d_tensor(rows=LARGE_X, columns=SMALL_Y) s = nd.argsort(b, axis=0, is_ascend=False, dtype=np.int64) @@ -477,7 +477,7 @@ def test_argsort(): assert (s[0].asnumpy() == (LARGE_X - 1)).all() -@with_setup(teardown) +@with_post_test_cleanup() def test_sort(): b = create_2d_tensor(rows=LARGE_X, columns=SMALL_Y) s = nd.sort(b, axis=0, is_ascend=False) @@ -486,7 +486,7 @@ def test_sort(): assert np.sum(s[0].asnumpy() == 0).all() -@with_setup(teardown) +@with_post_test_cleanup() def test_topk(): b = create_2d_tensor(rows=LARGE_X, columns=SMALL_Y) k = nd.topk(b, k=10, axis=0, dtype=np.int64) @@ -499,7 +499,7 @@ def test_topk(): assert l.sum() == np.sum(np.arange(0, SMALL_Y)) -@with_setup(teardown) +@with_post_test_cleanup() def test_exponent_logarithm_operators(): a = 2*nd.ones(shape=(LARGE_X, SMALL_Y)) # exponent @@ -533,7 +533,7 @@ def test_exponent_logarithm_operators(): assert result.shape == a.shape -@with_setup(teardown) +@with_post_test_cleanup() def test_power_operators(): a = 2*nd.ones(shape=(LARGE_X, SMALL_Y)) # sqrt @@ -567,7 +567,7 @@ def test_power_operators(): assert result.shape == a.shape -@with_setup(teardown) +@with_post_test_cleanup() def test_sequence_mask(): # Sequence Mask input [max_sequence_length, batch_size, other_feature_dims] # test with input batch_size = 2 @@ -591,7 +591,7 @@ def test_sequence_mask(): assert b[-1][-1][-1] == -1 -@with_setup(teardown) +@with_post_test_cleanup() def test_sequence_reverse(): a = nd.arange(0, LARGE_X * SMALL_Y * 2).reshape(LARGE_X, 2, SMALL_Y) # test as reverse operator @@ -608,7 +608,7 @@ def test_sequence_reverse(): assert b.shape == a.shape -@with_setup(teardown) +@with_post_test_cleanup() def test_sequence_last(): a = nd.arange(0, LARGE_X * SMALL_Y * 2).reshape(LARGE_X, 2, SMALL_Y) @@ -626,7 +626,7 @@ def test_sequence_last(): assert b[0][-1] == a[1][0][-1] -@with_setup(teardown) +@with_post_test_cleanup() def test_softmax_cross_entropy(): # dtype of input data, mxnet cross entropy set explicitly to float64 # numpy implicitly takes care of double precision @@ -651,7 +651,7 @@ def test_softmax_cross_entropy(): true_softmax_cross_entropy, rtol=1e-3, atol=1e-5) -@with_setup(teardown) +@with_post_test_cleanup() def test_index_copy(): x = mx.nd.zeros((LARGE_X, SMALL_Y)) t = mx.nd.arange(1, SMALL_Y + 1).reshape((1, SMALL_Y)) @@ -661,7 +661,7 @@ def test_index_copy(): assert x[-1][-1] == t[0][-1] -@with_setup(teardown) +@with_post_test_cleanup() def testSoftmaxOutput(): x = mx.sym.Variable('x') label = mx.sym.Variable('label') @@ -688,7 +688,7 @@ def testSoftmaxOutput(): # TODO: correctness of prelu (currently flaky) -@with_setup(teardown) +@with_post_test_cleanup() def test_leaky_relu(): a = -1*mx.nd.ones((LARGE_X, SMALL_Y)) @@ -718,7 +718,7 @@ def test_rrelu(): test_rrelu() -@with_setup(teardown) +@with_post_test_cleanup() def test_pooling(): a = mx.nd.ones((MEDIUM_X, 200, SMALL_Y, SMALL_Y)) @@ -752,7 +752,7 @@ def test_lp_pooling(): test_lp_pooling() -@with_setup(teardown) +@with_post_test_cleanup() def test_layer_norm(): dtype = np.float32 forward_check_eps = 1E-3 @@ -793,7 +793,7 @@ def npy_layer_norm(data, gamma, beta, axis=1, eps=1E-5): # TODO: correctness of dropout # currently only test for dropout to work # since testing for correctness involves flakiness issue #14288 -@with_setup(teardown) +@with_post_test_cleanup() def test_dropout(): shape = (LARGE_X, SMALL_Y) x = 
mx.sym.var('data') @@ -805,7 +805,7 @@ def test_dropout(): assert out[0].shape == shape -@with_setup(teardown) +@with_post_test_cleanup() def test_activation(): x = mx.nd.ones((LARGE_X, SMALL_Y)) test_x = -2 @@ -838,7 +838,7 @@ def test_activation(): # TODO: correctness of batchnorm # in future, we could test if mean, var of output # matches target output's mean, var -@with_setup(teardown) +@with_post_test_cleanup() def test_batchnorm(): shape = (LARGE_X, SMALL_Y) axis = 1 # default @@ -855,7 +855,7 @@ def test_batchnorm(): assert output.shape == shape -@with_setup(teardown) +@with_post_test_cleanup() def test_add(): a = nd.ones(shape=(LARGE_X, SMALL_Y)) b = nd.ones(shape=(LARGE_X, SMALL_Y)) @@ -865,7 +865,7 @@ def test_add(): assert c.shape == a.shape -@with_setup(teardown) +@with_post_test_cleanup() def test_sub(): a = 3*nd.ones(shape=(LARGE_X, SMALL_Y)) b = nd.ones(shape=(LARGE_X, SMALL_Y)) @@ -875,7 +875,7 @@ def test_sub(): assert c.shape == a.shape -@with_setup(teardown) +@with_post_test_cleanup() def test_rsub(): a = 3*nd.ones(shape=(LARGE_X, SMALL_Y)) b = nd.ones(shape=(LARGE_X, SMALL_Y)) @@ -885,7 +885,7 @@ def test_rsub(): assert c.shape == a.shape -@with_setup(teardown) +@with_post_test_cleanup() def test_neg(): a = nd.ones(shape=(LARGE_X, SMALL_Y)) c = a @@ -894,7 +894,7 @@ def test_neg(): assert c.shape == a.shape -@with_setup(teardown) +@with_post_test_cleanup() def test_mul(): a = 2*nd.ones(shape=(LARGE_X, SMALL_Y)) b = 3*nd.ones(shape=(LARGE_X, SMALL_Y)) @@ -904,7 +904,7 @@ def test_mul(): assert c.shape == a.shape -@with_setup(teardown) +@with_post_test_cleanup() def test_div(): a = 2*nd.ones(shape=(LARGE_X, SMALL_Y)) b = 3*nd.ones(shape=(LARGE_X, SMALL_Y)) @@ -914,7 +914,7 @@ def test_div(): assert c.shape == a.shape -@with_setup(teardown) +@with_post_test_cleanup() def test_rdiv(): a = 2*nd.ones(shape=(LARGE_X, SMALL_Y)) b = 3*nd.ones(shape=(LARGE_X, SMALL_Y)) @@ -924,7 +924,7 @@ def test_rdiv(): assert c.shape == a.shape -@with_setup(teardown) +@with_post_test_cleanup() def test_mod(): a = 2*nd.ones(shape=(LARGE_X, SMALL_Y)) b = 3*nd.ones(shape=(LARGE_X, SMALL_Y)) @@ -934,7 +934,7 @@ def test_mod(): assert c.shape == a.shape -@with_setup(teardown) +@with_post_test_cleanup() def test_rmod(): a = 2*nd.ones(shape=(LARGE_X, SMALL_Y)) b = 3*nd.ones(shape=(LARGE_X, SMALL_Y)) @@ -944,7 +944,7 @@ def test_rmod(): assert c.shape == a.shape -@with_setup(teardown) +@with_post_test_cleanup() def test_imod(): a = 2*nd.ones(shape=(LARGE_X, SMALL_Y)) b = 3*nd.ones(shape=(LARGE_X, SMALL_Y)) @@ -954,7 +954,7 @@ def test_imod(): assert c.shape == a.shape -@with_setup(teardown) +@with_post_test_cleanup() def test_pow(): a = 2*nd.ones(shape=(LARGE_X, SMALL_Y)) b = 3*nd.ones(shape=(LARGE_X, SMALL_Y)) @@ -964,7 +964,7 @@ def test_pow(): assert c.shape == a.shape -@with_setup(teardown) +@with_post_test_cleanup() def test_rpow(): a = 2*nd.ones(shape=(LARGE_X, SMALL_Y)) b = 3*nd.ones(shape=(LARGE_X, SMALL_Y)) @@ -974,21 +974,21 @@ def test_rpow(): assert c.shape == a.shape -@with_setup(teardown) +@with_post_test_cleanup() def test_shape(): b = create_2d_tensor(rows=SMALL_Y, columns=LARGE_X) mx.nd.waitall() assert b.shape == (SMALL_Y, LARGE_X) -@with_setup(teardown) +@with_post_test_cleanup() def test_size(): b = create_2d_tensor(rows=SMALL_Y, columns=LARGE_X) mx.nd.waitall() assert b.size == LARGE_SIZE -@with_setup(teardown) +@with_post_test_cleanup() def test_copy(): a = nd.ones((SMALL_Y, LARGE_X)) b = a.copy() @@ -997,7 +997,7 @@ def test_copy(): assert b.size == LARGE_SIZE 
-@with_setup(teardown) +@with_post_test_cleanup() def test_copy_to(): a = create_2d_tensor(rows=SMALL_Y, columns=LARGE_X) b = nd.array(np.zeros((SMALL_Y, LARGE_X))) @@ -1006,7 +1006,7 @@ def test_copy_to(): assert b[-1][-1] == SMALL_Y-1 -@with_setup(teardown) +@with_post_test_cleanup() def test_zeros_like(): a = nd.array(np.ones((SMALL_Y, LARGE_X))) b = nd.zeros_like(a) @@ -1014,7 +1014,7 @@ def test_zeros_like(): assert b.shape == a.shape -@with_setup(teardown) +@with_post_test_cleanup() def test_ones_like(): a = nd.array(np.zeros((SMALL_Y, LARGE_X))) b = nd.ones_like(a) @@ -1022,7 +1022,7 @@ def test_ones_like(): assert b.shape == a.shape -@with_setup(teardown) +@with_post_test_cleanup() def test_reshape_like(): a = nd.array(np.zeros((SMALL_Y, LARGE_X))) b = nd.array(np.zeros((SMALL_Y//2, LARGE_X*2))) @@ -1030,7 +1030,7 @@ def test_reshape_like(): assert c.shape == (SMALL_Y//2, LARGE_X*2) -@with_setup(teardown) +@with_post_test_cleanup() def test_flatten(): a = create_2d_tensor(rows=LARGE_X, columns=SMALL_Y).reshape((LARGE_X//2, 2, SMALL_Y)) b = nd.flatten(a) @@ -1039,7 +1039,7 @@ def test_flatten(): assert b.shape == (LARGE_X//2, SMALL_Y*2) -@with_setup(teardown) +@with_post_test_cleanup() def test_concat(): a = nd.array(np.ones((SMALL_Y, LARGE_X))) b = nd.array(np.zeros((SMALL_Y, LARGE_X))) @@ -1047,7 +1047,7 @@ def test_concat(): assert c.shape == (b.shape[0]*2, LARGE_X) -@with_setup(teardown) +@with_post_test_cleanup() def test_stack(): a = nd.array(np.ones((SMALL_Y, LARGE_X))) b = nd.array(np.zeros((SMALL_Y, LARGE_X))) @@ -1055,35 +1055,35 @@ def test_stack(): assert c.shape == (b.shape[0], 2, LARGE_X) -@with_setup(teardown) +@with_post_test_cleanup() def test_broadcast_axes(): a = create_2d_tensor(rows=1, columns=LARGE_X) b = nd.broadcast_axis(a, axis=[0], size=2) assert b.shape == (a.shape[0]*2, a.shape[1]) -@with_setup(teardown) +@with_post_test_cleanup() def test_sum(): a = nd.array(np.ones((SMALL_Y, LARGE_X))) b = nd.sum(a, axis=1) assert b.shape[0] == SMALL_Y -@with_setup(teardown) +@with_post_test_cleanup() def test_prod(): a = nd.array(np.ones((SMALL_Y, LARGE_X))) b = nd.prod(a, axis=1) assert b.shape[0] == SMALL_Y -@with_setup(teardown) +@with_post_test_cleanup() def test_mean(): a = create_2d_tensor(rows=SMALL_Y, columns=LARGE_X) b = nd.mean(a, axis=0) assert b[0] == (SMALL_Y/2-1) -@with_setup(teardown) +@with_post_test_cleanup() def test_min(): a = create_2d_tensor(rows=SMALL_Y, columns=LARGE_X) b = nd.min(a, axis=0) @@ -1091,7 +1091,7 @@ def test_min(): assert b[-1] == 0 -@with_setup(teardown) +@with_post_test_cleanup() def test_max(): a = create_2d_tensor(rows=SMALL_Y, columns=LARGE_X) b = nd.max(a, axis=0) @@ -1099,7 +1099,7 @@ def test_max(): assert b[-1] == (SMALL_Y-1) -@with_setup(teardown) +@with_post_test_cleanup() def test_norm(): a = np.array(np.full((1, LARGE_X), 3)) b = np.array(np.full((1, LARGE_X), 4)) @@ -1112,7 +1112,7 @@ def test_norm(): assert e[-1] == 7 -@with_setup(teardown) +@with_post_test_cleanup() def test_argmax(): a = np.ones((SMALL_Y, LARGE_X)) b = np.zeros((SMALL_Y, LARGE_X)) @@ -1122,7 +1122,7 @@ def test_argmax(): assert d[-1] == d[0] == 0 -@with_setup(teardown) +@with_post_test_cleanup() def test_relu(): def frelu(x): return np.maximum(x, 0.0) @@ -1140,7 +1140,7 @@ def frelu_grad(x): check_symbolic_forward(y, [xa], [ya]) -@with_setup(teardown) +@with_post_test_cleanup() def test_sigmoid(): def fsigmoid(a): return np.divide(1.0, (1.0 + np.exp(-a))) @@ -1152,7 +1152,7 @@ def fsigmoid(a): check_symbolic_forward(y, [xa], [ya]) 
-@with_setup(teardown) +@with_post_test_cleanup() def np_softmax(x, axis=-1, temperature=1.0): x = x - np.max(x, axis=axis, keepdims=True) x = np.exp(x/temperature) @@ -1160,7 +1160,7 @@ def np_softmax(x, axis=-1, temperature=1.0): return x -@with_setup(teardown) +@with_post_test_cleanup() def test_log_softmax(): ndim = 2 shape = (SMALL_Y, LARGE_X) @@ -1170,7 +1170,7 @@ def test_log_softmax(): check_symbolic_forward(sym, [data], [np.log(np_softmax(data, axis=axis)+1e-20)]) -@with_setup(teardown) +@with_post_test_cleanup() def test_iadd(): a = nd.array(np.ones((SMALL_Y, LARGE_X))) b = nd.array(np.ones((SMALL_Y, LARGE_X))) @@ -1180,7 +1180,7 @@ def test_iadd(): assert c[0][-1] == 2 -@with_setup(teardown) +@with_post_test_cleanup() def test_isub(): a = nd.array(np.array(np.full((SMALL_Y, LARGE_X), 3))) b = nd.array(np.ones((SMALL_Y, LARGE_X))) @@ -1190,7 +1190,7 @@ def test_isub(): assert c[0][-1] == 2 -@with_setup(teardown) +@with_post_test_cleanup() def test_imul(): a = nd.array(np.array(np.full((SMALL_Y, LARGE_X), 3))) b = nd.array(np.ones((SMALL_Y, LARGE_X))) @@ -1200,7 +1200,7 @@ def test_imul(): assert c[0][-1] == 3 -@with_setup(teardown) +@with_post_test_cleanup() def test_idiv(): a = nd.array(np.array(np.full((SMALL_Y, LARGE_X), 4))) b = nd.array(np.array(np.full((SMALL_Y, LARGE_X), 2))) @@ -1210,7 +1210,7 @@ def test_idiv(): assert c[0][-1] == 2 -@with_setup(teardown) +@with_post_test_cleanup() def test_eq(): a = nd.array(np.array(np.full((SMALL_Y, LARGE_X), 3))) b = nd.array(np.array(np.full((SMALL_Y, LARGE_X), 3))) @@ -1218,7 +1218,7 @@ def test_eq(): assert np.sum(c[0].asnumpy() == 1).all() -@with_setup(teardown) +@with_post_test_cleanup() def test_neq(): a = nd.array(np.array(np.full((SMALL_Y, LARGE_X), 2))) b = nd.array(np.array(np.full((SMALL_Y, LARGE_X), 3))) @@ -1226,7 +1226,7 @@ def test_neq(): assert np.sum(c[0].asnumpy() == 1).all() -@with_setup(teardown) +@with_post_test_cleanup() def test_lt(): a = nd.array(np.array(np.full((SMALL_Y, LARGE_X), 2))) b = nd.array(np.array(np.full((SMALL_Y, LARGE_X), 3))) @@ -1234,7 +1234,7 @@ def test_lt(): assert np.sum(d[0].asnumpy() == 1).all() -@with_setup(teardown) +@with_post_test_cleanup() def test_lte(): a = nd.array(np.array(np.full((SMALL_Y, LARGE_X), 2))) b = nd.array(np.array(np.full((SMALL_Y, LARGE_X), 3))) @@ -1245,7 +1245,7 @@ def test_lte(): assert np.sum(e[0].asnumpy() == 1).all() -@with_setup(teardown) +@with_post_test_cleanup() def test_gt(): a = nd.array(np.array(np.full((SMALL_Y, LARGE_X), 3))) b = nd.array(np.array(np.full((SMALL_Y, LARGE_X), 2))) @@ -1253,7 +1253,7 @@ def test_gt(): assert np.sum(d[0].asnumpy() == 1).all() -@with_setup(teardown) +@with_post_test_cleanup() def test_gte(): a = nd.array(np.array(np.full((SMALL_Y, LARGE_X), 3))) b = nd.array(np.array(np.full((SMALL_Y, LARGE_X), 2))) @@ -1264,7 +1264,7 @@ def test_gte(): assert np.sum(e[0].asnumpy() == 1).all() -@with_setup(teardown) +@with_post_test_cleanup() def test_slice_like(): a = create_2d_tensor(rows=SMALL_Y, columns=LARGE_X) b = nd.array(np.ones((SMALL_Y//2, LARGE_X//2))) @@ -1279,7 +1279,7 @@ def test_slice_like(): assert e[-1][-1] == (SMALL_Y-1) -@with_setup(teardown) +@with_post_test_cleanup() def test_slice_axis(): a = create_2d_tensor(rows=SMALL_Y, columns=LARGE_X) c = nd.slice_axis(a, axis=0, begin=0, end=SMALL_Y//2) @@ -1290,7 +1290,7 @@ def test_slice_axis(): assert d[-1][-1] == (SMALL_Y-1) -@with_setup(teardown) +@with_post_test_cleanup() def test_one_hot(): # default dtype of ndarray is float32 which cannot index elements over 2^32 a = 
nd.array([1, (VLARGE_X - 1)], dtype=np.int64) @@ -1299,7 +1299,7 @@ def test_one_hot(): b[1][-1] == 1 -@with_setup(teardown) +@with_post_test_cleanup() def test_full(): a = nd.full((SMALL_Y, LARGE_X), 3) assert a.shape == (SMALL_Y, LARGE_X) diff --git a/tests/nightly/test_large_vector.py b/tests/nightly/test_large_vector.py index f857d8ae4933..e5411d6ef7dc 100644 --- a/tests/nightly/test_large_vector.py +++ b/tests/nightly/test_large_vector.py @@ -21,7 +21,7 @@ from mxnet.test_utils import rand_ndarray, assert_almost_equal, rand_coord_2d, create_vector from mxnet import gluon, nd -from tests.python.unittest.common import with_seed, teardown +from tests.python.unittest.common import with_seed, with_post_test_cleanup from nose.tools import with_setup # dimension constants @@ -29,7 +29,7 @@ MEDIUM_X = 1000000000 -@with_setup(teardown) +@with_post_test_cleanup() def test_slice(): a = nd.ones(LARGE_X) res = nd.slice(a, begin=(LARGE_X - MEDIUM_X), end=LARGE_X) @@ -37,7 +37,7 @@ def test_slice(): assert res[0] == 1 -@with_setup(teardown) +@with_post_test_cleanup() def test_ndarray_zeros(): a = nd.zeros(shape=LARGE_X) assert a[-1] == 0 @@ -45,21 +45,21 @@ def test_ndarray_zeros(): assert a.size == LARGE_X -@with_setup(teardown) +@with_post_test_cleanup() def test_ndarray_ones(): a = nd.ones(shape=LARGE_X) assert a[-1] == 1 assert nd.sum(a) == LARGE_X -@with_setup(teardown) +@with_post_test_cleanup() @with_seed() def test_ndarray_random_uniform(): a = nd.random.uniform(shape=LARGE_X) assert a[-1] != 0 -@with_setup(teardown) +@with_post_test_cleanup() @with_seed() def test_ndarray_random_randint(): # check if randint can generate value greater than 2**32 (large) @@ -70,13 +70,13 @@ def test_ndarray_random_randint(): assert (a >= low).all() and (a < high).all() -@with_setup(teardown) +@with_post_test_cleanup() def test_ndarray_empty(): a = nd.empty(LARGE_X) assert a.shape == (LARGE_X,) -@with_setup(teardown) +@with_post_test_cleanup() def test_elementwise(): a = nd.ones(shape=LARGE_X) b = nd.ones(shape=LARGE_X) @@ -88,14 +88,14 @@ def test_elementwise(): assert res[-1].asnumpy() == 3 -@with_setup(teardown) +@with_post_test_cleanup() def test_clip(): a = create_vector(LARGE_X) res = nd.clip(a, a_min=100, a_max=1000) assert res[-1] == 1000 -@with_setup(teardown) +@with_post_test_cleanup() def test_argmin(): a = create_vector(LARGE_X, dtype=np.float32) assert a[0] == 0 @@ -104,7 +104,7 @@ def test_argmin(): assert idx.shape[0] == 1 -@with_setup(teardown) +@with_post_test_cleanup() def test_take(): a = nd.ones(shape=LARGE_X) idx = nd.arange(LARGE_X - 1000, LARGE_X) @@ -112,14 +112,14 @@ def test_take(): assert np.sum(res.asnumpy() == 1) == res.shape[0] -@with_setup(teardown) +@with_post_test_cleanup() def test_slice_assign(): a = nd.ones(shape=LARGE_X) a[LARGE_X-1:LARGE_X] = 1000 assert np.sum(a[-1].asnumpy() == 1000) == 1 -@with_setup(teardown) +@with_post_test_cleanup() def test_expand_dims(): a = nd.ones(shape=LARGE_X) res = nd.expand_dims(a, axis=0) @@ -127,7 +127,7 @@ def test_expand_dims(): assert res.shape == (1, a.shape[0]) -@with_setup(teardown) +@with_post_test_cleanup() def test_squeeze(): a = nd.ones(shape=LARGE_X) data = nd.expand_dims(a, axis=0) @@ -136,7 +136,7 @@ def test_squeeze(): assert res.shape == a.shape -@with_setup(teardown) +@with_post_test_cleanup() def test_broadcast_div(): a = nd.ones(shape=LARGE_X) b = nd.ones(shape=LARGE_X) * 2 @@ -144,7 +144,7 @@ def test_broadcast_div(): assert np.sum(res.asnumpy() == 0.5) == a.shape[0] -@with_setup(teardown) +@with_post_test_cleanup() def 
test_Dense(ctx=mx.cpu(0)): data = mx.nd.ones(shape=LARGE_X) linear = gluon.nn.Dense(2) @@ -153,14 +153,14 @@ def test_Dense(ctx=mx.cpu(0)): assert res.shape == (LARGE_X, 2) -@with_setup(teardown) +@with_post_test_cleanup() def test_argsort(): a = create_vector(size=LARGE_X) s = nd.argsort(a, axis=0, is_ascend=False, dtype=np.int64) assert s[0] == (LARGE_X - 1) -@with_setup(teardown) +@with_post_test_cleanup() def test_sort(): a = create_vector(size=LARGE_X) @@ -176,7 +176,7 @@ def test_ascend(x): test_ascend(a) -@with_setup(teardown) +@with_post_test_cleanup() def test_topk(): a = create_vector(size=LARGE_X) ind = nd.topk(a, k=10, axis=0, dtype=np.int64) @@ -188,14 +188,14 @@ def test_topk(): assert val == (LARGE_X - 1) -@with_setup(teardown) +@with_post_test_cleanup() def test_mean(): a = nd.arange(-LARGE_X // 2, LARGE_X // 2 + 1, dtype=np.int64) b = nd.mean(a, axis=0) assert b == 0 -@with_setup(teardown) +@with_post_test_cleanup() @with_seed() def test_ndarray_random_exponential(): a = nd.random.exponential(shape=LARGE_X) @@ -203,7 +203,7 @@ def test_ndarray_random_exponential(): assert a.shape[0] == LARGE_X -@with_setup(teardown) +@with_post_test_cleanup() @with_seed() def test_ndarray_random_gamma(): a = nd.random.gamma(shape=LARGE_X) @@ -211,7 +211,7 @@ def test_ndarray_random_gamma(): assert a.shape[0] == LARGE_X -@with_setup(teardown) +@with_post_test_cleanup() @with_seed() def test_ndarray_random_generalized_negative_binomial(): a = nd.random.generalized_negative_binomial(shape=LARGE_X) @@ -219,7 +219,7 @@ def test_ndarray_random_generalized_negative_binomial(): assert a.shape[0] == LARGE_X -@with_setup(teardown) +@with_post_test_cleanup() @with_seed() def test_ndarray_random_multinomial(): a = nd.random.multinomial(nd.random.uniform(shape=LARGE_X)) @@ -227,7 +227,7 @@ def test_ndarray_random_multinomial(): assert a.shape[0] == 1 -@with_setup(teardown) +@with_post_test_cleanup() @with_seed() def test_ndarray_random_negative_binomial(): a = nd.random.negative_binomial(shape=LARGE_X) @@ -235,14 +235,14 @@ def test_ndarray_random_negative_binomial(): assert a.shape[0] == LARGE_X -@with_setup(teardown) +@with_post_test_cleanup() @with_seed() def test_ndarray_random_normal(): a = nd.random.normal(shape=LARGE_X) assert a.shape[0] == LARGE_X -@with_setup(teardown) +@with_post_test_cleanup() @with_seed() def test_ndarray_random_poisson(): a = nd.random.poisson(shape=LARGE_X) @@ -250,14 +250,14 @@ def test_ndarray_random_poisson(): assert a.shape[0] == LARGE_X -@with_setup(teardown) +@with_post_test_cleanup() @with_seed() def test_ndarray_random_randn(): a = nd.random.randn(LARGE_X) assert a.shape[0] == LARGE_X -@with_setup(teardown) +@with_post_test_cleanup() @with_seed() def test_ndarray_random_shuffle(): a = nd.ones(shape=LARGE_X) @@ -270,7 +270,7 @@ def test_ndarray_random_shuffle(): assert a.shape[0] == LARGE_X -@with_setup(teardown) +@with_post_test_cleanup() def test_exponent_logarithm_operators(): a = 2*nd.ones(shape=LARGE_X) # exponent @@ -304,7 +304,7 @@ def test_exponent_logarithm_operators(): assert result.shape == a.shape -@with_setup(teardown) +@with_post_test_cleanup() def test_power_operators(): a = 2*nd.ones(shape=LARGE_X) # sqrt @@ -338,7 +338,7 @@ def test_power_operators(): assert result.shape == a.shape -@with_setup(teardown) +@with_post_test_cleanup() def test_sequence_mask(): # Sequence Mask input [max_sequence_length, batch_size] # test with input batch_size = 2 @@ -362,7 +362,7 @@ def test_sequence_mask(): assert b[-1][-1] == -1 -@with_setup(teardown) 
+@with_post_test_cleanup() def test_sequence_reverse(): a = nd.arange(0, LARGE_X * 2).reshape(LARGE_X, 2) # test as reverse operator @@ -378,7 +378,7 @@ def test_sequence_reverse(): assert b.shape == a.shape -@with_setup(teardown) +@with_post_test_cleanup() def test_sequence_last(): a = nd.arange(0, LARGE_X * 2).reshape(LARGE_X, 2) @@ -400,7 +400,7 @@ def test_sequence_last(): # TODO: correctness of layernorm # numpy implementation for large vector is flaky -@with_setup(teardown) +@with_post_test_cleanup() def test_layer_norm(): axis = 0 eps = 1E-5 @@ -416,7 +416,7 @@ def test_layer_norm(): # TODO: correctness of batchnorm # in future, we could test if mean, var of output # matches target output's mean, var -@with_setup(teardown) +@with_post_test_cleanup() def test_batchnorm(): shape = LARGE_X axis = 0 # since vector @@ -432,7 +432,7 @@ def test_batchnorm(): assert output.shape == (shape,) -@with_setup(teardown) +@with_post_test_cleanup() def test_add(): a = nd.ones(shape=LARGE_X) b = nd.ones(shape=LARGE_X) @@ -442,7 +442,7 @@ def test_add(): assert c.shape == a.shape -@with_setup(teardown) +@with_post_test_cleanup() def test_sub(): a = 3*nd.ones(shape=LARGE_X) b = nd.ones(shape=LARGE_X) @@ -452,7 +452,7 @@ def test_sub(): assert c.shape == a.shape -@with_setup(teardown) +@with_post_test_cleanup() def test_rsub(): a = 3*nd.ones(shape=LARGE_X) b = nd.ones(shape=LARGE_X) @@ -462,7 +462,7 @@ def test_rsub(): assert c.shape == a.shape -@with_setup(teardown) +@with_post_test_cleanup() def test_neg(): a = nd.ones(shape=LARGE_X) c = a @@ -471,7 +471,7 @@ def test_neg(): assert c.shape == a.shape -@with_setup(teardown) +@with_post_test_cleanup() def test_mul(): a = 2*nd.ones(shape=LARGE_X) b = 3*nd.ones(shape=LARGE_X) @@ -481,7 +481,7 @@ def test_mul(): assert c.shape == a.shape -@with_setup(teardown) +@with_post_test_cleanup() def test_div(): a = 2*nd.ones(shape=LARGE_X) b = 3*nd.ones(shape=LARGE_X) @@ -491,7 +491,7 @@ def test_div(): assert c.shape == a.shape -@with_setup(teardown) +@with_post_test_cleanup() def test_rdiv(): a = 2*nd.ones(shape=LARGE_X) b = 3*nd.ones(shape=LARGE_X) @@ -501,7 +501,7 @@ def test_rdiv(): assert c.shape == a.shape -@with_setup(teardown) +@with_post_test_cleanup() def test_mod(): a = 2*nd.ones(shape=LARGE_X) b = 3*nd.ones(shape=LARGE_X) @@ -511,7 +511,7 @@ def test_mod(): assert c.shape == a.shape -@with_setup(teardown) +@with_post_test_cleanup() def test_rmod(): a = 2*nd.ones(shape=LARGE_X) b = 3*nd.ones(shape=LARGE_X) @@ -521,7 +521,7 @@ def test_rmod(): assert c.shape == a.shape -@with_setup(teardown) +@with_post_test_cleanup() def test_imod(): a = 2*nd.ones(shape=LARGE_X) b = 3*nd.ones(shape=LARGE_X) @@ -531,7 +531,7 @@ def test_imod(): assert c.shape == a.shape -@with_setup(teardown) +@with_post_test_cleanup() def test_pow(): a = 2*nd.ones(shape=LARGE_X) b = 3*nd.ones(shape=LARGE_X) @@ -541,7 +541,7 @@ def test_pow(): assert c.shape == a.shape -@with_setup(teardown) +@with_post_test_cleanup() def test_rpow(): a = 2*nd.ones(shape=LARGE_X) b = 3*nd.ones(shape=LARGE_X) @@ -551,7 +551,7 @@ def test_rpow(): assert c.shape == a.shape -@with_setup(teardown) +@with_post_test_cleanup() def test_shape(): b = create_vector(size=LARGE_X) # explicit wait_to_read() @@ -559,7 +559,7 @@ def test_shape(): assert b.shape[0] == LARGE_X -@with_setup(teardown) +@with_post_test_cleanup() def test_size(): b = create_vector(size=LARGE_X) # explicit wait_to_read() @@ -567,7 +567,7 @@ def test_size(): assert b.size == LARGE_X -@with_setup(teardown) +@with_post_test_cleanup() def 
test_copy(): a = nd.ones(LARGE_X) b = a.copy() @@ -576,7 +576,7 @@ def test_copy(): assert b.size == LARGE_X -@with_setup(teardown) +@with_post_test_cleanup() def test_copy_to(): a = create_vector(size=LARGE_X) # keeping dtype same as input uses parallel copy which is much faster @@ -587,7 +587,7 @@ def test_copy_to(): assert b[0] == 0 -@with_setup(teardown) +@with_post_test_cleanup() def test_zeros_like(): a = nd.ones(LARGE_X) b = nd.zeros_like(a) @@ -595,7 +595,7 @@ def test_zeros_like(): assert b.shape == a.shape -@with_setup(teardown) +@with_post_test_cleanup() def test_ones_like(): a = nd.zeros(LARGE_X) b = nd.ones_like(a) @@ -603,7 +603,7 @@ def test_ones_like(): assert b.shape == a.shape -@with_setup(teardown) +@with_post_test_cleanup() def test_concat(): a = nd.ones(LARGE_X) b = nd.zeros(LARGE_X) @@ -613,21 +613,21 @@ def test_concat(): assert c.shape[0] == (2 * LARGE_X) -@with_setup(teardown) +@with_post_test_cleanup() def test_sum(): a = nd.ones(LARGE_X) b = nd.sum(a, axis=0) assert b[0] == LARGE_X -@with_setup(teardown) +@with_post_test_cleanup() def test_prod(): a = nd.ones(LARGE_X) b = nd.prod(a, axis=0) assert b[0] == 1 -@with_setup(teardown) +@with_post_test_cleanup() def test_min(): a = create_vector(size=LARGE_X) b = nd.min(a, axis=0) @@ -635,14 +635,14 @@ def test_min(): assert b[-1] == 0 -@with_setup(teardown) +@with_post_test_cleanup() def test_max(): a = create_vector(size=LARGE_X) b = nd.max(a, axis=0) assert b[0] == (LARGE_X - 1) -@with_setup(teardown) +@with_post_test_cleanup() def test_argmax(): a = nd.ones(LARGE_X) b = nd.zeros(LARGE_X) @@ -652,7 +652,7 @@ def test_argmax(): assert d == 0 -@with_setup(teardown) +@with_post_test_cleanup() def np_softmax(x, axis=-1, temperature=1.0): x = x - np.max(x, axis=axis, keepdims=True) x = np.exp(x/temperature) @@ -660,7 +660,7 @@ def np_softmax(x, axis=-1, temperature=1.0): return x -@with_setup(teardown) +@with_post_test_cleanup() def test_iadd(): a = nd.ones(LARGE_X) b = nd.ones(LARGE_X) @@ -670,7 +670,7 @@ def test_iadd(): assert c[-1] == 2 -@with_setup(teardown) +@with_post_test_cleanup() def test_isub(): a = nd.full(LARGE_X, 3) b = nd.ones(LARGE_X) @@ -680,7 +680,7 @@ def test_isub(): assert c[-1] == 2 -@with_setup(teardown) +@with_post_test_cleanup() def test_imul(): a = nd.full(LARGE_X, 3) b = nd.ones(LARGE_X) @@ -690,7 +690,7 @@ def test_imul(): assert c[-1] == 3 -@with_setup(teardown) +@with_post_test_cleanup() def test_idiv(): a = nd.full(LARGE_X, 4) b = nd.full(LARGE_X, 2) @@ -700,7 +700,7 @@ def test_idiv(): assert c[-1] == 2 -@with_setup(teardown) +@with_post_test_cleanup() def test_eq(): a = nd.full(LARGE_X, 3) b = nd.full(LARGE_X, 3) @@ -708,7 +708,7 @@ def test_eq(): assert (c.asnumpy() == 1).all() -@with_setup(teardown) +@with_post_test_cleanup() def test_neq(): a = nd.full(LARGE_X, 2) b = nd.full(LARGE_X, 3) @@ -716,7 +716,7 @@ def test_neq(): assert (c.asnumpy() == 1).all() -@with_setup(teardown) +@with_post_test_cleanup() def test_lt(): a = nd.full(LARGE_X, 2) b = nd.full(LARGE_X, 3) @@ -724,7 +724,7 @@ def test_lt(): assert (d.asnumpy() == 1).all() -@with_setup(teardown) +@with_post_test_cleanup() def test_lte(): a = nd.full(LARGE_X, 2) b = nd.full(LARGE_X, 3) @@ -735,7 +735,7 @@ def test_lte(): assert (d.asnumpy() == 1).all() -@with_setup(teardown) +@with_post_test_cleanup() def test_gt(): a = nd.full(LARGE_X, 3) b = nd.full(LARGE_X, 2) @@ -743,7 +743,7 @@ def test_gt(): assert (d.asnumpy() == 1).all() -@with_setup(teardown) +@with_post_test_cleanup() def test_gte(): a = nd.full(LARGE_X, 3) b = 
nd.full(LARGE_X, 2)
@@ -754,7 +754,7 @@ def test_gte():
     assert (d.asnumpy() == 1).all()
 
 
-@with_setup(teardown)
+@with_post_test_cleanup()
 def test_slice_like():
     a = create_vector(size=LARGE_X)
     b = nd.ones(LARGE_X//2)
@@ -764,7 +764,7 @@ def test_slice_like():
     assert c[-1] == (LARGE_X // 2 - 1)
 
 
-@with_setup(teardown)
+@with_post_test_cleanup()
 def test_slice_axis():
     a = create_vector(size=LARGE_X)
     med = LARGE_X // 2
@@ -773,7 +773,7 @@ def test_slice_axis():
     assert c[-1][0] == (med - 1)
 
 
-@with_setup(teardown)
+@with_post_test_cleanup()
 def test_full():
     a = nd.full(LARGE_X, 3)
     assert a.shape[0] == LARGE_X
diff --git a/tests/python/unittest/common.py b/tests/python/unittest/common.py
index 7cd637da3d4f..06fb16288649 100644
--- a/tests/python/unittest/common.py
+++ b/tests/python/unittest/common.py
@@ -272,6 +272,27 @@ def teardown():
     mx.nd.waitall()
 
 
+def with_post_test_cleanup():
+    """
+    Helper decorator that cleans up memory by releasing it from the memory pool.
+    Required especially by large tensor tests that have memory footprints in GBs.
+    """
+    def test_helper(orig_test):
+        @make_decorator(orig_test)
+        def test_new(*args, **kwargs):
+            logger = default_logger()
+            try:
+                orig_test(*args, **kwargs)
+            except:
+                logger.info('%s failed, cleaning up before re-raising', orig_test.__name__)
+                raise
+            finally:
+                mx.nd.waitall()
+                mx.cpu().empty_cache()
+        return test_new
+    return test_helper
+
+
 def run_in_spawned_process(func, env, *args):
     """
     Helper function to run a test in its own process.
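For reference, a minimal sketch of how a nightly test opts in to the helper above: the import path is the one these patches use, while the dimension constants below are illustrative stand-ins for the suite's real ones.

    from mxnet import nd
    from tests.python.unittest.common import with_post_test_cleanup

    LARGE_X = 100000000  # stand-in value; the test module defines its own constant
    SMALL_Y = 50         # stand-in value

    @with_post_test_cleanup()
    def test_ndarray_ones():
        # the decorator waits on all pending async ops and empties the memory
        # pool after the body runs, whether the test passes or raises
        a = nd.ones(shape=(LARGE_X, SMALL_Y))
        assert a[-1][0] == 1
        assert nd.sum(a).asnumpy() == LARGE_X * SMALL_Y

As the PATCH 13 subject below notes, releasing CPU memory this way needs a C API that does not exist yet, which is the stated rationale for backing the decorator out of the nightly suites.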

From 0361959a152ef8e8d6306ab7bdc1f5d851ab3363 Mon Sep 17 00:00:00 2001
From: ChaiBapchya
Date: Fri, 11 Oct 2019 14:43:35 -0700
Subject: [PATCH 13/13] removed decorator since it needs C API for CPU memory release

---
 tests/nightly/test_large_array.py  | 108 -----------------------------
 tests/nightly/test_large_vector.py |  75 +-------------------
 2 files changed, 1 insertion(+), 182 deletions(-)

diff --git a/tests/nightly/test_large_array.py b/tests/nightly/test_large_array.py
index 748d6e49fabc..e51e220c232f 100644
--- a/tests/nightly/test_large_array.py
+++ b/tests/nightly/test_large_array.py
@@ -33,7 +33,6 @@
 LARGE_SIZE = LARGE_X * SMALL_Y
 
 
-@with_post_test_cleanup()
 def test_gluon_embedding():
     m = gluon.nn.Embedding(SMALL_Y, MEDIUM_X)
     m.initialize()
@@ -43,7 +42,6 @@ def test_gluon_embedding():
     assert b.asnumpy().size == LARGE_SIZE
 
 
-@with_post_test_cleanup()
 def test_ndarray_zeros():
     a = nd.zeros(shape=(LARGE_X, SMALL_Y))
     assert a[-1][0] == 0
@@ -51,14 +49,12 @@ def test_ndarray_zeros():
     assert a.size == LARGE_SIZE
 
 
-@with_post_test_cleanup()
 def test_ndarray_ones():
     a = nd.ones(shape=(LARGE_X, SMALL_Y))
     assert a[-1][0] == 1
     assert nd.sum(a).asnumpy() == LARGE_SIZE
 
 
-@with_post_test_cleanup()
 def test_ndarray_convert():
     a = nd.zeros(shape=(LARGE_X, SMALL_Y))
     b = a.astype(np.int32)
@@ -67,14 +63,12 @@ def test_ndarray_convert():
     assert isinstance(b, mx.nd.sparse.RowSparseNDArray)
 
 
-@with_post_test_cleanup()
 @with_seed()
 def test_ndarray_random_uniform():
     a = nd.random.uniform(shape=(LARGE_X, SMALL_Y))
     assert a[-1][0] != 0
 
 
-@with_post_test_cleanup()
 @with_seed()
 def test_ndarray_random_randint():
     a = nd.random.randint(100, 10000, shape=(LARGE_X, SMALL_Y))
@@ -89,7 +83,6 @@ def test_ndarray_random_randint():
     assert a[-1][0].dtype == np.int64
 
 
-@with_post_test_cleanup()
 @with_seed()
 def test_ndarray_random_exponential():
     scale_array = nd.random.uniform(shape=(MEDIUM_X, SMALL_X))
@@ -98,7 +91,6 @@ def test_ndarray_random_exponential():
     assert a.shape == (MEDIUM_X, SMALL_X, SMALL_X, SMALL_Y)
 
 
-@with_post_test_cleanup()
 @with_seed()
 def test_ndarray_random_gamma():
     alpha_array = nd.random.uniform(shape=(MEDIUM_X, SMALL_X))
@@ -109,7 +101,6 @@ def test_ndarray_random_gamma():
     assert a.shape == (MEDIUM_X, SMALL_X, SMALL_X, SMALL_Y)
 
 
-@with_post_test_cleanup()
 @with_seed()
 def test_ndarray_random_multinomial():
     # test 1 shape dimension
@@ -127,7 +118,6 @@ def test_ndarray_random_multinomial():
     assert a[0].shape == (LARGE_X, 2, SMALL_Y) and a[0].shape == a[1].shape
 
 
-@with_post_test_cleanup()
 @with_seed()
 def test_ndarray_random_generalized_negative_binomial():
     alpha_array = nd.random.uniform(shape=(MEDIUM_X, SMALL_X))
@@ -138,7 +128,6 @@ def test_ndarray_random_generalized_negative_binomial():
     assert a.shape == (MEDIUM_X, SMALL_X, SMALL_X, SMALL_Y)
 
 
-@with_post_test_cleanup()
 @with_seed()
 def test_ndarray_random_negative_binomial():
     k_array = nd.random.uniform(shape=(MEDIUM_X, SMALL_X))
@@ -149,7 +138,6 @@ def test_ndarray_random_negative_binomial():
     assert a.shape == (MEDIUM_X, SMALL_X, SMALL_X, SMALL_Y)
 
 
-@with_post_test_cleanup()
 @with_seed()
 def test_ndarray_random_normal():
     scale_array = nd.random.uniform(shape=(MEDIUM_X, SMALL_X))
@@ -159,7 +147,6 @@ def test_ndarray_random_normal():
     assert a.shape == (MEDIUM_X, SMALL_X, SMALL_X, SMALL_Y)
 
 
-@with_post_test_cleanup()
 @with_seed()
 def test_ndarray_random_poisson():
     lambda_array = nd.random.uniform(shape=(MEDIUM_X, SMALL_X))
@@ -168,7 +155,6 @@ def test_ndarray_random_poisson():
     assert a.shape == (MEDIUM_X, SMALL_X, SMALL_X, SMALL_Y)
 
 
-@with_post_test_cleanup()
 @with_seed()
 def test_ndarray_random_randn():
     a = nd.random.randn(LARGE_X, SMALL_Y)
@@ -177,7 +163,6 @@ def test_ndarray_random_randn():
 
 
 # Add check for (x,y,m,n) where x,y shape of loc,scale and m,n input shape
-@with_post_test_cleanup()
 @with_seed()
 def test_ndarray_random_shuffle():
     a = nd.ones(shape=(LARGE_X, SMALL_Y))
@@ -194,13 +179,11 @@ def test_ndarray_random_shuffle():
     assert a.shape == (LARGE_X, SMALL_Y)
 
 
-@with_post_test_cleanup()
 def test_ndarray_empty():
     a = nd.empty((LARGE_X, SMALL_Y))
     assert a.shape == (LARGE_X, SMALL_Y)
 
 
-@with_post_test_cleanup()
 def test_elementwise():
     a = nd.ones(shape=(LARGE_X, SMALL_Y))
     b = nd.ones(shape=(LARGE_X, SMALL_Y))
@@ -212,13 +195,11 @@ def test_elementwise():
     assert np.sum(res[-1].asnumpy() == 2) == a.shape[1]
 
 
-@with_post_test_cleanup()
 def test_reduce():
     a = nd.ones(shape=(LARGE_X, SMALL_Y))
     assert nd.sum(a).asnumpy() == a.shape[0] * a.shape[1]
 
 
-@with_post_test_cleanup()
 def test_dot():
     a = nd.ones(shape=(LARGE_X, SMALL_Y))
     b = nd.ones(shape=(SMALL_Y, SMALL_Y))
@@ -226,7 +207,6 @@ def test_dot():
     assert np.sum(res[-1].asnumpy() == SMALL_Y) == b.shape[1]
 
 
-@with_post_test_cleanup()
 def test_FullyConnected():
     a = nd.ones(shape=(LARGE_X, SMALL_Y))
     b = nd.ones(shape=(SMALL_Y, SMALL_Y))
@@ -234,7 +214,6 @@ def test_FullyConnected():
     assert np.sum(res[-1].asnumpy() == SMALL_Y) == b.shape[1]
 
 
-@with_post_test_cleanup()
 def test_broadcast():
     a = nd.ones(shape=(LARGE_X, SMALL_Y))
     b = nd.arange(0, LARGE_X).reshape(LARGE_X, 1)
@@ -244,14 +223,12 @@ def test_broadcast():
     assert np.sum(res[-1].asnumpy() == LARGE_X) == a.shape[1]
 
 
-@with_post_test_cleanup()
 def test_clip():
     a = nd.arange(0, LARGE_X * SMALL_Y).reshape(LARGE_X, SMALL_Y)
     res = nd.clip(a, a_min=100, a_max=1000)
     assert np.sum(res[-1].asnumpy() == 1000) == a.shape[1]
 
 
-@with_post_test_cleanup()
 def test_split():
     a = nd.arange(0, LARGE_X * SMALL_Y).reshape(LARGE_X, SMALL_Y)
     outs = nd.split(a, num_outputs=SMALL_Y, axis=1)
@@ -259,21 +236,18 @@ def test_split():
     assert result == a.shape[1]
 
 
-@with_post_test_cleanup()
 def test_argmin():
     a = nd.arange(0, LARGE_X * 
SMALL_Y).reshape(LARGE_X, SMALL_Y) idx = mx.nd.argmin(a, axis=0) assert idx.shape[0] == SMALL_Y -@with_post_test_cleanup() def test_tile(): a = nd.arange(0, LARGE_X).reshape(LARGE_X, 1) b = nd.tile(a, reps=(1, SMALL_Y)) assert np.sum(b[-1].asnumpy() == LARGE_X) == b.shape[1] -@with_post_test_cleanup() def test_take(): a = nd.ones(shape=(LARGE_X, SMALL_Y)) idx = nd.arange(LARGE_X - 1000, LARGE_X) @@ -281,21 +255,18 @@ def test_take(): assert np.sum(res[-1].asnumpy() == 1) == res.shape[1] -@with_post_test_cleanup() def test_slice(): a = nd.ones(shape=(LARGE_X, SMALL_Y)) res = nd.slice(a, begin=(LARGE_X-1000, 1), end=(LARGE_X, SMALL_Y)) assert np.sum(res[-1].asnumpy() == 1) == res.shape[1] -@with_post_test_cleanup() def test_slice_assign(): a = nd.ones(shape=(LARGE_X, SMALL_Y)) a[LARGE_X-1:LARGE_X] = 1000 assert np.sum(a[-1].asnumpy() == 1000) == a.shape[1] -@with_post_test_cleanup() def test_expand_dims(): a = nd.ones(shape=(LARGE_X, SMALL_Y)) res = nd.expand_dims(a, axis=1) @@ -303,7 +274,6 @@ def test_expand_dims(): assert res.shape == (a.shape[0], 1, a.shape[1]) -@with_post_test_cleanup() def test_squeeze(): a = nd.ones(shape=(LARGE_X, SMALL_Y)) data = nd.expand_dims(a, axis=1) @@ -311,7 +281,6 @@ def test_squeeze(): assert res.shape == a.shape -@with_post_test_cleanup() def test_broadcast_div(): a = nd.ones(shape=(LARGE_X, SMALL_Y)) b = nd.ones(shape=(LARGE_X, 1)) * 2 @@ -319,7 +288,6 @@ def test_broadcast_div(): assert np.sum(res[-1].asnumpy() == 0.5) == a.shape[1] -@with_post_test_cleanup() def test_Dense(ctx=mx.cpu(0)): data = mx.nd.ones(shape=(50*1000*1000, 100)) linear = gluon.nn.Dense(100) @@ -328,7 +296,6 @@ def test_Dense(ctx=mx.cpu(0)): assert res.shape == (50000000, 100) -@with_post_test_cleanup() def test_where(): a = nd.ones(shape=(LARGE_X, SMALL_Y)) b = nd.arange(0, LARGE_X * SMALL_Y).reshape(LARGE_X, SMALL_Y) @@ -339,7 +306,6 @@ def test_where(): assert np.sum(res[0].asnumpy() == 1) == 10 -@with_post_test_cleanup() def test_pick(): a = mx.nd.ones(shape=(256 * 35, 1024 * 1024)) b = mx.nd.ones(shape=(256 * 35, )) @@ -347,7 +313,6 @@ def test_pick(): assert res.shape == b.shape -@with_post_test_cleanup() def test_depthtospace(): def numpy_depth_to_space(x, blocksize): b, c, h, w = x.shape[0], x.shape[1], x.shape[2], x.shape[3] @@ -366,7 +331,6 @@ def numpy_depth_to_space(x, blocksize): assert_almost_equal(output.asnumpy(), expected, atol=1e-3, rtol=1e-3) -@with_post_test_cleanup() def test_spacetodepth(): def numpy_space_to_depth(x, blocksize): b, c, h, w = x.shape[0], x.shape[1], x.shape[2], x.shape[3] @@ -385,7 +349,6 @@ def numpy_space_to_depth(x, blocksize): assert_almost_equal(output.asnumpy(), expected, atol=1e-3, rtol=1e-3) -@with_post_test_cleanup() @with_seed() def test_diag(): a_np = np.random.random((LARGE_X, SMALL_Y)).astype(np.float32) @@ -411,7 +374,6 @@ def test_diag(): assert_almost_equal(r.asnumpy(), np.diag(a_np, k=k)) -@with_post_test_cleanup() @with_seed() def test_ravel_multi_index(): x1, y1 = rand_coord_2d((LARGE_X - 100), LARGE_X, 10, SMALL_Y) @@ -424,7 +386,6 @@ def test_ravel_multi_index(): assert np.sum(1 for i in range(idx.size) if idx[i] == idx_numpy[i]) == 3 -@with_post_test_cleanup() @with_seed() def test_unravel_index(): x1, y1 = rand_coord_2d((LARGE_X - 100), LARGE_X, 10, SMALL_Y) @@ -437,7 +398,6 @@ def test_unravel_index(): assert (indices_2d.asnumpy() == np.array(original_2d_indices)).all() -@with_post_test_cleanup() def test_transpose(): b = create_2d_tensor(rows=LARGE_X, columns=SMALL_Y) t = b.T @@ -445,7 +405,6 @@ def test_transpose(): 
assert t.shape == (SMALL_Y, LARGE_X) -@with_post_test_cleanup() def test_swapaxes(): b = create_2d_tensor(rows=LARGE_X, columns=SMALL_Y) t = nd.swapaxes(b, dim1=0, dim2=1) @@ -453,7 +412,6 @@ def test_swapaxes(): assert t.shape == (SMALL_Y, LARGE_X) -@with_post_test_cleanup() def test_flip(): b = create_2d_tensor(rows=LARGE_X, columns=SMALL_Y) t = nd.flip(b, axis=0) @@ -461,7 +419,6 @@ def test_flip(): assert t.shape == (LARGE_X, SMALL_Y) -@with_post_test_cleanup() def test_softmax(): input_data = mx.nd.ones((SMALL_Y, LARGE_X)) true_output = np.full((SMALL_Y, LARGE_X), (1 / SMALL_Y)) @@ -469,7 +426,6 @@ def test_softmax(): assert_almost_equal(output.asnumpy(), true_output, rtol=1e-5, atol=1e-5) -@with_post_test_cleanup() def test_argsort(): b = create_2d_tensor(rows=LARGE_X, columns=SMALL_Y) s = nd.argsort(b, axis=0, is_ascend=False, dtype=np.int64) @@ -477,7 +433,6 @@ def test_argsort(): assert (s[0].asnumpy() == (LARGE_X - 1)).all() -@with_post_test_cleanup() def test_sort(): b = create_2d_tensor(rows=LARGE_X, columns=SMALL_Y) s = nd.sort(b, axis=0, is_ascend=False) @@ -486,7 +441,6 @@ def test_sort(): assert np.sum(s[0].asnumpy() == 0).all() -@with_post_test_cleanup() def test_topk(): b = create_2d_tensor(rows=LARGE_X, columns=SMALL_Y) k = nd.topk(b, k=10, axis=0, dtype=np.int64) @@ -499,7 +453,6 @@ def test_topk(): assert l.sum() == np.sum(np.arange(0, SMALL_Y)) -@with_post_test_cleanup() def test_exponent_logarithm_operators(): a = 2*nd.ones(shape=(LARGE_X, SMALL_Y)) # exponent @@ -533,7 +486,6 @@ def test_exponent_logarithm_operators(): assert result.shape == a.shape -@with_post_test_cleanup() def test_power_operators(): a = 2*nd.ones(shape=(LARGE_X, SMALL_Y)) # sqrt @@ -567,7 +519,6 @@ def test_power_operators(): assert result.shape == a.shape -@with_post_test_cleanup() def test_sequence_mask(): # Sequence Mask input [max_sequence_length, batch_size, other_feature_dims] # test with input batch_size = 2 @@ -591,7 +542,6 @@ def test_sequence_mask(): assert b[-1][-1][-1] == -1 -@with_post_test_cleanup() def test_sequence_reverse(): a = nd.arange(0, LARGE_X * SMALL_Y * 2).reshape(LARGE_X, 2, SMALL_Y) # test as reverse operator @@ -608,7 +558,6 @@ def test_sequence_reverse(): assert b.shape == a.shape -@with_post_test_cleanup() def test_sequence_last(): a = nd.arange(0, LARGE_X * SMALL_Y * 2).reshape(LARGE_X, 2, SMALL_Y) @@ -626,7 +575,6 @@ def test_sequence_last(): assert b[0][-1] == a[1][0][-1] -@with_post_test_cleanup() def test_softmax_cross_entropy(): # dtype of input data, mxnet cross entropy set explicitly to float64 # numpy implicitly takes care of double precision @@ -651,7 +599,6 @@ def test_softmax_cross_entropy(): true_softmax_cross_entropy, rtol=1e-3, atol=1e-5) -@with_post_test_cleanup() def test_index_copy(): x = mx.nd.zeros((LARGE_X, SMALL_Y)) t = mx.nd.arange(1, SMALL_Y + 1).reshape((1, SMALL_Y)) @@ -661,7 +608,6 @@ def test_index_copy(): assert x[-1][-1] == t[0][-1] -@with_post_test_cleanup() def testSoftmaxOutput(): x = mx.sym.Variable('x') label = mx.sym.Variable('label') @@ -688,7 +634,6 @@ def testSoftmaxOutput(): # TODO: correctness of prelu (currently flaky) -@with_post_test_cleanup() def test_leaky_relu(): a = -1*mx.nd.ones((LARGE_X, SMALL_Y)) @@ -718,7 +663,6 @@ def test_rrelu(): test_rrelu() -@with_post_test_cleanup() def test_pooling(): a = mx.nd.ones((MEDIUM_X, 200, SMALL_Y, SMALL_Y)) @@ -752,7 +696,6 @@ def test_lp_pooling(): test_lp_pooling() -@with_post_test_cleanup() def test_layer_norm(): dtype = np.float32 forward_check_eps = 1E-3 @@ -793,7 +736,6 @@ 
def npy_layer_norm(data, gamma, beta, axis=1, eps=1E-5): # TODO: correctness of dropout # currently only test for dropout to work # since testing for correctness involves flakiness issue #14288 -@with_post_test_cleanup() def test_dropout(): shape = (LARGE_X, SMALL_Y) x = mx.sym.var('data') @@ -805,7 +747,6 @@ def test_dropout(): assert out[0].shape == shape -@with_post_test_cleanup() def test_activation(): x = mx.nd.ones((LARGE_X, SMALL_Y)) test_x = -2 @@ -838,7 +779,6 @@ def test_activation(): # TODO: correctness of batchnorm # in future, we could test if mean, var of output # matches target output's mean, var -@with_post_test_cleanup() def test_batchnorm(): shape = (LARGE_X, SMALL_Y) axis = 1 # default @@ -855,7 +795,6 @@ def test_batchnorm(): assert output.shape == shape -@with_post_test_cleanup() def test_add(): a = nd.ones(shape=(LARGE_X, SMALL_Y)) b = nd.ones(shape=(LARGE_X, SMALL_Y)) @@ -865,7 +804,6 @@ def test_add(): assert c.shape == a.shape -@with_post_test_cleanup() def test_sub(): a = 3*nd.ones(shape=(LARGE_X, SMALL_Y)) b = nd.ones(shape=(LARGE_X, SMALL_Y)) @@ -875,7 +813,6 @@ def test_sub(): assert c.shape == a.shape -@with_post_test_cleanup() def test_rsub(): a = 3*nd.ones(shape=(LARGE_X, SMALL_Y)) b = nd.ones(shape=(LARGE_X, SMALL_Y)) @@ -885,7 +822,6 @@ def test_rsub(): assert c.shape == a.shape -@with_post_test_cleanup() def test_neg(): a = nd.ones(shape=(LARGE_X, SMALL_Y)) c = a @@ -894,7 +830,6 @@ def test_neg(): assert c.shape == a.shape -@with_post_test_cleanup() def test_mul(): a = 2*nd.ones(shape=(LARGE_X, SMALL_Y)) b = 3*nd.ones(shape=(LARGE_X, SMALL_Y)) @@ -904,7 +839,6 @@ def test_mul(): assert c.shape == a.shape -@with_post_test_cleanup() def test_div(): a = 2*nd.ones(shape=(LARGE_X, SMALL_Y)) b = 3*nd.ones(shape=(LARGE_X, SMALL_Y)) @@ -914,7 +848,6 @@ def test_div(): assert c.shape == a.shape -@with_post_test_cleanup() def test_rdiv(): a = 2*nd.ones(shape=(LARGE_X, SMALL_Y)) b = 3*nd.ones(shape=(LARGE_X, SMALL_Y)) @@ -924,7 +857,6 @@ def test_rdiv(): assert c.shape == a.shape -@with_post_test_cleanup() def test_mod(): a = 2*nd.ones(shape=(LARGE_X, SMALL_Y)) b = 3*nd.ones(shape=(LARGE_X, SMALL_Y)) @@ -934,7 +866,6 @@ def test_mod(): assert c.shape == a.shape -@with_post_test_cleanup() def test_rmod(): a = 2*nd.ones(shape=(LARGE_X, SMALL_Y)) b = 3*nd.ones(shape=(LARGE_X, SMALL_Y)) @@ -944,7 +875,6 @@ def test_rmod(): assert c.shape == a.shape -@with_post_test_cleanup() def test_imod(): a = 2*nd.ones(shape=(LARGE_X, SMALL_Y)) b = 3*nd.ones(shape=(LARGE_X, SMALL_Y)) @@ -954,7 +884,6 @@ def test_imod(): assert c.shape == a.shape -@with_post_test_cleanup() def test_pow(): a = 2*nd.ones(shape=(LARGE_X, SMALL_Y)) b = 3*nd.ones(shape=(LARGE_X, SMALL_Y)) @@ -964,7 +893,6 @@ def test_pow(): assert c.shape == a.shape -@with_post_test_cleanup() def test_rpow(): a = 2*nd.ones(shape=(LARGE_X, SMALL_Y)) b = 3*nd.ones(shape=(LARGE_X, SMALL_Y)) @@ -974,21 +902,18 @@ def test_rpow(): assert c.shape == a.shape -@with_post_test_cleanup() def test_shape(): b = create_2d_tensor(rows=SMALL_Y, columns=LARGE_X) mx.nd.waitall() assert b.shape == (SMALL_Y, LARGE_X) -@with_post_test_cleanup() def test_size(): b = create_2d_tensor(rows=SMALL_Y, columns=LARGE_X) mx.nd.waitall() assert b.size == LARGE_SIZE -@with_post_test_cleanup() def test_copy(): a = nd.ones((SMALL_Y, LARGE_X)) b = a.copy() @@ -997,7 +922,6 @@ def test_copy(): assert b.size == LARGE_SIZE -@with_post_test_cleanup() def test_copy_to(): a = create_2d_tensor(rows=SMALL_Y, columns=LARGE_X) b = nd.array(np.zeros((SMALL_Y, 
LARGE_X))) @@ -1006,7 +930,6 @@ def test_copy_to(): assert b[-1][-1] == SMALL_Y-1 -@with_post_test_cleanup() def test_zeros_like(): a = nd.array(np.ones((SMALL_Y, LARGE_X))) b = nd.zeros_like(a) @@ -1014,7 +937,6 @@ def test_zeros_like(): assert b.shape == a.shape -@with_post_test_cleanup() def test_ones_like(): a = nd.array(np.zeros((SMALL_Y, LARGE_X))) b = nd.ones_like(a) @@ -1022,7 +944,6 @@ def test_ones_like(): assert b.shape == a.shape -@with_post_test_cleanup() def test_reshape_like(): a = nd.array(np.zeros((SMALL_Y, LARGE_X))) b = nd.array(np.zeros((SMALL_Y//2, LARGE_X*2))) @@ -1030,7 +951,6 @@ def test_reshape_like(): assert c.shape == (SMALL_Y//2, LARGE_X*2) -@with_post_test_cleanup() def test_flatten(): a = create_2d_tensor(rows=LARGE_X, columns=SMALL_Y).reshape((LARGE_X//2, 2, SMALL_Y)) b = nd.flatten(a) @@ -1039,7 +959,6 @@ def test_flatten(): assert b.shape == (LARGE_X//2, SMALL_Y*2) -@with_post_test_cleanup() def test_concat(): a = nd.array(np.ones((SMALL_Y, LARGE_X))) b = nd.array(np.zeros((SMALL_Y, LARGE_X))) @@ -1047,7 +966,6 @@ def test_concat(): assert c.shape == (b.shape[0]*2, LARGE_X) -@with_post_test_cleanup() def test_stack(): a = nd.array(np.ones((SMALL_Y, LARGE_X))) b = nd.array(np.zeros((SMALL_Y, LARGE_X))) @@ -1055,35 +973,30 @@ def test_stack(): assert c.shape == (b.shape[0], 2, LARGE_X) -@with_post_test_cleanup() def test_broadcast_axes(): a = create_2d_tensor(rows=1, columns=LARGE_X) b = nd.broadcast_axis(a, axis=[0], size=2) assert b.shape == (a.shape[0]*2, a.shape[1]) -@with_post_test_cleanup() def test_sum(): a = nd.array(np.ones((SMALL_Y, LARGE_X))) b = nd.sum(a, axis=1) assert b.shape[0] == SMALL_Y -@with_post_test_cleanup() def test_prod(): a = nd.array(np.ones((SMALL_Y, LARGE_X))) b = nd.prod(a, axis=1) assert b.shape[0] == SMALL_Y -@with_post_test_cleanup() def test_mean(): a = create_2d_tensor(rows=SMALL_Y, columns=LARGE_X) b = nd.mean(a, axis=0) assert b[0] == (SMALL_Y/2-1) -@with_post_test_cleanup() def test_min(): a = create_2d_tensor(rows=SMALL_Y, columns=LARGE_X) b = nd.min(a, axis=0) @@ -1091,7 +1004,6 @@ def test_min(): assert b[-1] == 0 -@with_post_test_cleanup() def test_max(): a = create_2d_tensor(rows=SMALL_Y, columns=LARGE_X) b = nd.max(a, axis=0) @@ -1099,7 +1011,6 @@ def test_max(): assert b[-1] == (SMALL_Y-1) -@with_post_test_cleanup() def test_norm(): a = np.array(np.full((1, LARGE_X), 3)) b = np.array(np.full((1, LARGE_X), 4)) @@ -1112,7 +1023,6 @@ def test_norm(): assert e[-1] == 7 -@with_post_test_cleanup() def test_argmax(): a = np.ones((SMALL_Y, LARGE_X)) b = np.zeros((SMALL_Y, LARGE_X)) @@ -1122,7 +1032,6 @@ def test_argmax(): assert d[-1] == d[0] == 0 -@with_post_test_cleanup() def test_relu(): def frelu(x): return np.maximum(x, 0.0) @@ -1140,7 +1049,6 @@ def frelu_grad(x): check_symbolic_forward(y, [xa], [ya]) -@with_post_test_cleanup() def test_sigmoid(): def fsigmoid(a): return np.divide(1.0, (1.0 + np.exp(-a))) @@ -1152,7 +1060,6 @@ def fsigmoid(a): check_symbolic_forward(y, [xa], [ya]) -@with_post_test_cleanup() def np_softmax(x, axis=-1, temperature=1.0): x = x - np.max(x, axis=axis, keepdims=True) x = np.exp(x/temperature) @@ -1160,7 +1067,6 @@ def np_softmax(x, axis=-1, temperature=1.0): return x -@with_post_test_cleanup() def test_log_softmax(): ndim = 2 shape = (SMALL_Y, LARGE_X) @@ -1170,7 +1076,6 @@ def test_log_softmax(): check_symbolic_forward(sym, [data], [np.log(np_softmax(data, axis=axis)+1e-20)]) -@with_post_test_cleanup() def test_iadd(): a = nd.array(np.ones((SMALL_Y, LARGE_X))) b = 
nd.array(np.ones((SMALL_Y, LARGE_X))) @@ -1180,7 +1085,6 @@ def test_iadd(): assert c[0][-1] == 2 -@with_post_test_cleanup() def test_isub(): a = nd.array(np.array(np.full((SMALL_Y, LARGE_X), 3))) b = nd.array(np.ones((SMALL_Y, LARGE_X))) @@ -1190,7 +1094,6 @@ def test_isub(): assert c[0][-1] == 2 -@with_post_test_cleanup() def test_imul(): a = nd.array(np.array(np.full((SMALL_Y, LARGE_X), 3))) b = nd.array(np.ones((SMALL_Y, LARGE_X))) @@ -1200,7 +1103,6 @@ def test_imul(): assert c[0][-1] == 3 -@with_post_test_cleanup() def test_idiv(): a = nd.array(np.array(np.full((SMALL_Y, LARGE_X), 4))) b = nd.array(np.array(np.full((SMALL_Y, LARGE_X), 2))) @@ -1210,7 +1112,6 @@ def test_idiv(): assert c[0][-1] == 2 -@with_post_test_cleanup() def test_eq(): a = nd.array(np.array(np.full((SMALL_Y, LARGE_X), 3))) b = nd.array(np.array(np.full((SMALL_Y, LARGE_X), 3))) @@ -1218,7 +1119,6 @@ def test_eq(): assert np.sum(c[0].asnumpy() == 1).all() -@with_post_test_cleanup() def test_neq(): a = nd.array(np.array(np.full((SMALL_Y, LARGE_X), 2))) b = nd.array(np.array(np.full((SMALL_Y, LARGE_X), 3))) @@ -1226,7 +1126,6 @@ def test_neq(): assert np.sum(c[0].asnumpy() == 1).all() -@with_post_test_cleanup() def test_lt(): a = nd.array(np.array(np.full((SMALL_Y, LARGE_X), 2))) b = nd.array(np.array(np.full((SMALL_Y, LARGE_X), 3))) @@ -1234,7 +1133,6 @@ def test_lt(): assert np.sum(d[0].asnumpy() == 1).all() -@with_post_test_cleanup() def test_lte(): a = nd.array(np.array(np.full((SMALL_Y, LARGE_X), 2))) b = nd.array(np.array(np.full((SMALL_Y, LARGE_X), 3))) @@ -1245,7 +1143,6 @@ def test_lte(): assert np.sum(e[0].asnumpy() == 1).all() -@with_post_test_cleanup() def test_gt(): a = nd.array(np.array(np.full((SMALL_Y, LARGE_X), 3))) b = nd.array(np.array(np.full((SMALL_Y, LARGE_X), 2))) @@ -1253,7 +1150,6 @@ def test_gt(): assert np.sum(d[0].asnumpy() == 1).all() -@with_post_test_cleanup() def test_gte(): a = nd.array(np.array(np.full((SMALL_Y, LARGE_X), 3))) b = nd.array(np.array(np.full((SMALL_Y, LARGE_X), 2))) @@ -1264,7 +1160,6 @@ def test_gte(): assert np.sum(e[0].asnumpy() == 1).all() -@with_post_test_cleanup() def test_slice_like(): a = create_2d_tensor(rows=SMALL_Y, columns=LARGE_X) b = nd.array(np.ones((SMALL_Y//2, LARGE_X//2))) @@ -1279,7 +1174,6 @@ def test_slice_like(): assert e[-1][-1] == (SMALL_Y-1) -@with_post_test_cleanup() def test_slice_axis(): a = create_2d_tensor(rows=SMALL_Y, columns=LARGE_X) c = nd.slice_axis(a, axis=0, begin=0, end=SMALL_Y//2) @@ -1290,7 +1184,6 @@ def test_slice_axis(): assert d[-1][-1] == (SMALL_Y-1) -@with_post_test_cleanup() def test_one_hot(): # default dtype of ndarray is float32 which cannot index elements over 2^32 a = nd.array([1, (VLARGE_X - 1)], dtype=np.int64) @@ -1299,7 +1192,6 @@ def test_one_hot(): b[1][-1] == 1 -@with_post_test_cleanup() def test_full(): a = nd.full((SMALL_Y, LARGE_X), 3) assert a.shape == (SMALL_Y, LARGE_X) diff --git a/tests/nightly/test_large_vector.py b/tests/nightly/test_large_vector.py index e5411d6ef7dc..aa6cb3d75b37 100644 --- a/tests/nightly/test_large_vector.py +++ b/tests/nightly/test_large_vector.py @@ -21,7 +21,7 @@ from mxnet.test_utils import rand_ndarray, assert_almost_equal, rand_coord_2d, create_vector from mxnet import gluon, nd -from tests.python.unittest.common import with_seed, with_post_test_cleanup +from tests.python.unittest.common import with_seed from nose.tools import with_setup # dimension constants @@ -29,7 +29,6 @@ MEDIUM_X = 1000000000 -@with_post_test_cleanup() def test_slice(): a = nd.ones(LARGE_X) res = 
@@ -37,7 +36,6 @@ def test_slice():
     assert res[0] == 1
 
 
-@with_post_test_cleanup()
 def test_ndarray_zeros():
     a = nd.zeros(shape=LARGE_X)
     assert a[-1] == 0
@@ -45,21 +43,18 @@ def test_ndarray_zeros():
     assert a.size == LARGE_X
 
 
-@with_post_test_cleanup()
 def test_ndarray_ones():
     a = nd.ones(shape=LARGE_X)
     assert a[-1] == 1
     assert nd.sum(a) == LARGE_X
 
 
-@with_post_test_cleanup()
 @with_seed()
 def test_ndarray_random_uniform():
     a = nd.random.uniform(shape=LARGE_X)
     assert a[-1] != 0
 
 
-@with_post_test_cleanup()
 @with_seed()
 def test_ndarray_random_randint():
     # check if randint can generate value greater than 2**32 (large)
@@ -70,13 +65,11 @@ def test_ndarray_random_randint():
     assert (a >= low).all() and (a < high).all()
 
 
-@with_post_test_cleanup()
 def test_ndarray_empty():
     a = nd.empty(LARGE_X)
     assert a.shape == (LARGE_X,)
 
 
-@with_post_test_cleanup()
 def test_elementwise():
     a = nd.ones(shape=LARGE_X)
     b = nd.ones(shape=LARGE_X)
@@ -88,14 +81,12 @@ def test_elementwise():
     assert res[-1].asnumpy() == 3
 
 
-@with_post_test_cleanup()
 def test_clip():
     a = create_vector(LARGE_X)
     res = nd.clip(a, a_min=100, a_max=1000)
     assert res[-1] == 1000
 
 
-@with_post_test_cleanup()
 def test_argmin():
     a = create_vector(LARGE_X, dtype=np.float32)
     assert a[0] == 0
@@ -104,7 +95,6 @@ def test_argmin():
     assert idx.shape[0] == 1
 
 
-@with_post_test_cleanup()
 def test_take():
     a = nd.ones(shape=LARGE_X)
     idx = nd.arange(LARGE_X - 1000, LARGE_X)
@@ -112,14 +102,12 @@ def test_take():
     assert np.sum(res.asnumpy() == 1) == res.shape[0]
 
 
-@with_post_test_cleanup()
 def test_slice_assign():
     a = nd.ones(shape=LARGE_X)
     a[LARGE_X-1:LARGE_X] = 1000
     assert np.sum(a[-1].asnumpy() == 1000) == 1
 
 
-@with_post_test_cleanup()
 def test_expand_dims():
     a = nd.ones(shape=LARGE_X)
     res = nd.expand_dims(a, axis=0)
@@ -127,7 +115,6 @@ def test_expand_dims():
     assert res.shape == (1, a.shape[0])
 
 
-@with_post_test_cleanup()
 def test_squeeze():
     a = nd.ones(shape=LARGE_X)
     data = nd.expand_dims(a, axis=0)
@@ -136,7 +123,6 @@ def test_squeeze():
     assert res.shape == a.shape
 
 
-@with_post_test_cleanup()
 def test_broadcast_div():
     a = nd.ones(shape=LARGE_X)
     b = nd.ones(shape=LARGE_X) * 2
@@ -144,7 +130,6 @@ def test_broadcast_div():
     assert np.sum(res.asnumpy() == 0.5) == a.shape[0]
 
 
-@with_post_test_cleanup()
 def test_Dense(ctx=mx.cpu(0)):
     data = mx.nd.ones(shape=LARGE_X)
     linear = gluon.nn.Dense(2)
@@ -153,14 +138,12 @@ def test_Dense(ctx=mx.cpu(0)):
     assert res.shape == (LARGE_X, 2)
 
 
-@with_post_test_cleanup()
 def test_argsort():
     a = create_vector(size=LARGE_X)
     s = nd.argsort(a, axis=0, is_ascend=False, dtype=np.int64)
     assert s[0] == (LARGE_X - 1)
 
 
-@with_post_test_cleanup()
 def test_sort():
     a = create_vector(size=LARGE_X)
 
@@ -176,7 +159,6 @@ def test_ascend(x):
     test_ascend(a)
 
 
-@with_post_test_cleanup()
 def test_topk():
     a = create_vector(size=LARGE_X)
     ind = nd.topk(a, k=10, axis=0, dtype=np.int64)
@@ -188,14 +170,12 @@ def test_topk():
     assert val == (LARGE_X - 1)
 
 
-@with_post_test_cleanup()
 def test_mean():
     a = nd.arange(-LARGE_X // 2, LARGE_X // 2 + 1, dtype=np.int64)
     b = nd.mean(a, axis=0)
     assert b == 0
 
 
-@with_post_test_cleanup()
 @with_seed()
 def test_ndarray_random_exponential():
     a = nd.random.exponential(shape=LARGE_X)
@@ -203,7 +183,6 @@ def test_ndarray_random_exponential():
     assert a.shape[0] == LARGE_X
 
 
-@with_post_test_cleanup()
 @with_seed()
 def test_ndarray_random_gamma():
     a = nd.random.gamma(shape=LARGE_X)
@@ -211,7 +190,6 @@ def test_ndarray_random_gamma():
     assert a.shape[0] == LARGE_X
 
 
-@with_post_test_cleanup()
 @with_seed()
 def test_ndarray_random_generalized_negative_binomial():
     a = nd.random.generalized_negative_binomial(shape=LARGE_X)
@@ -219,7 +197,6 @@ def test_ndarray_random_generalized_negative_binomial():
     assert a.shape[0] == LARGE_X
 
 
-@with_post_test_cleanup()
 @with_seed()
 def test_ndarray_random_multinomial():
     a = nd.random.multinomial(nd.random.uniform(shape=LARGE_X))
@@ -227,7 +204,6 @@ def test_ndarray_random_multinomial():
     assert a.shape[0] == 1
 
 
-@with_post_test_cleanup()
 @with_seed()
 def test_ndarray_random_negative_binomial():
     a = nd.random.negative_binomial(shape=LARGE_X)
@@ -235,14 +211,12 @@ def test_ndarray_random_negative_binomial():
     assert a.shape[0] == LARGE_X
 
 
-@with_post_test_cleanup()
 @with_seed()
 def test_ndarray_random_normal():
     a = nd.random.normal(shape=LARGE_X)
     assert a.shape[0] == LARGE_X
 
 
-@with_post_test_cleanup()
 @with_seed()
 def test_ndarray_random_poisson():
     a = nd.random.poisson(shape=LARGE_X)
@@ -250,14 +224,12 @@ def test_ndarray_random_poisson():
     assert a.shape[0] == LARGE_X
 
 
-@with_post_test_cleanup()
 @with_seed()
 def test_ndarray_random_randn():
     a = nd.random.randn(LARGE_X)
     assert a.shape[0] == LARGE_X
 
 
-@with_post_test_cleanup()
 @with_seed()
 def test_ndarray_random_shuffle():
     a = nd.ones(shape=LARGE_X)
@@ -270,7 +242,6 @@ def test_ndarray_random_shuffle():
     assert a.shape[0] == LARGE_X
 
 
-@with_post_test_cleanup()
 def test_exponent_logarithm_operators():
     a = 2*nd.ones(shape=LARGE_X)
     # exponent
@@ -304,7 +275,6 @@ def test_exponent_logarithm_operators():
     assert result.shape == a.shape
 
 
-@with_post_test_cleanup()
 def test_power_operators():
     a = 2*nd.ones(shape=LARGE_X)
     # sqrt
@@ -338,7 +308,6 @@ def test_power_operators():
     assert result.shape == a.shape
 
 
-@with_post_test_cleanup()
 def test_sequence_mask():
     # Sequence Mask input [max_sequence_length, batch_size]
     # test with input batch_size = 2
@@ -362,7 +331,6 @@ def test_sequence_mask():
     assert b[-1][-1] == -1
 
 
-@with_post_test_cleanup()
 def test_sequence_reverse():
     a = nd.arange(0, LARGE_X * 2).reshape(LARGE_X, 2)
     # test as reverse operator
@@ -378,7 +346,6 @@ def test_sequence_reverse():
     assert b.shape == a.shape
 
 
-@with_post_test_cleanup()
 def test_sequence_last():
     a = nd.arange(0, LARGE_X * 2).reshape(LARGE_X, 2)
 
@@ -400,7 +367,6 @@ def test_sequence_last():
 
 # TODO: correctness of layernorm
 # numpy implementation for large vector is flaky
-@with_post_test_cleanup()
 def test_layer_norm():
     axis = 0
     eps = 1E-5
@@ -416,7 +382,6 @@ def test_layer_norm():
 # TODO: correctness of batchnorm
 # in future, we could test if mean, var of output
 # matches target output's mean, var
-@with_post_test_cleanup()
 def test_batchnorm():
     shape = LARGE_X
     axis = 0  # since vector
@@ -432,7 +397,6 @@ def test_batchnorm():
     assert output.shape == (shape,)
 
 
-@with_post_test_cleanup()
 def test_add():
     a = nd.ones(shape=LARGE_X)
     b = nd.ones(shape=LARGE_X)
@@ -442,7 +406,6 @@ def test_add():
     assert c.shape == a.shape
 
 
-@with_post_test_cleanup()
 def test_sub():
     a = 3*nd.ones(shape=LARGE_X)
     b = nd.ones(shape=LARGE_X)
@@ -452,7 +415,6 @@ def test_sub():
     assert c.shape == a.shape
 
 
-@with_post_test_cleanup()
 def test_rsub():
     a = 3*nd.ones(shape=LARGE_X)
     b = nd.ones(shape=LARGE_X)
@@ -462,7 +424,6 @@ def test_rsub():
     assert c.shape == a.shape
 
 
-@with_post_test_cleanup()
 def test_neg():
     a = nd.ones(shape=LARGE_X)
     c = a
@@ -471,7 +432,6 @@ def test_neg():
     assert c.shape == a.shape
 
 
-@with_post_test_cleanup()
 def test_mul():
     a = 2*nd.ones(shape=LARGE_X)
     b = 3*nd.ones(shape=LARGE_X)
@@ -481,7 +441,6 @@ def test_mul():
     assert c.shape == a.shape
 
 
-@with_post_test_cleanup()
 def test_div():
     a = 2*nd.ones(shape=LARGE_X)
     b = 3*nd.ones(shape=LARGE_X)
@@ -491,7 +450,6 @@ def test_div():
     assert c.shape == a.shape
 
 
-@with_post_test_cleanup()
 def test_rdiv():
     a = 2*nd.ones(shape=LARGE_X)
     b = 3*nd.ones(shape=LARGE_X)
@@ -501,7 +459,6 @@ def test_rdiv():
     assert c.shape == a.shape
 
 
-@with_post_test_cleanup()
 def test_mod():
     a = 2*nd.ones(shape=LARGE_X)
     b = 3*nd.ones(shape=LARGE_X)
@@ -511,7 +468,6 @@ def test_mod():
     assert c.shape == a.shape
 
 
-@with_post_test_cleanup()
 def test_rmod():
     a = 2*nd.ones(shape=LARGE_X)
     b = 3*nd.ones(shape=LARGE_X)
@@ -521,7 +477,6 @@ def test_rmod():
     assert c.shape == a.shape
 
 
-@with_post_test_cleanup()
 def test_imod():
     a = 2*nd.ones(shape=LARGE_X)
     b = 3*nd.ones(shape=LARGE_X)
@@ -531,7 +486,6 @@ def test_imod():
     assert c.shape == a.shape
 
 
-@with_post_test_cleanup()
 def test_pow():
     a = 2*nd.ones(shape=LARGE_X)
     b = 3*nd.ones(shape=LARGE_X)
@@ -541,7 +495,6 @@ def test_pow():
     assert c.shape == a.shape
 
 
-@with_post_test_cleanup()
 def test_rpow():
     a = 2*nd.ones(shape=LARGE_X)
     b = 3*nd.ones(shape=LARGE_X)
@@ -551,7 +504,6 @@ def test_rpow():
     assert c.shape == a.shape
 
 
-@with_post_test_cleanup()
 def test_shape():
     b = create_vector(size=LARGE_X)
     # explicit wait_to_read()
@@ -559,7 +511,6 @@ def test_shape():
     assert b.shape[0] == LARGE_X
 
 
-@with_post_test_cleanup()
 def test_size():
     b = create_vector(size=LARGE_X)
     # explicit wait_to_read()
@@ -567,7 +518,6 @@ def test_size():
     assert b.size == LARGE_X
 
 
-@with_post_test_cleanup()
 def test_copy():
     a = nd.ones(LARGE_X)
     b = a.copy()
@@ -576,7 +526,6 @@ def test_copy():
     assert b.size == LARGE_X
 
 
-@with_post_test_cleanup()
 def test_copy_to():
     a = create_vector(size=LARGE_X)
     # keeping dtype same as input uses parallel copy which is much faster
@@ -587,7 +536,6 @@ def test_copy_to():
     assert b[0] == 0
 
 
-@with_post_test_cleanup()
 def test_zeros_like():
     a = nd.ones(LARGE_X)
     b = nd.zeros_like(a)
@@ -595,7 +543,6 @@ def test_zeros_like():
     assert b.shape == a.shape
 
 
-@with_post_test_cleanup()
 def test_ones_like():
     a = nd.zeros(LARGE_X)
     b = nd.ones_like(a)
@@ -603,7 +550,6 @@ def test_ones_like():
     assert b.shape == a.shape
 
 
-@with_post_test_cleanup()
 def test_concat():
     a = nd.ones(LARGE_X)
     b = nd.zeros(LARGE_X)
@@ -613,21 +559,18 @@ def test_concat():
     assert c.shape[0] == (2 * LARGE_X)
 
 
-@with_post_test_cleanup()
 def test_sum():
     a = nd.ones(LARGE_X)
     b = nd.sum(a, axis=0)
     assert b[0] == LARGE_X
 
 
-@with_post_test_cleanup()
 def test_prod():
     a = nd.ones(LARGE_X)
     b = nd.prod(a, axis=0)
     assert b[0] == 1
 
 
-@with_post_test_cleanup()
 def test_min():
     a = create_vector(size=LARGE_X)
     b = nd.min(a, axis=0)
@@ -635,14 +578,12 @@ def test_min():
     assert b[-1] == 0
 
 
-@with_post_test_cleanup()
 def test_max():
     a = create_vector(size=LARGE_X)
     b = nd.max(a, axis=0)
     assert b[0] == (LARGE_X - 1)
 
 
-@with_post_test_cleanup()
 def test_argmax():
     a = nd.ones(LARGE_X)
     b = nd.zeros(LARGE_X)
@@ -652,7 +593,6 @@ def test_argmax():
     assert d == 0
 
 
-@with_post_test_cleanup()
 def np_softmax(x, axis=-1, temperature=1.0):
     x = x - np.max(x, axis=axis, keepdims=True)
     x = np.exp(x/temperature)
@@ -660,7 +600,6 @@ def np_softmax(x, axis=-1, temperature=1.0):
     return x
 
 
-@with_post_test_cleanup()
 def test_iadd():
     a = nd.ones(LARGE_X)
     b = nd.ones(LARGE_X)
@@ -670,7 +609,6 @@ def test_iadd():
     assert c[-1] == 2
 
 
-@with_post_test_cleanup()
 def test_isub():
     a = nd.full(LARGE_X, 3)
     b = nd.ones(LARGE_X)
@@ -680,7 +618,6 @@ def test_isub():
     assert c[-1] == 2
 
 
-@with_post_test_cleanup()
 def test_imul():
     a = nd.full(LARGE_X, 3)
     b = nd.ones(LARGE_X)
@@ -690,7 +627,6 @@ def test_imul():
     assert c[-1] == 3
 
 
-@with_post_test_cleanup()
 def test_idiv():
     a = nd.full(LARGE_X, 4)
     b = nd.full(LARGE_X, 2)
@@ -700,7 +636,6 @@ def test_idiv():
     assert c[-1] == 2
 
 
-@with_post_test_cleanup()
 def test_eq():
     a = nd.full(LARGE_X, 3)
     b = nd.full(LARGE_X, 3)
@@ -708,7 +643,6 @@ def test_eq():
     assert (c.asnumpy() == 1).all()
 
 
-@with_post_test_cleanup()
 def test_neq():
     a = nd.full(LARGE_X, 2)
     b = nd.full(LARGE_X, 3)
@@ -716,7 +650,6 @@ def test_neq():
     assert (c.asnumpy() == 1).all()
 
 
-@with_post_test_cleanup()
 def test_lt():
     a = nd.full(LARGE_X, 2)
     b = nd.full(LARGE_X, 3)
@@ -724,7 +657,6 @@ def test_lt():
     assert (d.asnumpy() == 1).all()
 
 
-@with_post_test_cleanup()
 def test_lte():
     a = nd.full(LARGE_X, 2)
     b = nd.full(LARGE_X, 3)
@@ -735,7 +667,6 @@ def test_lte():
     assert (d.asnumpy() == 1).all()
 
 
-@with_post_test_cleanup()
 def test_gt():
     a = nd.full(LARGE_X, 3)
     b = nd.full(LARGE_X, 2)
@@ -743,7 +674,6 @@ def test_gt():
     assert (d.asnumpy() == 1).all()
 
 
-@with_post_test_cleanup()
 def test_gte():
     a = nd.full(LARGE_X, 3)
     b = nd.full(LARGE_X, 2)
@@ -754,7 +684,6 @@ def test_gte():
     assert (d.asnumpy() == 1).all()
 
 
-@with_post_test_cleanup()
 def test_slice_like():
     a = create_vector(size=LARGE_X)
     b = nd.ones(LARGE_X//2)
@@ -764,7 +693,6 @@ def test_slice_like():
     assert c[-1] == (LARGE_X // 2 - 1)
 
 
-@with_post_test_cleanup()
 def test_slice_axis():
     a = create_vector(size=LARGE_X)
     med = LARGE_X // 2
@@ -773,7 +701,6 @@ def test_slice_axis():
     assert c[-1][0] == (med - 1)
 
 
-@with_post_test_cleanup()
 def test_full():
     a = nd.full(LARGE_X, 3)
     assert a.shape[0] == LARGE_X