diff --git a/python/mxnet/test_utils.py b/python/mxnet/test_utils.py
index 5b63628db6a7..4171b86ebcc1 100644
--- a/python/mxnet/test_utils.py
+++ b/python/mxnet/test_utils.py
@@ -1096,7 +1096,8 @@ def tile_dict(dict, keep_stype=False):
     symbolic_grads = {k:v.asnumpy()[0] if v.stype != 'csr' else v[0]
                       for k, v in symbolic_grads.items()}
-    location = {k:v.asnumpy() for k, v in location.items()}
+    for k in location:
+        location[k] = location[k].asnumpy()
 
     numeric_gradients = numeric_grad(
         executor, location, grad_nodes, aux_states,
         use_approx_grad=use_approx_grad,
diff --git a/tests/python/gpu/test_operator_gpu.py b/tests/python/gpu/test_operator_gpu.py
index 7a7c6f69dd77..8d10777a7c0d 100644
--- a/tests/python/gpu/test_operator_gpu.py
+++ b/tests/python/gpu/test_operator_gpu.py
@@ -2025,6 +2025,10 @@ def test_bilinear_sampler_versions():
 
     assert_almost_equal(exe.grad_dict['grid'].asnumpy(), exe_list[ref_idx].grad_dict['grid'].asnumpy(), rtol=1e-3, atol=1e-5)
 
+@with_seed()
+def test_allclose_function_gpu():
+    allclose_function([mx.cpu(), mx.gpu(0)])
+
 def test_context_num_gpus():
     # Test that num_gpus reports at least one GPU, as the test is run on a GPU host.
     assert mx.context.num_gpus() > 0
diff --git a/tests/python/unittest/test_operator.py b/tests/python/unittest/test_operator.py
index 1afa7dea1606..b622b6e5351c 100644
--- a/tests/python/unittest/test_operator.py
+++ b/tests/python/unittest/test_operator.py
@@ -6795,32 +6795,40 @@ def f(x, a, b, c):
     # check backward using finite difference
     check_numeric_gradient(quad_sym, [data_np], atol=0.001)
 
-@with_seed()
-def test_allclose_function():
+def allclose_function(contexts=None):
     def getRandom(base, percent = 1.):
         return base * (1 + percent * (2 * np.random.random_sample() - 1.) / 100)
 
+    if contexts is None:
+        contexts = [default_context()]
+
+    title = 'exp'
+    for ctx in contexts:
+        title += ' cpu' if ctx == mx.cpu() else ' gpu'
+
+    title += ' nElem shape'
+    num_ctx = len(contexts)
     result = [False, False]
     for dtype in [np.float16, np.float32, np.float64]:
         rtol = getRandom(1e-2 if dtype is np.float16 else 1e-5)
         atol = getRandom(1e-4 if dtype is np.float16 else 1e-7)
-        print('\n{} atol = {} rtol = {}'.format(dtype, atol, rtol))
-        print('exp cpu gpu nElem shape')
+        print('\nnumpy.{}: atol = {} rtol = {}'.format(dtype.__name__, atol, rtol))
+        print(title)
         for ndim in range(1, 10):
             shape = rand_shape_nd(ndim, 8)
             a_np = np.random.randn(*shape).astype(dtype)
             b_np = (a_np + np.random.randn(*shape).astype(dtype) / 10000000).astype(dtype)
             expected = np.allclose(a_np, b_np, rtol, atol)
 
-            for i, ctx in enumerate([mx.cpu(), mx.gpu(0)]):
+            for n, ctx in enumerate(contexts):
                 a_ctx = mx.nd.array(a_np, dtype = dtype, ctx=ctx)
                 b_ctx = mx.nd.array(b_np, dtype = dtype, ctx=ctx)
                 output = mx.nd.contrib.allclose(a_ctx, b_ctx, rtol=rtol, atol=atol)
-                result[i] = output.asnumpy() == 1
-                if expected != result[i]:
+                result[n] = output.asnumpy() == 1
+                if expected != result[n]:
                     # Preparing the output of elements of the array, which are considered as "not close" AND
                     # corresponding elements of comparison CPU/GPU/Python vectors, which are considered as "close"
-                    v_ctx = 'CPU' if i == 0 else 'GPU'
+                    v_ctx = 'CPU' if ctx == mx.cpu() else 'GPU'
                     if expected:
                         v_cmp = 'Python'
                         a_b = a_ctx.asnumpy()
@@ -6855,9 +6863,18 @@ def getRandom(base, percent = 1.):
 
                             diff = np.abs(a_values[j][i]-b_values[j][i]) - atol + rtol*abs(b_values[j][i])
                             print('{}: {} {} {}'.format('%6s'%v_ctx, a_values[j][i], b_values[j][i], diff))
-            print(' {0:d} {1:d} {2:d} {3:10d} {4:}'.format(expected, result[0], result[1], np.prod(shape), shape))
-            if expected != result[0] or expected != result[1]:
-                assert(False)
+
+            if num_ctx == 1:
+                print(' {0:d} {1:d} {2:10d} {3:}'.format(expected, result[0], np.prod(shape), shape))
+            else:
+                print(' {0:d} {1:d} {2:d} {3:10d} {4:}'.format(expected, result[0], result[1], np.prod(shape), shape))
+
+            if expected != result[0] or num_ctx > 1 and expected != result[1]:
+                assert False
+
+@with_seed()
+def test_allclose_function():
+    allclose_function()
 
 @with_seed()
 def test_histogram():
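
Note on usage (not part of the patch): the refactoring above turns the former GPU-only test into a context-agnostic helper, allclose_function(contexts), which test_allclose_function() runs on the default context and test_allclose_function_gpu() runs on [mx.cpu(), mx.gpu(0)]. Below is a minimal sketch of the check the helper performs, assuming an MXNet build that provides the mx.nd.contrib.allclose operator; the function name check_allclose_once is illustrative and does not appear in the patch:

    import numpy as np
    import mxnet as mx

    def check_allclose_once(ctx=mx.cpu(), dtype=np.float32, rtol=1e-5, atol=1e-7):
        # Build two arrays that differ by a perturbation far below the tolerances.
        a_np = np.random.randn(3, 4).astype(dtype)
        b_np = (a_np + np.random.randn(3, 4).astype(dtype) / 10000000).astype(dtype)
        expected = np.allclose(a_np, b_np, rtol, atol)   # NumPy reference answer
        a_nd = mx.nd.array(a_np, dtype=dtype, ctx=ctx)
        b_nd = mx.nd.array(b_np, dtype=dtype, ctx=ctx)
        # The operator returns a one-element array holding 1 when all elements are close.
        output = mx.nd.contrib.allclose(a_nd, b_nd, rtol=rtol, atol=atol)
        assert (output.asnumpy() == 1) == expected

    check_allclose_once()              # default (CPU) context, as in test_allclose_function()
    # check_allclose_once(mx.gpu(0))   # GPU context, as in test_allclose_function_gpu()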