From fb9e53c30b26e94a9bbccd4c5f11ede477e2e058 Mon Sep 17 00:00:00 2001
From: Pedro Larroy
Date: Tue, 6 Aug 2019 14:37:19 -0700
Subject: [PATCH] Reduce numerical error on numerical gradient calculations

Fixes #11720
Overall will reduce flakiness of tests using numerical gradients
---
 python/mxnet/test_utils.py | 16 ++--------------
 1 file changed, 2 insertions(+), 14 deletions(-)

diff --git a/python/mxnet/test_utils.py b/python/mxnet/test_utils.py
index 0e260ceb7676..65d74163c530 100644
--- a/python/mxnet/test_utils.py
+++ b/python/mxnet/test_utils.py
@@ -906,18 +906,6 @@ def check_numeric_gradient(sym, location, aux_states=None, numeric_eps=1e-3, rto
     if ctx is None:
         ctx = default_context()
 
-    def random_projection(shape):
-        """Get a random weight matrix with not too small elements
-
-        Parameters
-        ----------
-        shape : list or tuple
-        """
-        # random_projection should not have elements too small,
-        # otherwise too much precision is lost in numerical gradient
-        plain = np.random.rand(*shape) + 0.1
-        return plain
-
     location = _parse_location(sym=sym, location=location, ctx=ctx, dtype=dtype)
     location_npy = {k:v.asnumpy() for k, v in location.items()}
     aux_states = _parse_aux_states(sym=sym, aux_states=aux_states, ctx=ctx,
@@ -941,11 +929,11 @@ def random_projection(shape):
     input_shape = {k: v.shape for k, v in location.items()}
     _, out_shape, _ = sym.infer_shape(**input_shape)
     proj = mx.sym.Variable("__random_proj")
-    out = sym * proj
+    out = sym + proj
     out = mx.sym.make_loss(out)
 
     location = dict(list(location.items()) +
-                    [("__random_proj", mx.nd.array(random_projection(out_shape[0]),
+                    [("__random_proj", mx.nd.random.uniform(shape=out_shape[0],
                                                    ctx=ctx, dtype=dtype))])
     args_grad_npy = dict([(k, np.random.normal(0, 0.01, size=location[k].shape))
                           for k in grad_nodes]
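
For context, below is a minimal sketch of the central-difference estimate that
check_numeric_gradient builds on. This is not MXNet's implementation; the names
numeric_grad, f, and eps are illustrative and do not appear in the patch. It
shows the failure mode the deleted comment warned about: the rounding error of
the difference quotient grows as the function's output values shrink, so
weighting the output by near-zero random values (as sym * proj could) degrades
the estimate, whereas the patch's additive projection leaves the backward pass
unweighted and draws the projection on-device in the requested dtype.

import numpy as np

def numeric_grad(f, x, eps=1e-3):
    # Central differences: (f(x+eps) - f(x-eps)) / (2*eps) per element.
    # The truncation error is O(eps**2), but a rounding term grows when
    # f's output is scaled toward zero -- the issue the patch addresses.
    x = x.astype(np.float64)  # astype copies, so the caller's array is untouched
    grad = np.zeros_like(x)
    it = np.nditer(x, flags=['multi_index'])
    while not it.finished:
        idx = it.multi_index
        orig = x[idx]
        x[idx] = orig + eps
        f_plus = f(x)
        x[idx] = orig - eps
        f_minus = f(x)
        x[idx] = orig  # restore before moving to the next element
        grad[idx] = (f_plus - f_minus) / (2.0 * eps)
        it.iternext()
    return grad

# Usage: d/dx of sum(x**2) is 2x.
print(numeric_grad(lambda v: (v ** 2).sum(), np.array([1.0, 2.0, 3.0])))
# prints approximately [2. 4. 6.]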