From 0ba1ce207ae57a86ba596251c6f4581c9e356180 Mon Sep 17 00:00:00 2001
From: JiangZhaoh <54654391+JiangZhaoh@users.noreply.github.com>
Date: Tue, 22 Oct 2019 11:36:15 +0800
Subject: [PATCH] [numpy] op test in new pattern (#16556)

* all changes

fix sanity problem

change is_op_runnable's location

Fix

* Disable fp16 test since TVM op is not ready for this dtype

* Assert equal for boolean ndarrays

* Fix test util
---
 python/mxnet/numpy_dispatch_protocol.py       |   8 ++
 python/mxnet/test_utils.py                    |   3 +
 .../unittest/test_numpy_interoperability.py   | 102 ++++++++++++++++++
 3 files changed, 113 insertions(+)

diff --git a/python/mxnet/numpy_dispatch_protocol.py b/python/mxnet/numpy_dispatch_protocol.py
index 3682334ebbea..6db44fad7780 100644
--- a/python/mxnet/numpy_dispatch_protocol.py
+++ b/python/mxnet/numpy_dispatch_protocol.py
@@ -117,6 +117,8 @@ def _run_with_array_ufunc_proto(*args, **kwargs):
     'transpose',
     'unique',
     'var',
+    'vdot',
+    'vstack',
     'zeros_like',
     'linalg.norm',
     'trace',
@@ -214,6 +216,12 @@ def _register_array_function():
     'trunc',
     'floor',
     'logical_not',
+    'equal',
+    'not_equal',
+    'less',
+    'less_equal',
+    'greater',
+    'greater_equal'
 ]
 
 
diff --git a/python/mxnet/test_utils.py b/python/mxnet/test_utils.py
index 6ee37982a124..4862aee8570d 100644
--- a/python/mxnet/test_utils.py
+++ b/python/mxnet/test_utils.py
@@ -565,6 +565,9 @@ def assert_almost_equal(a, b, rtol=None, atol=None, names=('a', 'b'), equal_nan=
         b = b.asnumpy()
 
     if use_np_allclose:
+        if hasattr(a, 'dtype') and a.dtype == np.bool_ and hasattr(b, 'dtype') and b.dtype == np.bool_:
+            np.testing.assert_equal(a, b)
+            return
         if almost_equal(a, b, rtol, atol, equal_nan=equal_nan):
             return
     else:
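Note: the boolean branch added to assert_almost_equal above is needed because allclose-style comparison effectively subtracts its inputs, and NumPy raises a TypeError when subtracting boolean ndarrays, so boolean results must be compared exactly. A minimal standalone sketch of the same guard; the helper name and default tolerances here are illustrative, not the ones used in test_utils.py:

    import numpy as np

    def _assert_arrays_match(a, b, rtol=1e-5, atol=1e-8):
        # Boolean ndarrays are routed to an exact comparison: np.isclose
        # computes a difference internally, and subtracting boolean arrays
        # raises a TypeError on modern NumPy.
        if getattr(a, 'dtype', None) == np.bool_ and getattr(b, 'dtype', None) == np.bool_:
            np.testing.assert_equal(a, b)
            return
        np.testing.assert_allclose(a, b, rtol=rtol, atol=atol)

    _assert_arrays_match(np.array([True, False]), np.array([True, False]))
    _assert_arrays_match(np.array([1.0, 2.0]), np.array([1.0, 2.0 + 1e-9]))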
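Note: registering 'vdot', 'vstack', and the six comparison ops in numpy_dispatch_protocol.py above is what routes plain NumPy calls such as np.vdot(x, y) to MXNet's kernels when the arguments are mxnet.numpy ndarrays. A minimal sketch of the underlying __array_function__ protocol (NumPy >= 1.17), using a hypothetical WrappedArray class rather than mxnet's actual registration machinery:

    import numpy as onp

    class WrappedArray:
        # Hypothetical stand-in for mx.np.ndarray, showing the protocol shape.
        def __init__(self, data):
            self.data = onp.asarray(data)

        def __array_function__(self, func, types, args, kwargs):
            # NumPy passes the official function object; dispatch on identity.
            if func is onp.vdot:
                a, b = [x.data if isinstance(x, WrappedArray) else x for x in args]
                return onp.vdot(a, b)
            return NotImplemented

    x = WrappedArray([1.0, 2.0, 3.0])
    print(onp.vdot(x, x))  # dispatched through WrappedArray -> 14.0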
diff --git a/tests/python/unittest/test_numpy_interoperability.py b/tests/python/unittest/test_numpy_interoperability.py
index e16be469aa3b..9e8156f3239c 100644
--- a/tests/python/unittest/test_numpy_interoperability.py
+++ b/tests/python/unittest/test_numpy_interoperability.py
@@ -32,6 +32,14 @@
 _INT_DTYPES = [np.int8, np.int32, np.int64, np.uint8]
 _FLOAT_DTYPES = [np.float16, np.float32, np.float64]
 _DTYPES = _INT_DTYPES + _FLOAT_DTYPES
+_TVM_OPS = [
+    'equal',
+    'not_equal',
+    'less',
+    'less_equal',
+    'greater',
+    'greater_equal'
+]
 
 
 class OpArgMngr(object):
@@ -535,6 +543,13 @@ def _add_workload_roll():
 
 def _add_workload_stack(array_pool):
     OpArgMngr.add_workload('stack', [array_pool['4x1']] * 2)
+    OpArgMngr.add_workload('stack', [array_pool['4x1']] * 2, 1)
+    OpArgMngr.add_workload('stack', [array_pool['4x1']] * 2, -1)
+    OpArgMngr.add_workload('stack', [array_pool['4x1']] * 2, -2)
+    OpArgMngr.add_workload('stack', np.random.normal(size=(2, 4, 3)), 2)
+    OpArgMngr.add_workload('stack', np.random.normal(size=(2, 4, 3)), -3)
+    OpArgMngr.add_workload('stack', np.array([[], [], []]), 1)
+    OpArgMngr.add_workload('stack', np.array([[], [], []]))
 
 
 def _add_workload_sum():
@@ -590,10 +605,22 @@ def _add_workload_unique():
 
 def _add_workload_var(array_pool):
     OpArgMngr.add_workload('var', array_pool['4x1'])
+    OpArgMngr.add_workload('var', np.array([np.float16(1.)]))
+    OpArgMngr.add_workload('var', np.array([1]))
+    OpArgMngr.add_workload('var', np.array([1.]))
+    OpArgMngr.add_workload('var', np.array([[1, 2, 3], [4, 5, 6]]))
+    OpArgMngr.add_workload('var', np.array([[1, 2, 3], [4, 5, 6]]), 0)
+    OpArgMngr.add_workload('var', np.array([[1, 2, 3], [4, 5, 6]]), 1)
+    OpArgMngr.add_workload('var', np.array([np.nan]))
+    OpArgMngr.add_workload('var', np.array([1, -1, 1, -1]))
+    OpArgMngr.add_workload('var', np.array([1, 2, 3, 4], dtype='f8'))
 
 
 def _add_workload_zeros_like(array_pool):
     OpArgMngr.add_workload('zeros_like', array_pool['4x1'])
+    OpArgMngr.add_workload('zeros_like', np.random.uniform(size=(3, 3)).astype(np.float64))
+    OpArgMngr.add_workload('zeros_like', np.random.uniform(size=(3, 3)).astype(np.float32))
+    OpArgMngr.add_workload('zeros_like', np.random.randint(2, size=(3, 3)))
 
 
 def _add_workload_outer():
@@ -933,6 +960,71 @@ def _add_workload_logical_not(array_pool):
     OpArgMngr.add_workload('logical_not', np.array([True, False, True, False], dtype=np.bool))
 
 
+def _add_workload_vdot():
+    OpArgMngr.add_workload('vdot', np.random.normal(size=(2, 4)), np.random.normal(size=(4, 2)))
+    OpArgMngr.add_workload('vdot', np.random.normal(size=(2, 4)).astype(np.float64), np.random.normal(size=(2, 4)).astype(np.float64))
+
+
+def _add_workload_vstack(array_pool):
+    OpArgMngr.add_workload('vstack', (array_pool['4x1'], np.random.uniform(size=(5, 1))))
+    OpArgMngr.add_workload('vstack', array_pool['4x1'])
+    OpArgMngr.add_workload('vstack', array_pool['1x1x0'])
+
+
+def _add_workload_equal(array_pool):
+    # TODO(junwu): fp16 does not work yet with TVM generated ops
+    # OpArgMngr.add_workload('equal', np.array([0, 1, 2, 4, 2], dtype=np.float16), np.array([-2, 5, 1, 4, 3], dtype=np.float16))
+    OpArgMngr.add_workload('equal', np.array([0, 1, 2, 4, 2], dtype=np.float32), np.array([-2, 5, 1, 4, 3], dtype=np.float32))
+    # TODO(junwu): mxnet currently does not have a consistent behavior as NumPy in dealing with np.nan
+    # OpArgMngr.add_workload('equal', np.array([np.nan]), np.array([np.nan]))
+    OpArgMngr.add_workload('equal', array_pool['4x1'], array_pool['1x2'])
+
+
+def _add_workload_not_equal(array_pool):
+    # TODO(junwu): fp16 does not work yet with TVM generated ops
+    # OpArgMngr.add_workload('not_equal', np.array([0, 1, 2, 4, 2], dtype=np.float16), np.array([-2, 5, 1, 4, 3], dtype=np.float16))
+    OpArgMngr.add_workload('not_equal', np.array([0, 1, 2, 4, 2], dtype=np.float32), np.array([-2, 5, 1, 4, 3], dtype=np.float32))
+    # TODO(junwu): mxnet currently does not have a consistent behavior as NumPy in dealing with np.nan
+    # OpArgMngr.add_workload('not_equal', np.array([np.nan]), np.array([np.nan]))
+    OpArgMngr.add_workload('not_equal', array_pool['4x1'], array_pool['1x2'])
+
+
+def _add_workload_greater(array_pool):
+    # TODO(junwu): fp16 does not work yet with TVM generated ops
+    # OpArgMngr.add_workload('greater', np.array([0, 1, 2, 4, 2], dtype=np.float16), np.array([-2, 5, 1, 4, 3], dtype=np.float16))
+    OpArgMngr.add_workload('greater', np.array([0, 1, 2, 4, 2], dtype=np.float32), np.array([-2, 5, 1, 4, 3], dtype=np.float32))
+    OpArgMngr.add_workload('greater', array_pool['4x1'], array_pool['1x2'])
+    # TODO(junwu): mxnet currently does not have a consistent behavior as NumPy in dealing with np.nan
+    # OpArgMngr.add_workload('greater', np.array([np.nan]), np.array([np.nan]))
+
+
+def _add_workload_greater_equal(array_pool):
+    # TODO(junwu): fp16 does not work yet with TVM generated ops
+    # OpArgMngr.add_workload('greater_equal', np.array([0, 1, 2, 4, 2], dtype=np.float16), np.array([-2, 5, 1, 4, 3], dtype=np.float16))
+    OpArgMngr.add_workload('greater_equal', np.array([0, 1, 2, 4, 2], dtype=np.float32), np.array([-2, 5, 1, 4, 3], dtype=np.float32))
+    OpArgMngr.add_workload('greater_equal', array_pool['4x1'], array_pool['1x2'])
+    # TODO(junwu): mxnet currently does not have a consistent behavior as NumPy in dealing with np.nan
+    # OpArgMngr.add_workload('greater_equal', np.array([np.nan]), np.array([np.nan]))
+
+
+def _add_workload_less(array_pool):
+    # TODO(junwu): fp16 does not work yet with TVM generated ops
+    # OpArgMngr.add_workload('less', np.array([0, 1, 2, 4, 2], dtype=np.float16), np.array([-2, 5, 1, 4, 3], dtype=np.float16))
+    OpArgMngr.add_workload('less', np.array([0, 1, 2, 4, 2], dtype=np.float32), np.array([-2, 5, 1, 4, 3], dtype=np.float32))
+    OpArgMngr.add_workload('less', array_pool['4x1'], array_pool['1x2'])
+    # TODO(junwu): mxnet currently does not have a consistent behavior as NumPy in dealing with np.nan
+    # OpArgMngr.add_workload('less', np.array([np.nan]), np.array([np.nan]))
+
+
+def _add_workload_less_equal(array_pool):
+    # TODO(junwu): fp16 does not work yet with TVM generated ops
+    # OpArgMngr.add_workload('less_equal', np.array([0, 1, 2, 4, 2], dtype=np.float16), np.array([-2, 5, 1, 4, 3], dtype=np.float16))
+    OpArgMngr.add_workload('less_equal', np.array([0, 1, 2, 4, 2], dtype=np.float32), np.array([-2, 5, 1, 4, 3], dtype=np.float32))
+    OpArgMngr.add_workload('less_equal', array_pool['4x1'], array_pool['1x2'])
+    # TODO(junwu): mxnet currently does not have a consistent behavior as NumPy in dealing with np.nan
+    # OpArgMngr.add_workload('less_equal', np.array([np.nan]), np.array([np.nan]))
+
+
 @use_np
 def _prepare_workloads():
     array_pool = {
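Note: every _add_workload_* helper above records concrete argument sets under an operator name, and the dispatch test later replays them through the official NumPy functions, which bounce back into mxnet.numpy via the protocol sketched earlier. A toy sketch of that registry pattern; OpArgMngr's real implementation lives in this file, and the names below are illustrative only:

    import numpy as onp

    _WORKLOADS = {}  # operator name -> list of recorded (args, kwargs) calls

    def add_workload(name, *args, **kwargs):
        # Record one invocation of operator `name` for later replay.
        _WORKLOADS.setdefault(name, []).append((args, kwargs))

    def get_workloads(name):
        return _WORKLOADS.get(name)

    # Record two 'vstack' workloads, then replay them through the official
    # NumPy function, mirroring what the interoperability checker does with
    # mxnet.numpy arrays.
    add_workload('vstack', ([1, 2], [3, 4]))
    add_workload('vstack', ([[1], [2]], [[3], [4]]))
    for args, kwargs in get_workloads('vstack'):
        print(onp.vstack(*args, **kwargs))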
@@ -1028,6 +1120,14 @@ def _prepare_workloads():
     _add_workload_turnc(array_pool)
     _add_workload_floor(array_pool)
     _add_workload_logical_not(array_pool)
+    _add_workload_vdot()
+    _add_workload_vstack(array_pool)
+    _add_workload_equal(array_pool)
+    _add_workload_not_equal(array_pool)
+    _add_workload_greater(array_pool)
+    _add_workload_greater_equal(array_pool)
+    _add_workload_less(array_pool)
+    _add_workload_less_equal(array_pool)
 
 
 _prepare_workloads()
@@ -1070,6 +1170,8 @@ def _check_interoperability_helper(op_name, *args, **kwargs):
 
 def check_interoperability(op_list):
     for name in op_list:
+        if name in _TVM_OPS and not is_op_runnable():
+            continue
         print('Dispatch test:', name)
         workloads = OpArgMngr.get_workloads(name)
         assert workloads is not None, 'Workloads for operator `{}` has not been ' \
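Note: the guard added to check_interoperability above exists because the six comparison operators are backed by TVM-generated kernels, which not every MXNet build can execute, and is_op_runnable reports whether they can. A sketch of the skip pattern with a stand-in predicate, since is_op_runnable itself lives in mxnet's test utilities:

    _TVM_OPS = ['equal', 'not_equal', 'less', 'less_equal', 'greater', 'greater_equal']

    def _is_op_runnable():
        # Stand-in for mxnet's is_op_runnable(); hardcoded False simply
        # demonstrates the skip path taken on builds without TVM op support.
        return False

    def check_interop(op_list):
        for name in op_list:
            if name in _TVM_OPS and not _is_op_runnable():
                # Skip TVM-backed ops instead of failing the dispatch test.
                print('Skipped (TVM op not runnable):', name)
                continue
            print('Dispatch test:', name)

    check_interop(['equal', 'argmax'])  # skips 'equal', tests 'argmax'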