From 1200ccc388010955fd53a97b03f3b5b36034f188 Mon Sep 17 00:00:00 2001 From: Hao Jin Date: Tue, 17 Dec 2019 07:19:10 +0000 Subject: [PATCH] Implement atleast_1d/2d/3d --- python/mxnet/_numpy_op_doc.py | 107 ++++++++++++++++++ python/mxnet/numpy_dispatch_protocol.py | 3 + src/operator/numpy/np_init_op.cc | 71 ++++++++++++ src/operator/numpy/np_init_op.cu | 9 ++ src/operator/numpy/np_init_op.h | 23 ++++ .../unittest/test_numpy_interoperability.py | 25 ++++ tests/python/unittest/test_numpy_op.py | 54 +++++++++ 7 files changed, 292 insertions(+) diff --git a/python/mxnet/_numpy_op_doc.py b/python/mxnet/_numpy_op_doc.py index 0d0e3b64491b..79c52b02e778 100644 --- a/python/mxnet/_numpy_op_doc.py +++ b/python/mxnet/_numpy_op_doc.py @@ -408,6 +408,113 @@ def _np_copy(a, out=None): pass +def _np_atleast_1d(*arys): + """ + Convert inputs to arrays with at least one dimension. + + Scalar inputs are converted to 1-dimensional arrays, whilst higher-dimensional inputs are preserved. + + Parameters + ---------- + arys1, arys2, ... : ndarray + One or more input arrays. + + Returns + ------- + ret : ndarray + An array, or list of arrays, each with a.ndim >= 1. Copies are made only if necessary. + + See also + -------- + atleast_2d, atleast_3d + + Examples + -------- + >>> np.atleast_1d(1.0) + array([1.]) + >>> x = np.arange(9.0).reshape(3,3) + >>> np.atleast_1d(x) + array([[0., 1., 2.], + [3., 4., 5.], + [6., 7., 8.]]) + >>> np.atleast_1d(np.array(1), np.array([3, 4])) + [array([1.]), array([3., 4.])] + """ + pass + + +def _np_atleast_2d(*arys): + """ + Convert inputs to arrays with at least two dimensions. + + Parameters + ---------- + arys1, arys2, ... : ndarray + One or more input arrays. + + Returns + ------- + ret : ndarray + An array, or list of arrays, each with a.ndim >= 2. Copies are made only if necessary. 
+ + See also + -------- + atleast_1d, atleast_3d + + Examples + -------- + >>> np.atleast_2d(3.0) + array([[3.]]) + >>> x = np.arange(3.0) + >>> np.atleast_2d(x) + array([[0., 1., 2.]]) + >>> np.atleast_2d(np.array(1), np.array([1, 2]), np.array([[1, 2]])) + [array([[1.]]), array([[1., 2.]]), array([[1., 2.]])] + """ + pass + +def _np_atleast_3d(*arys): + """ + Convert inputs to arrays with at least three dimensions. + + Parameters + ---------- + arys1, arys2, ... : ndarray + One or more input arrays. + + Returns + ------- + ret : ndarray + An array, or list of arrays, each with a.ndim >= 3. + For example, a 1-D array of shape (N,) becomes a view of shape (1, N, 1), + and a 2-D array of shape (M, N) becomes a view of shape (M, N, 1). + + See also + -------- + atleast_1d, atleast_2d + + Examples + -------- + >>> np.atleast_3d(3.0) + array([[[3.]]]) + >>> x = np.arange(3.0) + >>> np.atleast_3d(x).shape + (1, 3, 1) + >>> x = np.arange(12.0).reshape(4,3) + >>> np.atleast_3d(x).shape + (4, 3, 1) + >>> for arr in np.atleast_3d(np.array([1, 2]), np.array([[1, 2]]), np.array([[[1, 2]]])): + ... print(arr, arr.shape) + ... + [[[1.] + [2.]]] (1, 2, 1) + [[[1.] + [2.]]] (1, 2, 1) + [[[1. 2.]]] (1, 1, 2) + """ + pass + + +def _np_reshape(a, newshape, order='C', out=None): + """ + Gives a new shape to an array without changing its data. 
diff --git a/python/mxnet/numpy_dispatch_protocol.py b/python/mxnet/numpy_dispatch_protocol.py index e93720564774..469e984e8e6d 100644 --- a/python/mxnet/numpy_dispatch_protocol.py +++ b/python/mxnet/numpy_dispatch_protocol.py @@ -107,6 +107,9 @@ def _run_with_array_ufunc_proto(*args, **kwargs): 'min', 'nonzero', 'ones_like', + 'atleast_1d', + 'atleast_2d', + 'atleast_3d', 'prod', 'ravel', 'repeat', diff --git a/src/operator/numpy/np_init_op.cc b/src/operator/numpy/np_init_op.cc index ee52a2939e12..e6073bd2a22d 100644 --- a/src/operator/numpy/np_init_op.cc +++ b/src/operator/numpy/np_init_op.cc @@ -35,6 +35,7 @@ DMLC_REGISTER_PARAMETER(NumpyEyeParam); DMLC_REGISTER_PARAMETER(IndicesOpParam); DMLC_REGISTER_PARAMETER(LogspaceParam); DMLC_REGISTER_PARAMETER(FullLikeOpParam); +DMLC_REGISTER_PARAMETER(AtleastNDParam); inline bool NumpyIndicesShape(const nnvm::NodeAttrs& attrs, mxnet::ShapeVector* in_shapes, @@ -98,6 +99,76 @@ NNVM_REGISTER_OP(_npi_identity) .set_attr("FCompute", IdentityCompute) .add_arguments(InitOpParam::__FIELDS__()); +template +inline bool AtleastNDShape(const nnvm::NodeAttrs& attrs, + std::vector *in_attrs, + std::vector *out_attrs) { + auto ¶m = nnvm::get(attrs.parsed); + + CHECK_EQ(in_attrs->size(), param.num_args); + CHECK_EQ(out_attrs->size(), param.num_args); + + for (int i = 0; i < param.num_args; ++i) { + auto &shape = in_attrs->at(i); + if (shape.ndim() < NDim) { + mxnet::TShape new_shape(NDim, 1); + if (NDim == 2) { + if (shape.ndim() == 1) { + new_shape[1] = shape[0]; + } + } else if (NDim == 3) { + if (shape.ndim() == 1) { + new_shape[1] = shape[0]; + } else if (shape.ndim() == 2) { + new_shape[0] = shape[0]; + new_shape[1] = shape[1]; + } + } + SHAPE_ASSIGN_CHECK(*out_attrs, i, new_shape); + } else { + SHAPE_ASSIGN_CHECK(*out_attrs, i, shape); + } + } + + return shape_is_known(*in_attrs) && shape_is_known(*out_attrs); +} + +#define NNVM_REGISTER_ATLEAST_ND(N) \ +NNVM_REGISTER_OP(_np_atleast_##N##d) \ +.set_attr_parser(ParamParser) \ 
+.set_num_inputs( \ +[](const NodeAttrs& attrs) { \ + auto ¶m = nnvm::get(attrs.parsed); \ + return param.num_args; \ +}) \ +.set_num_outputs( \ +[](const NodeAttrs& attrs) { \ + auto ¶m = nnvm::get(attrs.parsed); \ + return param.num_args; \ +}) \ +.set_attr("key_var_num_args", "num_args") \ +.set_attr("FListInputNames", \ +[](const nnvm::NodeAttrs& attrs) { \ + int num_args = nnvm::get(attrs.parsed).num_args; \ + std::vector ret; \ + for (int i = 0; i < num_args; i++) { \ + ret.push_back(std::string("ary") + std::to_string(i)); \ + } \ + return ret; \ +}) \ +.set_attr("FInferType", ElemwiseType<-1, -1>) \ +.set_attr("FInferShape", AtleastNDShape) \ +.set_attr("FGradient", MakeZeroGradNodes) \ +.set_attr("FCompute", AtleastNDCompute) \ +.add_argument("arys", "NDArray-or-Symbol[]", "List of input arrays") \ +.add_arguments(AtleastNDParam::__FIELDS__()) \ + +NNVM_REGISTER_ATLEAST_ND(1); + +NNVM_REGISTER_ATLEAST_ND(2); + +NNVM_REGISTER_ATLEAST_ND(3); + NNVM_REGISTER_OP(_npi_full_like) .set_num_inputs(1) .set_num_outputs(1) diff --git a/src/operator/numpy/np_init_op.cu b/src/operator/numpy/np_init_op.cu index 5095fe60bdef..886bed61ec66 100644 --- a/src/operator/numpy/np_init_op.cu +++ b/src/operator/numpy/np_init_op.cu @@ -41,6 +41,15 @@ NNVM_REGISTER_OP(_npi_identity) NNVM_REGISTER_OP(_npi_full_like) .set_attr("FCompute", FullLikeOpCompute); +NNVM_REGISTER_OP(_np_atleast_1d) +.set_attr("FCompute", AtleastNDCompute); + +NNVM_REGISTER_OP(_np_atleast_2d) +.set_attr("FCompute", AtleastNDCompute); + +NNVM_REGISTER_OP(_np_atleast_3d) +.set_attr("FCompute", AtleastNDCompute); + NNVM_REGISTER_OP(_npi_arange) .set_attr("FCompute", RangeCompute); diff --git a/src/operator/numpy/np_init_op.h b/src/operator/numpy/np_init_op.h index df30d611aa02..cfc2941ecd28 100644 --- a/src/operator/numpy/np_init_op.h +++ b/src/operator/numpy/np_init_op.h @@ -272,6 +272,29 @@ void LogspaceCompute(const nnvm::NodeAttrs& attrs, }); } +struct AtleastNDParam : dmlc::Parameter { + int num_args; + 
DMLC_DECLARE_PARAMETER(AtleastNDParam) { + DMLC_DECLARE_FIELD(num_args) + .set_lower_bound(1) + .describe("Number of input arrays."); + } +}; + +template +void AtleastNDCompute(const nnvm::NodeAttrs& attrs, + const OpContext& ctx, + const std::vector& inputs, + const std::vector& req, + const std::vector& outputs) { + auto ¶m = nnvm::get(attrs.parsed); + CHECK_EQ(inputs.size(), param.num_args); + CHECK_EQ(outputs.size(), param.num_args); + for (int i = 0; i < param.num_args; ++i) { + UnaryOp::IdentityCompute(attrs, ctx, {inputs[i]}, {req[i]}, {outputs[i]}); + } +} + } // namespace op } // namespace mxnet diff --git a/tests/python/unittest/test_numpy_interoperability.py b/tests/python/unittest/test_numpy_interoperability.py index a670f794860f..e1dfacbfd570 100644 --- a/tests/python/unittest/test_numpy_interoperability.py +++ b/tests/python/unittest/test_numpy_interoperability.py @@ -700,6 +700,30 @@ def _add_workload_ones_like(array_pool): OpArgMngr.add_workload('ones_like', array_pool['4x1']) +def _add_workload_atleast_nd(): + a_0 = np.array(1) + b_0 = np.array(2) + a_1 = np.array([1, 2]) + b_1 = np.array([2, 3]) + a_2 = np.array([[1, 2], [1, 2]]) + b_2 = np.array([[2, 3], [2, 3]]) + a_3 = [a_2, a_2] + b_3 = [b_2, b_2] + + OpArgMngr.add_workload('atleast_1d', a_0, b_0) + OpArgMngr.add_workload('atleast_1d', a_1, b_1) + OpArgMngr.add_workload('atleast_1d', a_2, b_2) + OpArgMngr.add_workload('atleast_1d', a_3, b_3) + OpArgMngr.add_workload('atleast_2d', a_0, b_0) + OpArgMngr.add_workload('atleast_2d', a_1, b_1) + OpArgMngr.add_workload('atleast_2d', a_2, b_2) + OpArgMngr.add_workload('atleast_2d', a_3, b_3) + OpArgMngr.add_workload('atleast_3d', a_0, b_0) + OpArgMngr.add_workload('atleast_3d', a_1, b_1) + OpArgMngr.add_workload('atleast_3d', a_2, b_2) + OpArgMngr.add_workload('atleast_3d', a_3, b_3) + + def _add_workload_prod(array_pool): OpArgMngr.add_workload('prod', array_pool['4x1']) @@ -1429,6 +1453,7 @@ def _prepare_workloads(): _add_workload_mean(array_pool) 
_add_workload_nonzero() _add_workload_ones_like(array_pool) + _add_workload_atleast_nd() _add_workload_prod(array_pool) _add_workload_repeat(array_pool) _add_workload_reshape() diff --git a/tests/python/unittest/test_numpy_op.py b/tests/python/unittest/test_numpy_op.py index 545466bf0814..a943aa6d1a40 100644 --- a/tests/python/unittest/test_numpy_op.py +++ b/tests/python/unittest/test_numpy_op.py @@ -2121,6 +2121,60 @@ def hybrid_forward(self, F, a): assert_almost_equal(mx_out.asnumpy(), np_out, rtol=1e-3, atol=1e-5) + +@with_seed() +@use_np +def test_np_atleast_nd(): + class TestAtleastND(HybridBlock): + def __init__(self, n): + super(TestAtleastND, self).__init__() + self._n = n + + def hybrid_forward(self, F, *arys): + if self._n == 1: + return F.np.atleast_1d(*arys) + elif self._n == 2: + return F.np.atleast_2d(*arys) + elif self._n == 3: + return F.np.atleast_3d(*arys) + + tensor_shapes = [ + ((), (2,), (3, 4, 5)), + ((2, 3, 4, 5), (), (2, 3)) + ] + flags = [True, False] + ns = [1, 2, 3] + dtypes = ['int32', 'int64', 'float16', 'float32', 'float64'] + funcs = { + "numpy": {1: lambda *ts: _np.atleast_1d(*ts), + 2: lambda *ts: _np.atleast_2d(*ts), + 3: lambda *ts: _np.atleast_3d(*ts)}, + "mxnet": {1: lambda *ts: np.atleast_1d(*ts), + 2: lambda *ts: np.atleast_2d(*ts), + 3: lambda *ts: np.atleast_3d(*ts)} + } + for hybridize, n, tensor_shape, dtype in \ + itertools.product(flags, ns, tensor_shapes, dtypes): + test_atleast_nd = TestAtleastND(n) + if hybridize: + test_atleast_nd.hybridize() + if dtype in ['int32', 'int64']: + tensors = list(map(lambda s: np.random.randint(-1, 1, size=s, dtype=dtype), tensor_shape)) + else: + tensors = list(map(lambda s: np.random.uniform(-1.0, 1.0, size=s, dtype=dtype), tensor_shape)) + tensors_np = [t.asnumpy() for t in tensors] + mx_out = test_atleast_nd(*tensors) + np_out = funcs["numpy"][n](*tensors_np) + for i in range(len(tensors)): + assert mx_out[i].shape == np_out[i].shape + assert same(mx_out[i].asnumpy(), np_out[i]) + + mx_out = 
funcs["mxnet"][n](*tensors) + np_out = funcs["numpy"][n](*tensors_np) + for i in range(len(tensors)): + assert mx_out[i].shape == np_out[i].shape + assert same(mx_out[i].asnumpy(), np_out[i]) + + @with_seed() @use_np def test_np_arange():