From 0e7b98bb3f621920c9db9075bf5f1a1456d5d3fe Mon Sep 17 00:00:00 2001
From: JiangZhaoh
Date: Wed, 20 Nov 2019 05:49:17 +0000
Subject: [PATCH] add op insert

fix lint and compile errors

fix pylint errors
---
 python/mxnet/ndarray/numpy/_op.py             | 100 ++-
 python/mxnet/numpy/multiarray.py              |  83 ++-
 python/mxnet/numpy_dispatch_protocol.py       |   1 +
 python/mxnet/symbol/numpy/_symbol.py          | 100 ++-
 src/operator/numpy/np_insert_op-inl.h         | 630 ++++++++++++++++++
 src/operator/numpy/np_insert_op.cc            | 166 +++++
 src/operator/numpy/np_insert_op.cu            |  35 +
 .../unittest/test_numpy_interoperability.py   |  13 +
 tests/python/unittest/test_numpy_op.py        | 102 +++
 9 files changed, 1225 insertions(+), 5 deletions(-)
 create mode 100644 src/operator/numpy/np_insert_op-inl.h
 create mode 100644 src/operator/numpy/np_insert_op.cc
 create mode 100644 src/operator/numpy/np_insert_op.cu

diff --git a/python/mxnet/ndarray/numpy/_op.py b/python/mxnet/ndarray/numpy/_op.py
index 3cc5b85c8384..3a33a2a8a76c 100644
--- a/python/mxnet/ndarray/numpy/_op.py
+++ b/python/mxnet/ndarray/numpy/_op.py
@@ -21,7 +21,7 @@
 from __future__ import absolute_import

 import numpy as _np
-from ...base import numeric_types
+from ...base import numeric_types, integer_types
 from ...util import _sanity_check_params, set_module
 from ...util import wrap_np_unary_func, wrap_np_binary_func
 from ...context import current_context
@@ -29,7 +29,7 @@
 from ..ndarray import NDArray

 __all__ = ['shape', 'zeros', 'ones', 'full', 'add', 'subtract', 'multiply', 'divide', 'mod', 'remainder', 'power',
-           'arctan2', 'sin', 'cos', 'tan', 'sinh', 'cosh', 'tanh', 'log10', 'sqrt', 'cbrt', 'abs',
+           'arctan2', 'sin', 'cos', 'tan', 'sinh', 'cosh', 'tanh', 'log10', 'sqrt', 'cbrt', 'abs', 'insert',
            'absolute', 'exp', 'expm1', 'arcsin', 'arccos', 'arctan', 'sign', 'log', 'degrees', 'log2',
            'log1p', 'rint', 'radians', 'reciprocal', 'square', 'negative', 'fix', 'ceil', 'floor',
            'trunc', 'logical_not', 'arcsinh', 'arccosh', 'arctanh', 'tensordot', 'histogram', 'eye',
@@ -381,6 +381,102 @@ def take(a, indices, axis=None, mode='raise', out=None):
 # pylint: enable=redefined-outer-name


+@set_module('mxnet.ndarray.numpy')
+def insert(arr, obj, values, axis=None):
+    """
+    Insert values along the given axis before the given indices.
+
+    Parameters
+    ----------
+    arr : ndarray
+        Input array.
+    obj : int, slice or ndarray of ints
+        Object that defines the index or indices before which `values` is
+        inserted.
+        Multiple insertions are supported when `obj` is a single scalar or a
+        sequence with one element (only int32 and int64 elements are supported).
+    values : ndarray
+        Values to insert into `arr`.
+        The type of `values` should equal the type of `arr`.
+        `values` should be shaped so that ``arr[...,obj,...] = values``
+        is legal.
+    axis : int, optional
+        Axis along which to insert `values`. If `axis` is None then `arr`
+        is flattened first.
+
+    Returns
+    -------
+    out : ndarray
+        A copy of `arr` with `values` inserted. Note that `insert`
+        does not occur in-place: a new array is returned. If
+        `axis` is None, `out` is a flattened array.
+
+    Notes
+    -----
+    Note that for higher dimensional inserts `obj=0` behaves very differently
+    from `obj=[0]`, just as `arr[:,0,:] = values` is different from
+    `arr[:,[0],:] = values`.
+
+    Examples
+    --------
+    >>> a = np.array([[1, 1], [2, 2], [3, 3]])
+    >>> a
+    array([[1., 1.],
+           [2., 2.],
+           [3., 3.]])
+    >>> np.insert(a, 1, np.array(5))
+    array([1., 5., 1., 2., 2., 3., 3.])
+    >>> np.insert(a, 1, np.array(5), axis=1)
+    array([[1., 5., 1.],
+           [2., 5., 2.],
+           [3., 5., 3.]])
+
+    Difference between sequence and scalars:
+
+    >>> np.insert(a, np.array([1], dtype=np.int32), np.array([[1],[2],[3]]), axis=1)
+    array([[1., 1., 1.],
+           [2., 2., 2.],
+           [3., 3., 3.]])
+    >>> np.insert(a, 1, np.array([1, 2, 3]), axis=1)
+    array([[1., 1., 1.],
+           [2., 2., 2.],
+           [3., 3., 3.]])
+
+    >>> b = a.flatten()
+    >>> b
+    array([1., 1., 2., 2., 3., 3.])
+    >>> np.insert(b, np.array([2, 2], dtype=np.int64), np.array([5, 6]))
+    array([1., 1., 5., 6., 2., 2., 3., 3.])
+
+    >>> np.insert(b, slice(2, 4), np.array([5, 6]))
+    array([1., 1., 5., 2., 6., 2., 3., 3.])
+
+    >>> np.insert(b, np.array([2, 2], dtype=np.int32), np.array([7.13, False]))
+    array([1.  , 1.  , 7.13, 0.  , 2.  , 2.  , 3.  , 3.  ])
+
+    >>> x = np.arange(8).reshape(2, 4)
+    >>> idx = np.array([1, 3], dtype=np.int32)
+    >>> np.insert(x, idx, np.array([999]), axis=1)
+    array([[  0., 999.,   1.,   2., 999.,   3.],
+           [  4., 999.,   5.,   6., 999.,   7.]])
+    """
+    if not isinstance(arr, NDArray):
+        raise TypeError("'arr' does not support type {}".format(str(type(arr))))
+    if not isinstance(values, NDArray):
+        raise TypeError("'values' does not support type {}".format(str(type(values))))
+    if isinstance(obj, slice):
+        start = 0 if obj.start is None else obj.start
+        stop = obj.stop
+        step = 1 if obj.step is None else obj.step
+        return _npi.insert(arr, values, start=start, stop=stop, step=step, axis=axis)
+    elif isinstance(obj, integer_types):
+        return _npi.insert(arr, values, int_ind=obj, axis=axis)
+    elif isinstance(obj, NDArray):
+        return _npi.insert(arr, values, obj, axis=axis)
+    else:
+        raise TypeError("'obj' does not support type {}".format(str(type(obj))))
+
+
 #pylint: disable= too-many-arguments, no-member, protected-access
 def _ufunc_helper(lhs, rhs, fn_array, fn_scalar, lfn_scalar, rfn_scalar=None, out=None):
     """ Helper function for element-wise operation.
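The wrapper above maps the three accepted `obj` forms onto keyword arguments of the internal `_npi.insert` op: a slice becomes explicit start/stop/step scalars, a Python int becomes `int_ind`, and an ndarray of indices is passed through as an extra input. As a reference for the intended semantics, a minimal sketch against classic NumPy (illustrative only, not part of the patch):

    import numpy as onp  # classic NumPy, used only to illustrate the semantics

    a = onp.arange(6)
    # slice obj: inserts before indices range(1, 4, 2) == [1, 3] of the original array
    print(onp.insert(a, slice(1, 4, 2), [10, 11]))   # [ 0 10  1  2 11  3  4  5]
    # scalar obj: all of `values` lands at the single position 2
    print(onp.insert(a, 2, [7, 8]))                  # [0 1 7 8 2 3 4 5]

Unlike classic NumPy, the wrapper requires `values` to already be an ndarray; scalars and Python lists are rejected with a TypeError.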
diff --git a/python/mxnet/numpy/multiarray.py b/python/mxnet/numpy/multiarray.py
index e94d4c8341b4..ee3d6e53db59 100644
--- a/python/mxnet/numpy/multiarray.py
+++ b/python/mxnet/numpy/multiarray.py
@@ -49,7 +49,7 @@
 __all__ = ['ndarray', 'empty', 'array', 'shape', 'zeros', 'ones', 'full', 'add', 'subtract', 'multiply', 'divide',
            'mod', 'remainder', 'power', 'arctan2', 'sin', 'cos', 'tan', 'sinh', 'cosh', 'tanh', 'log10', 'sqrt',
            'cbrt', 'abs', 'absolute', 'exp', 'expm1', 'arcsin', 'arccos', 'arctan', 'sign', 'log',
-           'degrees', 'log2', 'log1p', 'rint', 'radians', 'reciprocal', 'square', 'negative',
+           'degrees', 'log2', 'log1p', 'rint', 'radians', 'reciprocal', 'square', 'negative', 'insert',
            'fix', 'ceil', 'floor', 'trunc', 'logical_not', 'arcsinh', 'arccosh', 'arctanh', 'append',
            'tensordot', 'histogram', 'eye', 'linspace', 'logspace', 'expand_dims', 'tile', 'arange', 'split',
            'vsplit', 'concatenate', 'stack', 'vstack', 'column_stack', 'dstack', 'mean', 'maximum', 'minimum',
@@ -7030,6 +7030,87 @@ def einsum(*operands, **kwargs):
     return _mx_nd_np.einsum(*operands, **kwargs)


+@set_module('mxnet.numpy')
+def insert(arr, obj, values, axis=None):
+    """
+    Insert values along the given axis before the given indices.
+
+    Parameters
+    ----------
+    arr : ndarray
+        Input array.
+    obj : int, slice or ndarray of ints
+        Object that defines the index or indices before which `values` is
+        inserted.
+        Multiple insertions are supported when `obj` is a single scalar or a
+        sequence with one element (only int32 and int64 elements are supported).
+    values : ndarray
+        Values to insert into `arr`.
+        The type of `values` should equal the type of `arr`.
+        `values` should be shaped so that ``arr[...,obj,...] = values``
+        is legal.
+    axis : int, optional
+        Axis along which to insert `values`. If `axis` is None then `arr`
+        is flattened first.
+
+    Returns
+    -------
+    out : ndarray
+        A copy of `arr` with `values` inserted. Note that `insert`
+        does not occur in-place: a new array is returned. If
+        `axis` is None, `out` is a flattened array.
+
+    Notes
+    -----
+    Note that for higher dimensional inserts `obj=0` behaves very differently
+    from `obj=[0]`, just as `arr[:,0,:] = values` is different from
+    `arr[:,[0],:] = values`.
+
+    Examples
+    --------
+    >>> a = np.array([[1, 1], [2, 2], [3, 3]])
+    >>> a
+    array([[1., 1.],
+           [2., 2.],
+           [3., 3.]])
+    >>> np.insert(a, 1, np.array(5))
+    array([1., 5., 1., 2., 2., 3., 3.])
+    >>> np.insert(a, 1, np.array(5), axis=1)
+    array([[1., 5., 1.],
+           [2., 5., 2.],
+           [3., 5., 3.]])
+
+    Difference between sequence and scalars:
+
+    >>> np.insert(a, np.array([1], dtype=np.int32), np.array([[1],[2],[3]]), axis=1)
+    array([[1., 1., 1.],
+           [2., 2., 2.],
+           [3., 3., 3.]])
+    >>> np.insert(a, 1, np.array([1, 2, 3]), axis=1)
+    array([[1., 1., 1.],
+           [2., 2., 2.],
+           [3., 3., 3.]])
+
+    >>> b = a.flatten()
+    >>> b
+    array([1., 1., 2., 2., 3., 3.])
+    >>> np.insert(b, np.array([2, 2], dtype=np.int64), np.array([5, 6]))
+    array([1., 1., 5., 6., 2., 2., 3., 3.])
+
+    >>> np.insert(b, slice(2, 4), np.array([5, 6]))
+    array([1., 1., 5., 2., 6., 2., 3., 3.])
+
+    >>> np.insert(b, np.array([2, 2], dtype=np.int32), np.array([7.13, False]))
+    array([1.  , 1.  , 7.13, 0.  , 2.  , 2.  , 3.  , 3.  ])
+
+    >>> x = np.arange(8).reshape(2, 4)
+    >>> idx = np.array([1, 3], dtype=np.int32)
+    >>> np.insert(x, idx, np.array([999]), axis=1)
+    array([[  0., 999.,   1.,   2., 999.,   3.],
+           [  4., 999.,   5.,   6., 999.,   7.]])
+    """
+    return _mx_nd_np.insert(arr, obj, values, axis=axis)
+
+
 @set_module('mxnet.numpy')
 def nonzero(a):
     """
diff --git a/python/mxnet/numpy_dispatch_protocol.py b/python/mxnet/numpy_dispatch_protocol.py
index f58159303d0f..fce0ad8ce3da 100644
--- a/python/mxnet/numpy_dispatch_protocol.py
+++ b/python/mxnet/numpy_dispatch_protocol.py
@@ -99,6 +99,7 @@ def _run_with_array_ufunc_proto(*args, **kwargs):
     'fix',
     'flip',
     'inner',
+    'insert',
     'max',
     'mean',
     'min',
diff --git a/python/mxnet/symbol/numpy/_symbol.py b/python/mxnet/symbol/numpy/_symbol.py
index 7da771966f1f..fbc2bf5124ec 100644
--- a/python/mxnet/symbol/numpy/_symbol.py
+++ b/python/mxnet/symbol/numpy/_symbol.py
@@ -22,7 +22,7 @@
 import ctypes
 import numpy as _np
 from . import _op as _mx_np_op
-from ...base import _LIB, SymbolHandle, numeric_types, mx_uint
+from ...base import _LIB, SymbolHandle, numeric_types, mx_uint, integer_types
 from ...util import check_call, set_module, _sanity_check_params
 from ...util import wrap_np_unary_func, wrap_np_binary_func
 from ...context import current_context
@@ -33,7 +33,7 @@
 __all__ = ['zeros', 'ones', 'add', 'subtract', 'multiply', 'divide', 'mod', 'remainder', 'power', 'arctan2',
            'sin', 'cos', 'tan', 'sinh', 'cosh', 'tanh', 'log10', 'sqrt', 'cbrt', 'abs', 'absolute', 'exp',
            'expm1', 'arcsin', 'arccos', 'arctan', 'sign', 'log', 'degrees', 'log2', 'log1p',
-           'rint', 'radians', 'reciprocal', 'square', 'negative', 'fix', 'ceil', 'floor',
+           'rint', 'radians', 'reciprocal', 'square', 'negative', 'fix', 'ceil', 'floor', 'insert',
            'trunc', 'logical_not', 'arcsinh', 'arccosh', 'arctanh', 'tensordot', 'histogram', 'eye',
            'linspace', 'logspace', 'expand_dims', 'tile', 'arange', 'split', 'vsplit', 'concatenate', 'append',
            'stack', 'vstack', 'column_stack', 'dstack', 'mean', 'maximum', 'minimum', 'swapaxes', 'clip', 'argmax',
@@ -2487,6 +2487,102 @@ def ceil(x, out=None, **kwargs):
     return _unary_func_helper(x, _npi.ceil, _np.ceil, out=out, **kwargs)


+@set_module('mxnet.symbol.numpy')
+def insert(arr, obj, values, axis=None):
+    """
+    Insert values along the given axis before the given indices.
+
+    Parameters
+    ----------
+    arr : ndarray
+        Input array.
+    obj : int, slice or ndarray of ints
+        Object that defines the index or indices before which `values` is
+        inserted.
+        Multiple insertions are supported when `obj` is a single scalar or a
+        sequence with one element (only int32 and int64 elements are supported).
+    values : ndarray
+        Values to insert into `arr`.
+        The type of `values` should equal the type of `arr`.
+        `values` should be shaped so that ``arr[...,obj,...] = values``
+        is legal.
+    axis : int, optional
+        Axis along which to insert `values`. If `axis` is None then `arr`
+        is flattened first.
+
+    Returns
+    -------
+    out : ndarray
+        A copy of `arr` with `values` inserted. Note that `insert`
+        does not occur in-place: a new array is returned. If
+        `axis` is None, `out` is a flattened array.
+
+    Notes
+    -----
+    Note that for higher dimensional inserts `obj=0` behaves very differently
+    from `obj=[0]`, just as `arr[:,0,:] = values` is different from
+    `arr[:,[0],:] = values`.
+
+    Examples
+    --------
+    >>> a = np.array([[1, 1], [2, 2], [3, 3]])
+    >>> a
+    array([[1., 1.],
+           [2., 2.],
+           [3., 3.]])
+    >>> np.insert(a, 1, np.array(5))
+    array([1., 5., 1., 2., 2., 3., 3.])
+    >>> np.insert(a, 1, np.array(5), axis=1)
+    array([[1., 5., 1.],
+           [2., 5., 2.],
+           [3., 5., 3.]])
+
+    Difference between sequence and scalars:
+
+    >>> np.insert(a, np.array([1], dtype=np.int32), np.array([[1],[2],[3]]), axis=1)
+    array([[1., 1., 1.],
+           [2., 2., 2.],
+           [3., 3., 3.]])
+    >>> np.insert(a, 1, np.array([1, 2, 3]), axis=1)
+    array([[1., 1., 1.],
+           [2., 2., 2.],
+           [3., 3., 3.]])
+
+    >>> b = a.flatten()
+    >>> b
+    array([1., 1., 2., 2., 3., 3.])
+    >>> np.insert(b, np.array([2, 2], dtype=np.int64), np.array([5, 6]))
+    array([1., 1., 5., 6., 2., 2., 3., 3.])
+
+    >>> np.insert(b, slice(2, 4), np.array([5, 6]))
+    array([1., 1., 5., 2., 6., 2., 3., 3.])
+
+    >>> np.insert(b, np.array([2, 2], dtype=np.int32), np.array([7.13, False]))
+    array([1.  , 1.  , 7.13, 0.  , 2.  , 2.  , 3.  , 3.  ])
+
+    >>> x = np.arange(8).reshape(2, 4)
+    >>> idx = np.array([1, 3], dtype=np.int32)
+    >>> np.insert(x, idx, np.array([999]), axis=1)
+    array([[  0., 999.,   1.,   2., 999.,   3.],
+           [  4., 999.,   5.,   6., 999.,   7.]])
+    """
+    if not isinstance(arr, Symbol):
+        raise TypeError("'arr' does not support type {}".format(str(type(arr))))
+    if not isinstance(values, Symbol):
+        raise TypeError("'values' does not support type {}".format(str(type(values))))
+    if isinstance(obj, slice):
+        start = 0 if obj.start is None else obj.start
+        stop = obj.stop
+        step = 1 if obj.step is None else obj.step
+        return _npi.insert(arr, values, start=start, stop=stop, step=step, axis=axis)
+    elif isinstance(obj, integer_types):
+        return _npi.insert(arr, values, int_ind=obj, axis=axis)
+    elif isinstance(obj, Symbol):
+        return _npi.insert(arr, values, obj, axis=axis)
+    else:
+        raise TypeError("'obj' does not support type {}".format(str(type(obj))))
+
+
 @set_module('mxnet.symbol.numpy')
 @wrap_np_unary_func
 def floor(x, out=None, **kwargs):
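Before any kernel runs, both the compute path and shape inference in the C++ below right-align `values.shape` against `arr.ndim`, padding missing leading axes with 1 so that `values` can broadcast against the insertion slot (the `val_newshape` loops). A rough Python model of that normalization (hypothetical helper, for illustration only):

    def right_align(arr_shape, val_shape):
        # pad values' shape with leading 1s until it has arr's rank;
        # any extra leading dims of values beyond arr's rank must be size 1
        out = [1] * len(arr_shape)
        for i, j in zip(range(len(val_shape) - 1, -1, -1),
                        range(len(arr_shape) - 1, -1, -1)):
            out[j] = val_shape[i]
        return tuple(out)

    print(right_align((3, 2), (2,)))          # (1, 2)
    print(right_align((5, 2, 3, 4), (2, 3)))  # (1, 1, 2, 3)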
diff --git a/src/operator/numpy/np_insert_op-inl.h b/src/operator/numpy/np_insert_op-inl.h
new file mode 100644
index 000000000000..aa8df33e6e1b
--- /dev/null
+++ b/src/operator/numpy/np_insert_op-inl.h
@@ -0,0 +1,630 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/*!
+ * Copyright (c) 2019 by Contributors
+ * \file np_insert_op-inl.h
+ * \brief Function definition of insert operators
+ */
+#ifndef MXNET_OPERATOR_NUMPY_NP_INSERT_OP_INL_H_
+#define MXNET_OPERATOR_NUMPY_NP_INSERT_OP_INL_H_
+
+#include <vector>
+#include <algorithm>
+#include "../../common/utils.h"
+#include "../tensor/sort_op.h"
+#include "../operator_common.h"
+
+namespace mxnet {
+namespace op {
+
+struct NumpyInsertParam : public dmlc::Parameter<NumpyInsertParam> {
+  dmlc::optional<int> start;
+  dmlc::optional<int> stop;
+  dmlc::optional<int> step;
+  dmlc::optional<int> int_ind;
+  dmlc::optional<int> axis;
+  DMLC_DECLARE_PARAMETER(NumpyInsertParam) {
+    DMLC_DECLARE_FIELD(start)
+    .set_default(dmlc::optional<int>())
+    .describe("If 'obj' is slice, 'start' is one of its arguments.");
+    DMLC_DECLARE_FIELD(stop)
+    .set_default(dmlc::optional<int>())
+    .describe("If 'obj' is slice, 'stop' is one of its arguments.");
+    DMLC_DECLARE_FIELD(step)
+    .set_default(dmlc::optional<int>())
+    .describe("If 'obj' is slice, 'step' is one of its arguments.");
+    DMLC_DECLARE_FIELD(int_ind)
+    .set_default(dmlc::optional<int>())
+    .describe("If 'obj' is int, 'int_ind' is the index before which "
+              "'values' is inserted.");
+    DMLC_DECLARE_FIELD(axis)
+    .set_default(dmlc::optional<int>())
+    .describe("Axis along which to insert 'values'.");
+  }
+};
+
+namespace insert_ {
+enum InsertOpInputs {kArr, kValues, kObj};
+enum InsertOpOutputs {kOut};
+}  // namespace insert_
+
+template <int req>
+struct InsertZeroNdimForward {
+  template<typename DType>
+  MSHADOW_XINLINE static void Map(int i, DType* out_data, const DType* in_data) {
+    KERNEL_ASSIGN(out_data[i], req, in_data[i]);
+  }
+};
+
+template <int req>
+struct InsertSingleIndexForward {
+  template<typename DType>
+  MSHADOW_XINLINE static void Map(int i, DType* out_data,
+                                  const DType* in_val, const DType* in_arr,
+                                  const mshadow::Shape<10> outshape,
+                                  const mshadow::Shape<10> valshape,
+                                  const int index, const int numnew,
+                                  const mshadow::Shape<10> val_stride,
+                                  const mshadow::Shape<10> old_val_stride,
+                                  const mshadow::Shape<10> arr_stride,
+                                  const mshadow::Shape<10> out_stride,
+                                  const int arr_ndim, const int val_ndim,
+                                  const int out_ndim, const int axis,
+                                  bool moveaxis) {
+    const int64_t out_head = i / out_stride[axis];
+    const int64_t out_mid = out_head % outshape[axis];
+    mshadow::Shape<10> out_idx;  // i -> position in output's shape
+    for (int j = 0; j < out_ndim; ++j) {
+      const int64_t head = i / out_stride[j];
+      const int64_t mid = head % outshape[j];
+      out_idx[j] = mid;
+    }
+    int64_t dest_idx;
+    if (out_mid >= index && out_mid < index + numnew) {
+      int idx_val = out_mid - index;
+      mshadow::Shape<10> val_idx(out_idx);  // i -> position in values's shape
+      val_idx[axis] = idx_val;
+      for (int j = out_ndim - 1, k = val_ndim - 1; j >= 0 || k >= 0; --j, --k) {
+        if (j >= 0 && k >= 0) {
+          if (valshape[k] == 1) {
+            val_idx[k] = 0;
+          }
+        } else if (j >= 0) {
+          val_idx[j] = 1;
+        } else {
+          break;
+        }
+      }
+      dest_idx = 0;
+      if (moveaxis) {
+        for (int _i = 0; _i < axis; ++_i) {
+          dest_idx += old_val_stride[_i + 1] * val_idx[_i];
+        }
+        dest_idx += old_val_stride[0] * val_idx[axis];
+        for (int _i = axis + 1; _i < val_ndim; ++_i) {
+          dest_idx += old_val_stride[_i] * val_idx[_i];
+        }
+      } else {
+        for (int _i = 0; _i < val_ndim; ++_i) {
+          dest_idx += val_stride[_i] * val_idx[_i];
+        }
+      }
+      KERNEL_ASSIGN(out_data[i], req, in_val[dest_idx]);
+    } else {
+      int idx_arr = (out_mid < index) ? out_mid : out_mid - numnew;
+      mshadow::Shape<10> arr_idx(out_idx);  // i -> position in arr's shape
+      arr_idx[axis] = idx_arr;
+      dest_idx = 0;
+      for (int _i = 0; _i < arr_ndim; ++_i) {
+        dest_idx += arr_stride[_i] * arr_idx[_i];
+      }
+      KERNEL_ASSIGN(out_data[i], req, in_arr[dest_idx]);
+    }
+  }
+
+  template<typename DType, typename IType>
+  MSHADOW_XINLINE static void Map(int i, DType* out_data,
+                                  const DType* in_val, const DType* in_arr,
+                                  const mshadow::Shape<10> outshape,
+                                  const mshadow::Shape<10> valshape,
+                                  const int N, const IType* in_obj, const int numnew,
+                                  const mshadow::Shape<10> val_stride,
+                                  const mshadow::Shape<10> old_val_stride,
+                                  const mshadow::Shape<10> arr_stride,
+                                  const mshadow::Shape<10> out_stride,
+                                  const int arr_ndim, const int val_ndim,
+                                  const int out_ndim, const int axis,
+                                  bool moveaxis) {
+    const int64_t out_head = i / out_stride[axis];
+    const int64_t out_mid = out_head % outshape[axis];
+    mshadow::Shape<10> out_idx;  // i -> position in output's shape
+    for (int j = 0; j < out_ndim; ++j) {
+      const int64_t head = i / out_stride[j];
+      const int64_t mid = head % outshape[j];
+      out_idx[j] = mid;
+    }
+    int64_t dest_idx;
+    IType index = in_obj[0];
+    if (static_cast<int64_t>(index) < 0) {
+      index += static_cast<IType>(N);
+    }
+    if (out_mid >= index && out_mid < index + numnew) {
+      int idx_val = out_mid - index;
+      mshadow::Shape<10> val_idx(out_idx);  // i -> position in values's shape
+      val_idx[axis] = idx_val;
+      for (int j = out_ndim - 1, k = val_ndim - 1; j >= 0 || k >= 0; --j, --k) {
+        if (j >= 0 && k >= 0) {
+          if (valshape[k] == 1) {
+            val_idx[k] = 0;
+          }
+        } else if (j >= 0) {
+          val_idx[j] = 1;
+        } else {
+          break;
+        }
+      }
+      dest_idx = 0;
+      if (moveaxis) {
+        for (int _i = 0; _i < axis; ++_i) {
+          dest_idx += old_val_stride[_i + 1] * val_idx[_i];
+        }
+        dest_idx += old_val_stride[0] * val_idx[axis];
+        for (int _i = axis + 1; _i < val_ndim; ++_i) {
+          dest_idx += old_val_stride[_i] * val_idx[_i];
+        }
+      } else {
+        for (int _i = 0; _i < val_ndim; ++_i) {
+          dest_idx += val_stride[_i] * val_idx[_i];
+        }
+      }
+      KERNEL_ASSIGN(out_data[i], req, in_val[dest_idx]);
+    } else {
+      int idx_arr = (out_mid < index) ? out_mid : out_mid - numnew;
+      mshadow::Shape<10> arr_idx(out_idx);  // i -> position in arr's shape
+      arr_idx[axis] = idx_arr;
+      dest_idx = 0;
+      for (int _i = 0; _i < arr_ndim; ++_i) {
+        dest_idx += arr_stride[_i] * arr_idx[_i];
+      }
+      KERNEL_ASSIGN(out_data[i], req, in_arr[dest_idx]);
+    }
+  }
+};
+
+template <int req>
+struct InsertSeqForward {
+  template<typename DType>
+  MSHADOW_XINLINE static void Map(int i, DType* out_data,
+                                  const DType* in_val, const DType* in_arr,
+                                  const mshadow::Shape<10> outshape,
+                                  const mshadow::Shape<10> valshape,
+                                  const int* is_insert,
+                                  const int* origin_idx,
+                                  const mshadow::Shape<10> val_stride,
+                                  const mshadow::Shape<10> arr_stride,
+                                  const mshadow::Shape<10> out_stride,
+                                  const int arr_ndim, const int val_ndim,
+                                  const int out_ndim, const int axis) {
+    const int64_t out_head = i / out_stride[axis];
+    const int64_t out_mid = out_head % outshape[axis];
+    mshadow::Shape<10> out_idx;  // i -> position in output's shape
+    for (int j = 0; j < out_ndim; ++j) {
+      const int64_t head = i / out_stride[j];
+      const int64_t mid = head % outshape[j];
+      out_idx[j] = mid;
+    }
+    int64_t dest_idx;
+    if (is_insert[out_mid]) {
+      int idx_val = origin_idx[out_mid];
+      mshadow::Shape<10> insert_idx(out_idx);  // i -> position in insert's shape
+      insert_idx[axis] = idx_val;
+      mshadow::Shape<10> val_idx(insert_idx);  // i -> position in values's shape
+      for (int j = out_ndim - 1, k = val_ndim - 1; j >= 0 || k >= 0; --j, --k) {
+        if (j >= 0 && k >= 0) {
+          if (valshape[k] == 1) {
+            val_idx[k] = 0;
+          }
+        } else if (j >= 0) {
+          val_idx[j] = 0;
+        } else {
+          break;
+        }
+      }
+      dest_idx = 0;
+      for (int _i = 0; _i < val_ndim; ++_i) {
+        dest_idx += val_stride[_i] * val_idx[_i];
+      }
+      KERNEL_ASSIGN(out_data[i], req, in_val[dest_idx]);
+    } else {
+      int idx_arr = origin_idx[out_mid];
+      mshadow::Shape<10> arr_idx(out_idx);  // i -> position in arr's shape
+      arr_idx[axis] = idx_arr;
+      dest_idx = 0;
+      for (int _i = 0; _i < arr_ndim; ++_i) {
+        dest_idx += arr_stride[_i] * arr_idx[_i];
+      }
+      KERNEL_ASSIGN(out_data[i], req, in_arr[dest_idx]);
+    }
+  }
+};
+
+struct SliceToIndices {
+  template<typename IType>
+  MSHADOW_XINLINE static void Map(int i, IType* indices, int N,
+                                  int start, int step) {
+    indices[i] = start + i * step;
+    if (static_cast<int64_t>(indices[i]) < 0) {
+      indices[i] += static_cast<IType>(N);
+    }
+  }
+};
+
+struct ObjToIndices {
+  template<typename IType>
+  MSHADOW_XINLINE static void Map(int i, IType* indices,
+                                  int N, const IType* obj) {
+    indices[i] = obj[i];
+    if (static_cast<int64_t>(indices[i]) < 0) {
+      indices[i] += static_cast<IType>(N);
+    }
+  }
+};
+
+struct AssignId {
+  MSHADOW_XINLINE static void Map(int i, int* order) {
+    order[i] = i;
+  }
+};
+
+struct IndicesModify {
+  template<typename IType>
+  MSHADOW_XINLINE static void Map(int i, IType* indices, const int* order) {
+    indices[order[i]] += i;
+  }
+};
+
+struct AssignInsertZero {
+  MSHADOW_XINLINE static void Map(int i, int* is_insert) {
+    is_insert[i] = 0;
+  }
+};
+
+struct SetIsInsert {
+  template<typename IType>
+  MSHADOW_XINLINE static void Map(int i, IType* indices, int* is_insert) {
+    is_insert[static_cast<int>(indices[i])] = 1;
+  }
+};
+
+struct SetOriginValuesIdx {
+  template<typename IType>
+  MSHADOW_XINLINE static void Map(int i, const IType* indices, int* origin_idx) {
+    origin_idx[static_cast<int>(indices[i])] = i;
+  }
+};
+
+struct SetOriginArrIdx {
+  MSHADOW_XINLINE static void Map(int i, const int* is_insert,
+                                  int* origin_idx) {
+    if (!is_insert[i]) {
+      int cnt = 0;  // count of non-inserted slots before position i
+      for (int j = 0; j < i; ++j) {
+        if (is_insert[j] == 0) {
+          cnt++;
+        }
+      }
+      origin_idx[i] = cnt;
+    }
+  }
+};
+
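Each Map above converts the flat output offset `i` into a coordinate tuple using precomputed row-major strides, patches the coordinate on `axis` (subtracting the number of inserted slots, or re-indexing into `values`), and re-linearizes it against the source tensor's strides. In plain Python, the two primitive steps look like this (hypothetical helpers that mirror only the stride arithmetic):

    def unravel(i, shape, strides):
        # out_idx[j] = (i / out_stride[j]) % outshape[j]
        return [(i // s) % d for s, d in zip(strides, shape)]

    def ravel(idx, strides):
        # dest_idx = sum(stride[j] * idx[j])
        return sum(s * x for s, x in zip(strides, idx))

    shape, strides = (2, 6), (6, 1)     # output of inserting 2 columns into (2, 4)
    print(unravel(7, shape, strides))   # [1, 1]
    print(ravel([1, 1], (4, 1)))        # 5: offset of that element in the (2, 4) source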
+template<typename xpu>
+void NumpyInsertCompute(const nnvm::NodeAttrs& attrs,
+                        const OpContext& ctx,
+                        const std::vector<TBlob>& inputs,
+                        const std::vector<OpReqType>& req,
+                        const std::vector<TBlob>& outputs) {
+  using namespace mshadow;
+  using namespace mxnet_op;
+
+  const NumpyInsertParam& param = nnvm::get<NumpyInsertParam>(attrs.parsed);
+  CHECK_EQ(inputs.size(),
+           (param.stop.has_value() || param.int_ind.has_value()) ? 2U : 3U);
+  CHECK_EQ(outputs.size(), 1U);
+  CHECK_EQ(req.size(), 1U);
+  mshadow::Stream<xpu> *s = ctx.get_stream<xpu>();
+  int ndim = inputs[insert_::kArr].shape_.ndim();
+  int axis = param.axis.has_value() ? param.axis.value() : 0;
+  TBlob arr, values;
+  if (!param.axis.has_value()) {
+    arr = inputs[insert_::kArr].reshape(Shape1(inputs[insert_::kArr].shape_.Size()));
+    ndim = 1;
+  } else if (ndim == 0) {
+    arr = inputs[insert_::kArr];
+    CHECK_EQ(inputs[insert_::kValues].shape_.ndim(), 0)
+      << "'arr' is a 0-d array, 'values' can not be assigned to it. "
+      << "ValueError: assignment to 0-d array.";
+    MSHADOW_TYPE_SWITCH(outputs[insert_::kOut].type_flag_, DType, {
+      MXNET_ASSIGN_REQ_SWITCH(req[insert_::kOut], req_type, {
+        Kernel<InsertZeroNdimForward<req_type>, xpu>::Launch(
+          s, outputs[insert_::kOut].shape_.Size(),
+          outputs[insert_::kOut].dptr<DType>(), inputs[insert_::kValues].dptr<DType>());
+      });
+    });
+    return;
+  } else {
+    arr = inputs[insert_::kArr];
+    CHECK(axis >= -1 * arr.shape_.ndim() && axis < arr.shape_.ndim())
+      << "Axis should be in the range of [-r, r-1] where r is the rank of input tensor";
+    axis += (axis < 0) ? arr.shape_.ndim() : 0;
+  }
+
+  int N = arr.shape_[axis];
+  mxnet::TShape newshape(arr.shape_);
+  size_t indices_len = 0;
+  int start = 0, stop = 0, step = 0;
+
+  // get and check indices from slice or sequence of ints
+  if (inputs.size() == 3U) {
+    indices_len = inputs[insert_::kObj].shape_.Size();
+  } else if (param.stop.has_value()) {
+    step = param.step.value();
+    CHECK_NE(step, 0) << "'step' can not be 0.";
+    stop = param.stop.value();
+    stop += (stop < 0) ? N : 0;
+    stop = (stop < 0) ? ((step < 0) ? -1 : 0) : stop;
+    stop = (stop >= N) ? ((step < 0) ? N - 1 : N) : stop;
+    start = param.start.value();
+    start += (start < 0) ? N : 0;
+    start = (start < 0) ? ((step < 0) ? -1 : 0) : start;
+    start = (start >= N) ? ((step < 0) ? N - 1 : N) : start;
+    int seq_cnt = 0;
+    if (step > 0 && stop >= start) {
+      seq_cnt = (stop - start + step - 1) / step;
+    } else if (step < 0 && stop <= start) {
+      seq_cnt = (stop - start + step + 1) / step;
+    }
+    indices_len = static_cast<size_t>(seq_cnt);
+  }
+
+  int numnew, index = 0;
+  mxnet::TShape val_newshape(arr.shape_.ndim(), -1);
+  for (int i = inputs[insert_::kValues].shape_.ndim() - 1, j = arr.shape_.ndim() - 1;
+       i >= 0 || j >= 0; --i, --j) {
+    if (i >= 0 && j >= 0) {
+      val_newshape[j] = inputs[insert_::kValues].shape_[i];
+    } else if (i >= 0) {
+      CHECK_EQ(inputs[insert_::kValues].shape_[i], 1)
+        << "'values' has too many dimensions: extra leading dimensions must have size 1.";
+    } else {
+      val_newshape[j] = 1;
+    }
+  }
+  values = inputs[insert_::kValues].reshape(val_newshape);
+
+  mxnet::TShape old_valshape(values.shape_);
+  if (param.int_ind.has_value() ||
+      (inputs.size() == 3U && inputs[insert_::kObj].shape_.ndim() == 0)) {
+    if (param.int_ind.has_value()) {
+      index = param.int_ind.value();
+      CHECK(index >= -1 * N && index <= N)
+        << "Index should be in the range of [-r, r] where r is the dim size in 'axis'";
+      if (index < 0) {
+        index += N;
+      }
+    }
+    numnew = values.shape_[0];
+
+    // If 'obj' is an int, then values = moveaxis(values, 0, axis)
+    mxnet::TShape axes(values.ndim(), -1);
+    mxnet::TShape val_newshape(values.ndim(), -1);
+    int axes_id = 0;
+    for (int i = 1; i <= axis; ++i) {
+      axes[axes_id++] = i;
+    }
+    axes[axes_id++] = 0;
+    for (int i = axis + 1; i < values.ndim(); ++i) {
+      axes[axes_id++] = i;
+    }
+    for (int i = 0; i < values.ndim(); ++i) {
+      val_newshape[i] = values.shape_[axes[i]];
+    }
+    values.shape_.assign(val_newshape.begin(), val_newshape.end());
+    newshape[axis] += numnew;
+  } else if (indices_len == 1) {
+    numnew = values.shape_[axis];
+    newshape[axis] += numnew;
+    if (param.start.has_value()) {
+      index = start;
+      CHECK(index >= -1 * N && index <= N)
+        << "Index should be in the range of [-r, r] where r is the dim size in 'axis'";
+      if (index < 0) {
+        index += N;
+      }
+    }
+  } else {
+    numnew = static_cast<int>(indices_len);
+    newshape[axis] += numnew;
+  }
+
+  const mxnet::TShape& outshape = outputs[insert_::kOut].shape_;
+  mshadow::Shape<10> arr_strides;
+  int stride = 1;
+  for (int i = arr.shape_.ndim() - 1; i >= 0; --i) {
+    arr_strides[i] = stride;
+    stride *= arr.shape_[i];
+  }
+  mshadow::Shape<10> val_strides;
+  stride = 1;
+  for (int i = values.shape_.ndim() - 1; i >= 0; --i) {
+    val_strides[i] = stride;
+    stride *= values.shape_[i];
+  }
+  mshadow::Shape<10> old_val_strides;
+  stride = 1;
+  for (int i = old_valshape.ndim() - 1; i >= 0; --i) {
+    old_val_strides[i] = stride;
+    stride *= old_valshape[i];
+  }
+  mshadow::Shape<10> out_strides;
+  stride = 1;
+  for (int i = outshape.ndim() - 1; i >= 0; --i) {
+    out_strides[i] = stride;
+    stride *= outshape[i];
+  }
+  mshadow::Shape<10> k_outshape;
+  for (int i = 0; i < outshape.ndim(); ++i) {
+    k_outshape[i] = outshape[i];
+  }
+  mshadow::Shape<10> k_valshape;
+  for (int i = 0; i < values.shape_.ndim(); ++i) {
+    k_valshape[i] = values.shape_[i];
+  }
+
+  if (param.int_ind.has_value()) {
+    MSHADOW_TYPE_SWITCH(outputs[insert_::kOut].type_flag_, DType, {
+      MXNET_ASSIGN_REQ_SWITCH(req[insert_::kOut], req_type, {
+        Kernel<InsertSingleIndexForward<req_type>, xpu>::Launch(s, outshape.Size(),
+                                          outputs[insert_::kOut].dptr<DType>(),
+                                          values.dptr<DType>(), arr.dptr<DType>(),
+                                          k_outshape, k_valshape, index, numnew,
+                                          val_strides, old_val_strides, arr_strides,
+                                          out_strides, arr.shape_.ndim(),
+                                          values.shape_.ndim(), outshape.ndim(),
+                                          axis, true);
+      });
+    });
+  } else if (inputs.size() == 3U && inputs[insert_::kObj].shape_.ndim() == 0) {
+    MSHADOW_TYPE_SWITCH(outputs[insert_::kOut].type_flag_, DType, {
+      MXNET_ASSIGN_REQ_SWITCH(req[insert_::kOut], req_type, {
+        MSHADOW_TYPE_SWITCH(inputs[insert_::kObj].type_flag_, IType, {
+          Kernel<InsertSingleIndexForward<req_type>, xpu>::Launch(s, outshape.Size(),
+                                            outputs[insert_::kOut].dptr<DType>(),
+                                            values.dptr<DType>(), arr.dptr<DType>(),
+                                            k_outshape, k_valshape, N,
+                                            inputs[insert_::kObj].dptr<IType>(), numnew,
+                                            val_strides, old_val_strides, arr_strides,
+                                            out_strides, arr.shape_.ndim(),
+                                            values.shape_.ndim(), outshape.ndim(),
+                                            axis, true);
+        });
+      });
+    });
+  } else if (indices_len == 1) {
+    MSHADOW_TYPE_SWITCH(outputs[insert_::kOut].type_flag_, DType, {
+      MXNET_ASSIGN_REQ_SWITCH(req[insert_::kOut], req_type, {
+        if (param.stop.has_value()) {
+          Kernel<InsertSingleIndexForward<req_type>, xpu>::Launch(s, outshape.Size(),
+                                            outputs[insert_::kOut].dptr<DType>(),
+                                            values.dptr<DType>(), arr.dptr<DType>(),
+                                            k_outshape, k_valshape, start, numnew,
+                                            val_strides, old_val_strides, arr_strides, out_strides,
+                                            arr.shape_.ndim(), values.shape_.ndim(),
+                                            outshape.ndim(), axis, false);
+        } else {
+          MSHADOW_TYPE_SWITCH(inputs[insert_::kObj].type_flag_, IType, {
+            Kernel<InsertSingleIndexForward<req_type>, xpu>::Launch(s, outshape.Size(),
+                                              outputs[insert_::kOut].dptr<DType>(),
+                                              values.dptr<DType>(), arr.dptr<DType>(),
+                                              k_outshape, k_valshape,
+                                              N, inputs[insert_::kObj].dptr<IType>(), numnew,
+                                              val_strides, old_val_strides,
+                                              arr_strides, out_strides,
+                                              arr.shape_.ndim(), values.shape_.ndim(),
+                                              outshape.ndim(), axis, false);
+          });
+        }
+      });
+    });
+  } else {
+    // broadcast check
+    for (int i = outshape.ndim() - 1; i >= 0; --i) {
+      int sz = outshape[i];
+      if (i == axis) {
+        sz = numnew;
+      }
+      CHECK((values.shape_[i] == 1) || (values.shape_[i] == sz));
+    }
+    size_t temp_storage_bytes, temp_mem_size;
+    MSHADOW_TYPE_SWITCH((inputs.size() == 3U) ?
+                        inputs[insert_::kObj].type_flag_ :
+                        mshadow::DataType<int64_t>::kFlag, IType, {
+      temp_storage_bytes = SortByKeyWorkspaceSize<IType, int, xpu>(indices_len, false, true);
+      temp_mem_size = indices_len * sizeof(IType) * 2 +
+                      indices_len * sizeof(int) +
+                      newshape[axis] * sizeof(int) * 2 +
+                      temp_storage_bytes;
+      Tensor<xpu, 1, char> temp_mem =
+        ctx.requested[0].get_space_typed<xpu, 1, char>(Shape1(temp_mem_size), s);
+      IType* indices_ptr = reinterpret_cast<IType*>(temp_mem.dptr_);
+      IType* sorted_indices_ptr = reinterpret_cast<IType*>
+                                  (temp_mem.dptr_ + indices_len * sizeof(IType));
+      int* order_ptr = reinterpret_cast<int*>(temp_mem.dptr_ + indices_len * sizeof(IType) * 2);
+      int* is_insert = reinterpret_cast<int*>(temp_mem.dptr_ + indices_len * sizeof(IType) * 2 +
+                                              indices_len * sizeof(int));
+      int* origin_idx = reinterpret_cast<int*>(temp_mem.dptr_ + indices_len * sizeof(IType) * 2 +
+                                               indices_len * sizeof(int) + newshape[axis] * sizeof(int));
+      Tensor<xpu, 1, char> temp_storage(temp_mem.dptr_ + indices_len * sizeof(IType) * 2 +
+                                        indices_len * sizeof(int) + newshape[axis] * sizeof(int) * 2,
+                                        Shape1(temp_storage_bytes), s);
+      Tensor<xpu, 1, IType> indices(indices_ptr, Shape1(indices_len), s);
+      Tensor<xpu, 1, IType> sorted_indices(sorted_indices_ptr, Shape1(indices_len), s);
+      Tensor<xpu, 1, int> order(order_ptr, Shape1(indices_len), s);
+      int num_bits = common::ilog2ui(static_cast<unsigned int>(indices_len) - 1);
+
+      if (param.stop.has_value()) {
+        Kernel<SliceToIndices, xpu>::Launch(s, indices_len,
+                                            indices_ptr, N,
+                                            start, step);
+      } else {
+        Kernel<ObjToIndices, xpu>::Launch(s, indices_len,
+                                          indices_ptr, N,
+                                          inputs[insert_::kObj].dptr<IType>());
+      }
+
+      Kernel<AssignId, xpu>::Launch(s, indices_len, order_ptr);
+      mxnet::op::SortByKey(indices, order, true, &temp_storage, 0, num_bits, &sorted_indices);
+      Kernel<IndicesModify, xpu>::Launch(s, indices_len, indices_ptr, order_ptr);
+
+      Kernel<AssignInsertZero, xpu>::Launch(s, newshape[axis], is_insert);
+      Kernel<SetIsInsert, xpu>::Launch(s, indices_len, indices_ptr, is_insert);
+
+      Kernel<SetOriginValuesIdx, xpu>::Launch(s, indices_len, indices_ptr, origin_idx);
+      Kernel<SetOriginArrIdx, xpu>::Launch(s, newshape[axis], is_insert, origin_idx);
+
+      MSHADOW_TYPE_SWITCH(outputs[insert_::kOut].type_flag_, DType, {
+        MXNET_ASSIGN_REQ_SWITCH(req[insert_::kOut], req_type, {
+          Kernel<InsertSeqForward<req_type>, xpu>::Launch(s, outshape.Size(),
+                                            outputs[insert_::kOut].dptr<DType>(),
+                                            values.dptr<DType>(), arr.dptr<DType>(),
+                                            k_outshape, k_valshape, is_insert, origin_idx,
+                                            val_strides, arr_strides, out_strides,
+                                            arr.shape_.ndim(), values.shape_.ndim(),
+                                            outshape.ndim(), axis);
+        });
+      });
+    });
+  }
+}
+
+}  // namespace op
+}  // namespace mxnet
+
+#endif  // MXNET_OPERATOR_NUMPY_NP_INSERT_OP_INL_H_
diff --git a/src/operator/numpy/np_insert_op.cc b/src/operator/numpy/np_insert_op.cc
new file mode 100644
index 000000000000..ea34e77f16d2
--- /dev/null
+++ b/src/operator/numpy/np_insert_op.cc
@@ -0,0 +1,166 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/*!
+ * Copyright (c) 2019 by Contributors
+ * \file np_insert_op.cc
+ * \brief CPU Implementation of numpy insert operations
+ */
+
+#include <vector>
+#include "./np_insert_op-inl.h"
+
+namespace mxnet {
+namespace op {
+
+DMLC_REGISTER_PARAMETER(NumpyInsertParam);
+
+bool NumpyInsertType(const nnvm::NodeAttrs& attrs,
+                     std::vector<int> *in_type,
+                     std::vector<int> *out_type) {
+  const NumpyInsertParam& param = nnvm::get<NumpyInsertParam>(attrs.parsed);
+  int insize = (param.stop.has_value() || param.int_ind.has_value()) ? 2 : 3;
+  CHECK_EQ(in_type->size(), insize);
+  CHECK_EQ(out_type->size(), 1U);
+  if (insize == 3) {
+    CHECK_NE((*in_type)[2], -1) << "Index type must be set for insert operator\n";
+    CHECK(((*in_type)[2] == mshadow::DataType<int32_t>::kFlag)
+          || ((*in_type)[2] == mshadow::DataType<int64_t>::kFlag))
+      << "Index type only support int32 or int64.\n";
+  }
+  TYPE_ASSIGN_CHECK(*out_type, 0, (*in_type)[0]);
+  TYPE_ASSIGN_CHECK(*out_type, 0, (*in_type)[1]);
+  TYPE_ASSIGN_CHECK(*in_type, 0, (*out_type)[0]);
+  return (*in_type)[0] != -1;
+}
+
+bool NumpyInsertShape(const nnvm::NodeAttrs& attrs,
+                      mxnet::ShapeVector *in_shape,
+                      mxnet::ShapeVector *out_shape) {
+  using namespace mshadow;
+  const NumpyInsertParam& param = nnvm::get<NumpyInsertParam>(attrs.parsed);
+  CHECK_EQ(in_shape->size(),
+           (param.stop.has_value() || param.int_ind.has_value()) ? 2U : 3U);
+  mxnet::TShape &arrshape = (*in_shape)[insert_::kArr];
+  mxnet::TShape &valshape = (*in_shape)[insert_::kValues];
+  mxnet::TShape &objShape = (*in_shape)[insert_::kObj];
+  if (in_shape->size() == 3U) {
+    CHECK_LE(objShape.ndim(), 1)
+      << "index array argument 'obj' to insert must be one-dimensional or a scalar.\n";
+  }
+
+  out_shape->clear();
+
+  int ndim = arrshape.ndim();
+  int axis = param.axis.has_value() ? param.axis.value() : 0;
+  if (!(param.axis.has_value())) {
+    arrshape = Shape1(arrshape.Size());
+    ndim = 1;
+  } else if (ndim == 0) {
+    CHECK_EQ(valshape.ndim(), 0)
+      << "'arr' is a 0-d array, 'values' can not be assigned to it. "
+      << "ValueError: assignment to 0-d array.";
+    out_shape->push_back(valshape);
+    return shape_is_known(valshape);
+  } else {
+    CHECK(axis >= -1 * arrshape.ndim() && axis < arrshape.ndim())
+      << "Axis should be in the range of [-r, r-1] where r is the rank of input tensor";
+    axis += (axis < 0) ? arrshape.ndim() : 0;
+  }
+
+  int seq_cnt = -1;
+  int N = arrshape[axis];
+  if (in_shape->size() == 3U) {
+    seq_cnt = objShape.Size();
+  } else if (param.stop.has_value()) {
+    int step = param.step.value();
+    int stop = param.stop.value();
+    stop += (stop < 0) ? N : 0;
+    stop = (stop < 0) ? ((step < 0) ? -1 : 0) : stop;
+    stop = (stop >= N) ? ((step < 0) ? N - 1 : N) : stop;
+    int start = param.start.value();
+    start += (start < 0) ? N : 0;
+    start = (start < 0) ? ((step < 0) ? -1 : 0) : start;
+    start = (start >= N) ? ((step < 0) ? N - 1 : N) : start;
+    seq_cnt = 0;
+    if (step > 0 && stop >= start) {
+      seq_cnt = (stop - start + step - 1) / step;
+    } else if (step < 0 && stop <= start) {
+      seq_cnt = (stop - start + step + 1) / step;
+    }
+  }
+
+  mxnet::TShape newshape(arrshape);
+  mxnet::TShape val_newshape(arrshape.ndim(), -1);
+  int numnew;
+  for (int i = valshape.ndim() - 1, j = arrshape.ndim() - 1; i >= 0 || j >= 0; --i, --j) {
+    if (i >= 0 && j >= 0) {
+      val_newshape[j] = valshape[i];
+    } else if (i >= 0) {
+      CHECK_EQ(valshape[i], 1)
+        << "'values' has too many dimensions: extra leading dimensions must have size 1.";
+    } else {
+      val_newshape[j] = 1;
+    }
+  }
+  valshape.assign(val_newshape.begin(), val_newshape.end());
+
+  if (param.int_ind.has_value() ||
+      (in_shape->size() == 3U && objShape.ndim() == 0)) {
+    // because of moveaxis(values, 0, axis)
+    numnew = valshape[0];
+  } else if (seq_cnt == 1) {
+    numnew = valshape[axis];
+  } else {
+    numnew = seq_cnt;
+  }
+
+  newshape[axis] += numnew;
+  out_shape->push_back(newshape);
+  return shape_is_known(newshape);
+}
+
+NNVM_REGISTER_OP(_npi_insert)
+.describe(R"code(Insert values along the given axis before the given indices.)code" ADD_FILELINE)
+.set_attr_parser(ParamParser<NumpyInsertParam>)
+.set_num_inputs([](const NodeAttrs& attrs) {
+  const NumpyInsertParam& params = nnvm::get<NumpyInsertParam>(attrs.parsed);
+  return (params.stop.has_value() || params.int_ind.has_value()) ? 2U : 3U;
+})
+.set_num_outputs(1)
+.set_attr<nnvm::FListInputNames>("FListInputNames",
+  [](const NodeAttrs& attrs) {
+    const NumpyInsertParam& params = nnvm::get<NumpyInsertParam>(attrs.parsed);
+    return (params.stop.has_value() || params.int_ind.has_value()) ?
+           std::vector<std::string>{"arr", "values"} :
+           std::vector<std::string>{"arr", "values", "obj"};
+})
+.set_attr<mxnet::FInferShape>("FInferShape", NumpyInsertShape)
+.set_attr<nnvm::FInferType>("FInferType", NumpyInsertType)
+.set_attr<FCompute>("FCompute<cpu>", NumpyInsertCompute<cpu>)
+.set_attr<FResourceRequest>("FResourceRequest",
+  [](const NodeAttrs& attrs) {
+    return std::vector<ResourceRequest>{ResourceRequest::kTempSpace};
+  })
+.add_argument("arr", "NDArray-or-Symbol", "Input ndarray")
+.add_argument("values", "NDArray-or-Symbol", "Input ndarray")
+.add_argument("obj", "NDArray-or-Symbol", "Input ndarray")
+.add_arguments(NumpyInsertParam::__FIELDS__());
+
+}  // namespace op
+}  // namespace mxnet
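For the general sequence-of-indices path, NumpyInsertCompute stages three helper arrays in requested temp space: the insertion indices offset by their rank after a stable sort (so duplicate indices insert consecutively), a per-output-slot `is_insert` flag, and an `origin_idx` map from each output slot back to a row of `values` or `arr`. A small Python model of that bookkeeping for obj = [2, 2] on a length-4 axis (illustrative only):

    indices = [2, 2]                    # already normalized: negatives had N added
    order = sorted(range(len(indices)), key=lambda k: indices[k])  # stable argsort
    for rank, k in enumerate(order):    # IndicesModify: shift each index by its rank
        indices[k] += rank              # -> [2, 3]
    new_len = 4 + len(indices)
    is_insert = [int(j in indices) for j in range(new_len)]        # SetIsInsert
    origin_idx = [None] * new_len
    for k, j in enumerate(indices):     # SetOriginValuesIdx: slot -> values row
        origin_idx[j] = k
    arr_row = 0
    for j in range(new_len):            # SetOriginArrIdx: slot -> arr row
        if not is_insert[j]:
            origin_idx[j] = arr_row
            arr_row += 1
    print(is_insert)    # [0, 0, 1, 1, 0, 0]
    print(origin_idx)   # [0, 1, 0, 1, 2, 3]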
diff --git a/src/operator/numpy/np_insert_op.cu b/src/operator/numpy/np_insert_op.cu
new file mode 100644
index 000000000000..a53b6f4b3378
--- /dev/null
+++ b/src/operator/numpy/np_insert_op.cu
@@ -0,0 +1,35 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/*!
+ * Copyright (c) 2019 by Contributors
+ * \file np_insert_op.cu
+ * \brief GPU Implementation of numpy insert operations
+ */
+
+#include "./np_insert_op-inl.h"
+
+namespace mxnet {
+namespace op {
+
+NNVM_REGISTER_OP(_npi_insert)
+.set_attr<FCompute>("FCompute<gpu>", NumpyInsertCompute<gpu>);
+
+}  // namespace op
+}  // namespace mxnet
diff --git a/tests/python/unittest/test_numpy_interoperability.py b/tests/python/unittest/test_numpy_interoperability.py
index e52d25239d22..9c1de2721b9c 100644
--- a/tests/python/unittest/test_numpy_interoperability.py
+++ b/tests/python/unittest/test_numpy_interoperability.py
@@ -827,6 +827,18 @@ def _add_workload_inner():
     OpArgMngr.add_workload('inner', b, a)


+def _add_workload_insert():
+    a = np.arange(10)
+    for dt in [np.int32, np.int64]:
+        OpArgMngr.add_workload('insert', a, 0, np.array([0]))
+        OpArgMngr.add_workload('insert', a, np.array([], dtype=dt), np.array([]))
+        OpArgMngr.add_workload('insert', a, np.array([0, 1], dtype=dt), np.array([1, 2]))
+        OpArgMngr.add_workload('insert', a, slice(1, 2), np.array([1, 2]))
+        OpArgMngr.add_workload('insert', a, slice(1, -2, -1), np.array([]))
+        OpArgMngr.add_workload('insert', np.array([0, 1, 2]), np.array([1, 1, 1], dtype=dt), np.array([3, 4, 5]))
+        OpArgMngr.add_workload('insert', np.array(1), 0, np.array([0]))
+
+
 def _add_workload_hypot():
     OpArgMngr.add_workload('hypot', np.array(1), np.array(1))
     OpArgMngr.add_workload('hypot', np.array(0), np.array(0))
@@ -1305,6 +1317,7 @@ def _prepare_workloads():
     _add_workload_degrees()
     _add_workload_true_divide()
     _add_workload_inner()
+    _add_workload_insert()
     _add_workload_hypot()
     _add_workload_lcm()
     _add_workload_bitwise_xor()
diff --git a/tests/python/unittest/test_numpy_op.py b/tests/python/unittest/test_numpy_op.py
index d019081ec3ee..c1a967c4597b 100644
--- a/tests/python/unittest/test_numpy_op.py
+++ b/tests/python/unittest/test_numpy_op.py
@@ -1958,6 +1958,108 @@ def hybrid_forward(self, F, x):
     assert same(mx_out.asnumpy(), np_out)


+@with_seed()
+@use_np
+def test_np_insert():
+    class TestInsert(HybridBlock):
+        def __init__(self, obj, axis=None):
+            super(TestInsert, self).__init__()
+            self._obj = obj
+            self._axis = axis
+
+        def hybrid_forward(self, F, a, b):
+            return F.np.insert(a, self._obj, b, axis=self._axis)
+
+    def GetSize(tp):
+        res = 1
+        for x in tp:
+            res = res * x
+        return res
+
+    def GetNdim(tp):
+        return len(tp)
+
+    A = (3, 2)
+    B = (2,)
+    C = (2, 2)
+    D = (2, 3)
+    E = (1,)
+    F = (3, 1)
+    G = (3, 2)
+    H = (5, 2, 3, 4)
+    config = []
+    # test scalar index
+    for idx in range(-1 * GetSize(A), GetSize(A) + 1):
+        config.append(tuple([A, idx, B, None]))
+        config.append(tuple([A, idx, E, None]))
+    for idx in range(-1 * A[0], A[0] + 1):
+        config.append(tuple([A, idx, C, 0]))
+        config.append(tuple([A, idx, E, 0]))
+        config.append(tuple([A, idx, F, 0]))
+    for idx in range(-1 * A[1], A[1] + 1):
+        config.append(tuple([A, idx, D, 1]))
+        config.append(tuple([A, idx, E, 1]))
+        config.append(tuple([A, idx, F, 1]))
+    # test list of indices with size = 1
+    for idx in range(-1 * GetSize(A), GetSize(A) + 1):
+        config.append(tuple([A, [idx], B, None]))
+        config.append(tuple([A, [idx], E, None]))
+    for idx in range(-1 * A[0], A[0] + 1):
+        config.append(tuple([A, [idx], C, 0]))
+        config.append(tuple([A, [idx], E, 0]))
+        config.append(tuple([A, [idx], F, 0]))
+    for idx in range(-1 * A[1], A[1] + 1):
+        config.append(tuple([A, [idx], G, 1]))
+        config.append(tuple([A, [idx], E, 1]))
+        config.append(tuple([A, [idx], F, 1]))
+    # test list of indices with size > 1
+    for ax in range(-1 * GetNdim(A), GetNdim(A)):
+        idx = _np.random.randint(-1 * A[ax], A[ax] + 1, size=3).tolist()
+        config.append(tuple([A, idx, F, ax]))
+        config.append(tuple([A, slice(0, 3), F, ax]))
+    # test multidimensional array and unequal dimensions case
+    config.append(tuple([H, 0, D, 3]))
+    config.append(tuple([H, [1], E, 2]))
+    idx = _np.random.randint(-1 * H[3], H[3] + 1, size=3).tolist()
+    config.append(tuple([H, idx, E, 3]))
+    # test slice
+    for st in range(-5, 5):
+        for ed in range(-5, 5):
+            for stp in [-1, 1, 2]:
+                config.append(tuple([A, slice(st, ed, stp), F, 1]))
+
+    for arr_shape, obj, val_shape, axis in config:
+        for objtype in ['int32', 'int64']:
+            if isinstance(obj, list):
+                obj_mxnp = np.array(obj, dtype=objtype)
+                obj_onp = _np.array(obj, dtype=objtype)
+            elif isinstance(obj, slice):
+                obj_mxnp = obj
+                obj_onp = obj
+            else:  # integer index
+                obj_mxnp = (_np.int32(obj) if objtype == 'int32' else _np.int64(obj))
+                obj_onp = (_np.int32(obj) if objtype == 'int32' else _np.int64(obj))
+            test_insert = TestInsert(obj=obj_mxnp, axis=axis)
+
+            a = mx.nd.random.uniform(-1.0, 1.0, shape=arr_shape).as_np_ndarray()
+            a.attach_grad()
+            b = mx.nd.random.uniform(-1.0, 1.0, shape=val_shape).as_np_ndarray()
+            b.attach_grad()
+            expected_ret = _np.insert(a.asnumpy(), obj_onp, b.asnumpy(), axis=axis)
+
+            with mx.autograd.record():
+                y = test_insert(a, b)
+
+            assert y.shape == expected_ret.shape
+            assert_almost_equal(y.asnumpy(), expected_ret, rtol=1e-3, atol=1e-5)
+
+            # test imperative
+            mx_out = np.insert(a, obj_mxnp, b, axis=axis)
+            np_out = _np.insert(a.asnumpy(), obj_onp, b.asnumpy(), axis=axis)
+
+            assert_almost_equal(mx_out.asnumpy(), np_out, rtol=1e-3, atol=1e-5)


 @with_seed()
 @use_np
 def test_np_split():