
[Numpy] Implement atleast_1d/2d/3d #17099

Merged · 1 commit · Jan 3, 2020
107 changes: 107 additions & 0 deletions python/mxnet/_numpy_op_doc.py
@@ -408,6 +408,113 @@ def _np_copy(a, out=None):
    pass


def _np_atleast_1d(*arys):
    """
    Convert inputs to arrays with at least one dimension.

    Scalar inputs are converted to 1-dimensional arrays, whilst
    higher-dimensional inputs are preserved.

    Parameters
    ----------
    arys1, arys2, ... : ndarray
        One or more input arrays.

    Returns
    -------
    ret : ndarray
        An array, or list of arrays, each with ``a.ndim >= 1``.
        Copies are made only if necessary.

    See also
    --------
    atleast_2d, atleast_3d

    Examples
    --------
    >>> np.atleast_1d(1.0)
    array([1.])
    >>> x = np.arange(9.0).reshape(3,3)
    >>> np.atleast_1d(x)
    array([[0., 1., 2.],
           [3., 4., 5.],
           [6., 7., 8.]])
    >>> np.atleast_1d(np.array(1), np.array([3, 4]))
    [array([1.]), array([3., 4.])]
    """
    pass


def _np_atleast_2d(*arys):
    """
    Convert inputs to arrays with at least two dimensions.

    Parameters
    ----------
    arys1, arys2, ... : ndarray
        One or more input arrays.

    Returns
    -------
    ret : ndarray
        An array, or list of arrays, each with ``a.ndim >= 2``.
        Copies are made only if necessary.

    See also
    --------
    atleast_1d, atleast_3d

    Examples
    --------
    >>> np.atleast_2d(3.0)
    array([[3.]])
    >>> x = np.arange(3.0)
    >>> np.atleast_2d(x)
    array([[0., 1., 2.]])
    >>> np.atleast_2d(np.array(1), np.array([1, 2]), np.array([[1, 2]]))
    [array([[1.]]), array([[1., 2.]]), array([[1., 2.]])]
    """
    pass


def _np_atleast_3d(*arys):
    """
    Convert inputs to arrays with at least three dimensions.

    Parameters
    ----------
    arys1, arys2, ... : ndarray
        One or more input arrays.

    Returns
    -------
    ret : ndarray
        An array, or list of arrays, each with ``a.ndim >= 3``.
        For example, a 1-D array of shape ``(N,)`` becomes a view of shape
        ``(1, N, 1)``, and a 2-D array of shape ``(M, N)`` becomes a view
        of shape ``(M, N, 1)``.

    See also
    --------
    atleast_1d, atleast_2d

    Examples
    --------
    >>> np.atleast_3d(3.0)
    array([[[3.]]])
    >>> x = np.arange(3.0)
    >>> np.atleast_3d(x).shape
    (1, 3, 1)
    >>> x = np.arange(12.0).reshape(4,3)
    >>> np.atleast_3d(x).shape
    (4, 3, 1)
    >>> for arr in np.atleast_3d(np.array([1, 2]), np.array([[1, 2]]), np.array([[[1, 2]]])):
    ...     print(arr, arr.shape)
    ...
    [[[1.]
      [2.]]] (1, 2, 1)
    [[[1.]
      [2.]]] (1, 2, 1)
    [[[1. 2.]]] (1, 1, 2)
    """
    pass
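
The promotion rules documented above match NumPy's reference behavior and can be sanity-checked against it directly. A minimal sketch using the official numpy package (imported as onp here purely for illustration, to avoid clashing with mxnet.numpy):

import numpy as onp

for a in (onp.float64(2.0), onp.arange(3.0), onp.ones((2, 3))):
    # shape before -> after atleast_1d / atleast_2d / atleast_3d
    print(onp.shape(a),
          onp.atleast_1d(a).shape,
          onp.atleast_2d(a).shape,
          onp.atleast_3d(a).shape)
# ()     (1,)   (1, 1)  (1, 1, 1)
# (3,)   (3,)   (1, 3)  (1, 3, 1)
# (2, 3) (2, 3) (2, 3)  (2, 3, 1)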


def _np_reshape(a, newshape, order='C', out=None):
    """
    Gives a new shape to an array without changing its data.
3 changes: 3 additions & 0 deletions python/mxnet/numpy_dispatch_protocol.py
@@ -107,6 +107,9 @@ def _run_with_array_ufunc_proto(*args, **kwargs):
    'min',
    'nonzero',
    'ones_like',
    'atleast_1d',
    'atleast_2d',
    'atleast_3d',
    'prod',
    'ravel',
    'repeat',
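
Listing the three names here opts them into MXNet's NumPy array-function dispatch protocol, so calling the corresponding official-NumPy function on MXNet ndarrays can route to the MXNet implementation (this is what the interoperability workloads below exercise). A sketch of the intended effect, assuming NumPy >= 1.17 with the __array_function__ protocol available:

import numpy as onp
from mxnet import np, npx
npx.set_np()

x = np.array([1., 2.])    # an mxnet.numpy ndarray
y = onp.atleast_2d(x)     # dispatches to the MXNet implementation
print(type(y), y.shape)   # <class 'mxnet.numpy.ndarray'> (1, 2)
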
71 changes: 71 additions & 0 deletions src/operator/numpy/np_init_op.cc
@@ -35,6 +35,7 @@ DMLC_REGISTER_PARAMETER(NumpyEyeParam);
DMLC_REGISTER_PARAMETER(IndicesOpParam);
DMLC_REGISTER_PARAMETER(LogspaceParam);
DMLC_REGISTER_PARAMETER(FullLikeOpParam);
DMLC_REGISTER_PARAMETER(AtleastNDParam);

inline bool NumpyIndicesShape(const nnvm::NodeAttrs& attrs,
                              mxnet::ShapeVector* in_shapes,
@@ -98,6 +99,76 @@ NNVM_REGISTER_OP(_npi_identity)
.set_attr<FCompute>("FCompute<cpu>", IdentityCompute<cpu>)
.add_arguments(InitOpParam::__FIELDS__());

template<int NDim>
inline bool AtleastNDShape(const nnvm::NodeAttrs& attrs,
                           std::vector<mxnet::TShape> *in_attrs,
                           std::vector<mxnet::TShape> *out_attrs) {
  auto &param = nnvm::get<AtleastNDParam>(attrs.parsed);

  CHECK_EQ(in_attrs->size(), param.num_args);
  CHECK_EQ(out_attrs->size(), param.num_args);

  for (int i = 0; i < param.num_args; ++i) {
    auto &shape = in_attrs->at(i);
    if (shape.ndim() < NDim) {
      // Pad with size-1 axes, placing the original extents according to
      // the numpy atleast_{1,2,3}d promotion rules.
      mxnet::TShape new_shape(NDim, 1);
      if (NDim == 2) {
        if (shape.ndim() == 1) {
          new_shape[1] = shape[0];  // (N,) -> (1, N)
        }
      } else if (NDim == 3) {
        if (shape.ndim() == 1) {
          new_shape[1] = shape[0];  // (N,) -> (1, N, 1)
        } else if (shape.ndim() == 2) {
          new_shape[0] = shape[0];  // (M, N) -> (M, N, 1)
          new_shape[1] = shape[1];
        }
      }
      SHAPE_ASSIGN_CHECK(*out_attrs, i, new_shape);
    } else {
      // Already has at least NDim dimensions; pass the shape through.
      SHAPE_ASSIGN_CHECK(*out_attrs, i, shape);
    }
  }

  return shape_is_known(*in_attrs) && shape_is_known(*out_attrs);
}
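
In words: for NDim == 2 a 1-D input (N,) is padded to (1, N); for NDim == 3 a 1-D input becomes (1, N, 1) and a 2-D input (M, N) becomes (M, N, 1); anything already at NDim or above passes through untouched. A hand-written Python sketch of the same padding rule (an illustration, not code from this PR):

def atleast_nd_shape(shape, ndim):
    """Output shape that AtleastNDShape<ndim> assigns to one input shape."""
    if len(shape) >= ndim:
        return tuple(shape)
    new_shape = [1] * ndim
    if ndim == 2 and len(shape) == 1:
        new_shape[1] = shape[0]              # (N,) -> (1, N)
    elif ndim == 3 and len(shape) == 1:
        new_shape[1] = shape[0]              # (N,) -> (1, N, 1)
    elif ndim == 3 and len(shape) == 2:
        new_shape[0], new_shape[1] = shape   # (M, N) -> (M, N, 1)
    return tuple(new_shape)

assert atleast_nd_shape((4,), 3) == (1, 4, 1)
assert atleast_nd_shape((4, 3), 3) == (4, 3, 1)
assert atleast_nd_shape((), 2) == (1, 1)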

#define NNVM_REGISTER_ATLEAST_ND(N)                                     \
NNVM_REGISTER_OP(_np_atleast_##N##d)                                    \
.set_attr_parser(ParamParser<AtleastNDParam>)                           \
.set_num_inputs(                                                        \
  [](const NodeAttrs& attrs) {                                          \
    auto &param = nnvm::get<AtleastNDParam>(attrs.parsed);              \
    return param.num_args;                                              \
  })                                                                    \
.set_num_outputs(                                                       \
  [](const NodeAttrs& attrs) {                                          \
    auto &param = nnvm::get<AtleastNDParam>(attrs.parsed);              \
    return param.num_args;                                              \
  })                                                                    \
.set_attr<std::string>("key_var_num_args", "num_args")                  \
.set_attr<nnvm::FListInputNames>("FListInputNames",                     \
  [](const nnvm::NodeAttrs& attrs) {                                    \
    int num_args = nnvm::get<AtleastNDParam>(attrs.parsed).num_args;    \
    std::vector<std::string> ret;                                       \
    for (int i = 0; i < num_args; i++) {                                \
      ret.push_back(std::string("ary") + std::to_string(i));            \
    }                                                                   \
    return ret;                                                         \
  })                                                                    \
.set_attr<nnvm::FInferType>("FInferType", ElemwiseType<-1, -1>)         \
.set_attr<mxnet::FInferShape>("FInferShape", AtleastNDShape<N>)         \
.set_attr<nnvm::FGradient>("FGradient", MakeZeroGradNodes)              \
.set_attr<FCompute>("FCompute<cpu>", AtleastNDCompute<cpu>)             \
.add_argument("arys", "NDArray-or-Symbol[]", "List of input arrays")    \
.add_arguments(AtleastNDParam::__FIELDS__())

NNVM_REGISTER_ATLEAST_ND(1);

NNVM_REGISTER_ATLEAST_ND(2);

NNVM_REGISTER_ATLEAST_ND(3);
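
Once registered, the three operators surface in the mxnet.numpy frontend as np.atleast_1d/np.atleast_2d/np.atleast_3d (the tests below exercise exactly that path). A quick interactive check, assuming a build that includes this change:

from mxnet import np, npx
npx.set_np()

print(np.atleast_2d(np.array(5.0)).shape)                  # (1, 1)
print(np.atleast_3d(np.arange(6.0).reshape(2, 3)).shape)   # (2, 3, 1)
outs = np.atleast_1d(np.array(1), np.array([3, 4]))
print([o.shape for o in outs])                             # [(1,), (2,)]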

NNVM_REGISTER_OP(_npi_full_like)
.set_num_inputs(1)
.set_num_outputs(1)
9 changes: 9 additions & 0 deletions src/operator/numpy/np_init_op.cu
@@ -41,6 +41,15 @@ NNVM_REGISTER_OP(_npi_identity)
NNVM_REGISTER_OP(_npi_full_like)
.set_attr<FCompute>("FCompute<gpu>", FullLikeOpCompute<gpu>);

NNVM_REGISTER_OP(_np_atleast_1d)
.set_attr<FCompute>("FCompute<gpu>", AtleastNDCompute<gpu>);

NNVM_REGISTER_OP(_np_atleast_2d)
.set_attr<FCompute>("FCompute<gpu>", AtleastNDCompute<gpu>);

NNVM_REGISTER_OP(_np_atleast_3d)
.set_attr<FCompute>("FCompute<gpu>", AtleastNDCompute<gpu>);

NNVM_REGISTER_OP(_npi_arange)
.set_attr<FCompute>("FCompute<gpu>", RangeCompute<gpu, RangeParam>);

23 changes: 23 additions & 0 deletions src/operator/numpy/np_init_op.h
@@ -272,6 +272,29 @@ void LogspaceCompute(const nnvm::NodeAttrs& attrs,
  });
}

struct AtleastNDParam : dmlc::Parameter<AtleastNDParam> {
  int num_args;
  DMLC_DECLARE_PARAMETER(AtleastNDParam) {
    DMLC_DECLARE_FIELD(num_args)
      .set_lower_bound(1)
      .describe("Number of input arrays.");
  }
};

template<typename xpu>
void AtleastNDCompute(const nnvm::NodeAttrs& attrs,
                      const OpContext& ctx,
                      const std::vector<TBlob>& inputs,
                      const std::vector<OpReqType>& req,
                      const std::vector<TBlob>& outputs) {
  auto &param = nnvm::get<AtleastNDParam>(attrs.parsed);
  CHECK_EQ(inputs.size(), param.num_args);
  CHECK_EQ(outputs.size(), param.num_args);
  // All shape promotion happens in AtleastNDShape at inference time; element
  // counts are unchanged, so each output is a plain copy of its input.
  for (int i = 0; i < param.num_args; ++i) {
    UnaryOp::IdentityCompute<xpu>(attrs, ctx, {inputs[i]}, {req[i]}, {outputs[i]});
  }
}
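
The net effect per array is therefore equivalent to a reshape-with-copy into the inferred output shape; in plain numpy terms (a hand-written analogy, not PR code):

import numpy as onp

def atleast_nd_compute(inp, out_shape):
    # identity copy into a buffer whose shape was fixed by shape inference
    return inp.reshape(out_shape).copy()

print(atleast_nd_compute(onp.arange(3.0), (1, 3, 1)).shape)  # (1, 3, 1)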

} // namespace op
} // namespace mxnet

25 changes: 25 additions & 0 deletions tests/python/unittest/test_numpy_interoperability.py
@@ -700,6 +700,30 @@ def _add_workload_ones_like(array_pool):
    OpArgMngr.add_workload('ones_like', array_pool['4x1'])


def _add_workload_atleast_nd():
    a_0 = np.array(1)
    b_0 = np.array(2)
    a_1 = np.array([1, 2])
    b_1 = np.array([2, 3])
    a_2 = np.array([[1, 2], [1, 2]])
    b_2 = np.array([[2, 3], [2, 3]])
    a_3 = [a_2, a_2]
    b_3 = [b_2, b_2]

    OpArgMngr.add_workload('atleast_1d', a_0, b_0)
    OpArgMngr.add_workload('atleast_1d', a_1, b_1)
    OpArgMngr.add_workload('atleast_1d', a_2, b_2)
    OpArgMngr.add_workload('atleast_1d', a_3, b_3)
    OpArgMngr.add_workload('atleast_2d', a_0, b_0)
    OpArgMngr.add_workload('atleast_2d', a_1, b_1)
    OpArgMngr.add_workload('atleast_2d', a_2, b_2)
    OpArgMngr.add_workload('atleast_2d', a_3, b_3)
    OpArgMngr.add_workload('atleast_3d', a_0, b_0)
    OpArgMngr.add_workload('atleast_3d', a_1, b_1)
    OpArgMngr.add_workload('atleast_3d', a_2, b_2)
    OpArgMngr.add_workload('atleast_3d', a_3, b_3)


def _add_workload_prod(array_pool):
    OpArgMngr.add_workload('prod', array_pool['4x1'])

@@ -1429,6 +1453,7 @@ def _prepare_workloads():
    _add_workload_mean(array_pool)
    _add_workload_nonzero()
    _add_workload_ones_like(array_pool)
    _add_workload_atleast_nd()
    _add_workload_prod(array_pool)
    _add_workload_repeat(array_pool)
    _add_workload_reshape()
_add_workload_reshape()
54 changes: 54 additions & 0 deletions tests/python/unittest/test_numpy_op.py
@@ -2121,6 +2121,60 @@ def hybrid_forward(self, F, a):
        assert_almost_equal(mx_out.asnumpy(), np_out, rtol=1e-3, atol=1e-5)


@with_seed()
@use_np
def test_np_atleast_nd():
    class TestAtleastND(HybridBlock):
        def __init__(self, n):
            super(TestAtleastND, self).__init__()
            self._n = n

        def hybrid_forward(self, F, *arys):
            if self._n == 1:
                return F.np.atleast_1d(*arys)
            elif self._n == 2:
                return F.np.atleast_2d(*arys)
            elif self._n == 3:
                return F.np.atleast_3d(*arys)

    tensor_shapes = [
        ((), (2,), (3, 4, 5)),
        ((2, 3, 4, 5), (), (2, 3))
    ]
    flags = [True, False]
    ns = [1, 2, 3]
    dtypes = ['int32', 'int64', 'float16', 'float32', 'float64']
    funcs = {
        "numpy": {1: lambda *ts: _np.atleast_1d(*ts),
                  2: lambda *ts: _np.atleast_2d(*ts),
                  3: lambda *ts: _np.atleast_3d(*ts)},
        "mxnet": {1: lambda *ts: np.atleast_1d(*ts),
                  2: lambda *ts: np.atleast_2d(*ts),
                  3: lambda *ts: np.atleast_3d(*ts)}
    }
    for hybridize, n, tensor_shape, dtype in \
            itertools.product(flags, ns, tensor_shapes, dtypes):
        test_atleast_nd = TestAtleastND(n)
        if hybridize:
            test_atleast_nd.hybridize()
        if dtype in ['int32', 'int64']:
            tensors = list(map(lambda s: np.random.randint(-1, 1, size=s, dtype=dtype), tensor_shape))
        else:
            tensors = list(map(lambda s: np.random.uniform(-1.0, 1.0, size=s, dtype=dtype), tensor_shape))
        tensors_np = [t.asnumpy() for t in tensors]

        # through the HybridBlock (hybridized or not)
        mx_out = test_atleast_nd(*tensors)
        np_out = funcs["numpy"][n](*tensors_np)
        for i in range(len(tensors)):
            assert mx_out[i].shape == np_out[i].shape
            assert same(mx_out[i].asnumpy(), np_out[i])

        # directly through the mxnet.numpy frontend
        mx_out = funcs["mxnet"][n](*tensors)
        np_out = funcs["numpy"][n](*tensors_np)
        for i in range(len(tensors)):
            assert mx_out[i].shape == np_out[i].shape
            assert same(mx_out[i].asnumpy(), np_out[i])


@with_seed()
@use_np
def test_np_arange():