From 976379978941c38f69ef13c3c019c7f40876a302 Mon Sep 17 00:00:00 2001 From: Ubuntu Date: Wed, 23 Oct 2019 14:19:12 +0000 Subject: [PATCH 01/23] implement np.column_stack --- python/mxnet/ndarray/numpy/_op.py | 39 +++++- python/mxnet/numpy/multiarray.py | 38 +++++- python/mxnet/numpy_dispatch_protocol.py | 1 + python/mxnet/symbol/numpy/_symbol.py | 37 +++++- src/operator/numpy/np_matrix_op-inl.h | 82 ++++++++++++ src/operator/numpy/np_matrix_op.cc | 158 +++++++++++++++++++++++- src/operator/numpy/np_matrix_op.cu | 6 + tests/python/unittest/test_numpy_op.py | 56 +++++++++ 8 files changed, 411 insertions(+), 6 deletions(-) diff --git a/python/mxnet/ndarray/numpy/_op.py b/python/mxnet/ndarray/numpy/_op.py index cf66e29d6205..042dfab3951b 100644 --- a/python/mxnet/ndarray/numpy/_op.py +++ b/python/mxnet/ndarray/numpy/_op.py @@ -38,8 +38,7 @@ 'std', 'var', 'indices', 'copysign', 'ravel', 'hanning', 'hamming', 'blackman', 'flip', 'around', 'hypot', 'rad2deg', 'deg2rad', 'unique', 'lcm', 'tril', 'identity', 'take', 'ldexp', 'vdot', 'inner', 'outer', 'equal', 'not_equal', 'greater', 'less', 'greater_equal', 'less_equal', - 'hsplit', 'rot90', 'einsum', 'true_divide'] - + 'hsplit', 'rot90', 'einsum', 'true_divide', 'column_stack'] @set_module('mxnet.ndarray.numpy') def zeros(shape, dtype=_np.float32, order='C', ctx=None): @@ -4761,3 +4760,39 @@ def einsum(*operands, **kwargs): subscripts = operands[0] operands = operands[1:] return _npi.einsum(*operands, subscripts=subscripts, out=out, optimize=int(optimize_arg)) + +@set_module('mxnet.ndarray.numpy') +def column_stack(tup): + """ column_stack(*args, **kwargs) + + Stack 1-D arrays as columns into a 2-D array. + + Take a sequence of 1-D arrays and stack them as columns + to make a single 2-D array. 2-D arrays are stacked as-is, + just like with `hstack`. 1-D arrays are turned into 2-D columns + first. + + Parameters + ---------- + tup : sequence of 1-D or 2-D arrays. + Arrays to stack. All of them must have the same first dimension. + + Returns + ------- + stacked : 2-D array + The array formed by stacking the given arrays. + + See Also + -------- + stack, hstack, vstack, concatenate + + Examples + -------- + >>> a = np.array((1,2,3)) + >>> b = np.array((2,3,4)) + >>> np.column_stack((a,b)) + array([[1, 2], + [2, 3], + [3, 4]]) + """ + return _npi.column_stack(*tup) diff --git a/python/mxnet/numpy/multiarray.py b/python/mxnet/numpy/multiarray.py index 623b5fc482d7..cdc593738aab 100644 --- a/python/mxnet/numpy/multiarray.py +++ b/python/mxnet/numpy/multiarray.py @@ -55,7 +55,7 @@ 'swapaxes', 'clip', 'argmax', 'std', 'var', 'indices', 'copysign', 'ravel', 'hanning', 'hamming', 'blackman', 'flip', 'around', 'arctan2', 'hypot', 'rad2deg', 'deg2rad', 'unique', 'lcm', 'tril', 'identity', 'take', 'ldexp', 'vdot', 'inner', 'outer', 'equal', 'not_equal', 'greater', 'less', - 'greater_equal', 'less_equal', 'hsplit', 'rot90', 'einsum', 'true_divide'] + 'greater_equal', 'less_equal', 'hsplit', 'rot90', 'einsum', 'true_divide', 'column_stack'] # Return code for dispatching indexing function call _NDARRAY_UNSUPPORTED_INDEXING = -1 @@ -6419,3 +6419,39 @@ def einsum(*operands, **kwargs): ... np.einsum('ijk,ilm,njm,nlk,abc->',a,a,a,a,a, optimize=True) """ return _mx_nd_np.einsum(*operands, **kwargs) + +@set_module('mxnet.numpy') +def column_stack(tup): + """ column_stack(*args, **kwargs) + + Stack 1-D arrays as columns into a 2-D array. + + Take a sequence of 1-D arrays and stack them as columns + to make a single 2-D array. 
2-D arrays are stacked as-is, + just like with `hstack`. 1-D arrays are turned into 2-D columns + first. + + Parameters + ---------- + tup : sequence of 1-D or 2-D arrays. + Arrays to stack. All of them must have the same first dimension. + + Returns + ------- + stacked : 2-D array + The array formed by stacking the given arrays. + + See Also + -------- + stack, hstack, vstack, concatenate + + Examples + -------- + >>> a = np.array((1,2,3)) + >>> b = np.array((2,3,4)) + >>> np.column_stack((a,b)) + array([[1, 2], + [2, 3], + [3, 4]]) + """ + return _mx_nd_np.column_stack(tup) diff --git a/python/mxnet/numpy_dispatch_protocol.py b/python/mxnet/numpy_dispatch_protocol.py index 6db44fad7780..a2b06edf2275 100644 --- a/python/mxnet/numpy_dispatch_protocol.py +++ b/python/mxnet/numpy_dispatch_protocol.py @@ -119,6 +119,7 @@ def _run_with_array_ufunc_proto(*args, **kwargs): 'var', 'vdot', 'vstack', + 'column_stack', 'zeros_like', 'linalg.norm', 'trace', diff --git a/python/mxnet/symbol/numpy/_symbol.py b/python/mxnet/symbol/numpy/_symbol.py index aa456c8e5166..288af9bc9239 100644 --- a/python/mxnet/symbol/numpy/_symbol.py +++ b/python/mxnet/symbol/numpy/_symbol.py @@ -40,7 +40,7 @@ 'std', 'var', 'indices', 'copysign', 'ravel', 'hanning', 'hamming', 'blackman', 'flip', 'around', 'hypot', 'rad2deg', 'deg2rad', 'unique', 'lcm', 'tril', 'identity', 'take', 'ldexp', 'vdot', 'inner', 'outer', 'equal', 'not_equal', 'greater', 'less', 'greater_equal', - 'less_equal', 'hsplit', 'rot90', 'einsum', 'true_divide'] + 'less_equal', 'hsplit', 'rot90', 'einsum', 'true_divide', 'column_stack'] def _num_outputs(sym): @@ -4554,5 +4554,40 @@ def einsum(*operands, **kwargs): operands = operands[1:] return _npi.einsum(*operands, subscripts=subscripts, out=out, optimize=int(optimize_arg)) +@set_module('mxnet.symbol.numpy') +def column_stack(tup): + """ column_stack(*args, **kwargs) + + Stack 1-D arrays as columns into a 2-D array. + + Take a sequence of 1-D arrays and stack them as columns + to make a single 2-D array. 2-D arrays are stacked as-is, + just like with `hstack`. 1-D arrays are turned into 2-D columns + first. + + Parameters + ---------- + tup : sequence of 1-D or 2-D arrays. + Arrays to stack. All of them must have the same first dimension. + + Returns + ------- + stacked : 2-D array + The array formed by stacking the given arrays. 
+ + See Also + -------- + stack, hstack, vstack, concatenate + + Examples + -------- + >>> a = np.array((1,2,3)) + >>> b = np.array((2,3,4)) + >>> np.column_stack((a,b)) + array([[1, 2], + [2, 3], + [3, 4]]) + """ + return _npi.column_stack(*tup) _set_np_symbol_class(_Symbol) diff --git a/src/operator/numpy/np_matrix_op-inl.h b/src/operator/numpy/np_matrix_op-inl.h index b3206bf4aa75..edda278edc21 100644 --- a/src/operator/numpy/np_matrix_op-inl.h +++ b/src/operator/numpy/np_matrix_op-inl.h @@ -51,6 +51,14 @@ struct NumpyVstackParam : public dmlc::Parameter { } }; +struct NumpyColumnStackParam : public dmlc::Parameter { + int num_args; + DMLC_DECLARE_PARAMETER(NumpyColumnStackParam) { + DMLC_DECLARE_FIELD(num_args).set_lower_bound(1) + .describe("Number of inputs to be column stacked"); + } +}; + template void NumpyTranspose(const nnvm::NodeAttrs& attrs, const OpContext& ctx, @@ -71,6 +79,80 @@ void NumpyTranspose(const nnvm::NodeAttrs& attrs, } } +template +void NumpyColumnStackForward(const nnvm::NodeAttrs& attrs, + const OpContext& ctx, + const std::vector& inputs, + const std::vector& req, + const std::vector& outputs) { + using namespace mshadow; + using namespace mshadow_op; + + const NumpyColumnStackParam& param = nnvm::get(attrs.parsed); + CHECK_EQ(inputs.size(), param.num_args); + CHECK_EQ(outputs.size(), 1); + CHECK_EQ(req.size(), 1); + + // reshape if necessary + std::vector data(param.num_args); + for (int i = 0; i < param.num_args; i++) { + if (inputs[i].shape_.ndim() == 0 || inputs[i].shape_.ndim() == 1) { + // TShape shape = Shape2(1, inputs[i].shape_.Size()); + TShape shape = Shape2(inputs[i].shape_.Size(), 1); + data[i] = inputs[i].reshape(shape); + } else { + data[i] = inputs[i]; + } + } + + // initialize ConcatOp + ConcatParam cparam; + cparam.num_args = param.num_args; + // cparam.dim = 0; + cparam.dim = 1; + MSHADOW_TYPE_SWITCH(inputs[0].type_flag_, DType, { + ConcatOp op; + op.Init(cparam); + op.Forward(ctx, data, req, outputs); + }); +} + +template +void NumpyColumnStackBackward(const nnvm::NodeAttrs& attrs, + const OpContext& ctx, + const std::vector& inputs, + const std::vector& req, + const std::vector& outputs) { + using namespace mshadow; + using namespace mshadow_op; + + const NumpyColumnStackParam& param = nnvm::get(attrs.parsed); + CHECK_EQ(inputs.size(), 1); + CHECK_EQ(outputs.size(), param.num_args); + CHECK_EQ(req.size(), param.num_args); + + // reshape if necessary + std::vector data(param.num_args); + for (int i = 0; i < param.num_args; i++) { + if (outputs[i].shape_.ndim() == 0 || outputs[i].shape_.ndim() == 1) { + TShape shape = Shape2(outputs[i].shape_.Size(), 1); + data[i] = outputs[i].reshape(shape); + } else { + data[i] = outputs[i]; + } + } + + // initialize ConcatOp + ConcatParam cparam; + cparam.num_args = param.num_args; + cparam.dim = 1; + MSHADOW_TYPE_SWITCH(inputs[0].type_flag_, DType, { + ConcatOp op; + op.Init(cparam); + op.Backward(ctx, inputs[0], req, data); + }); +} + template void NumpyVstackForward(const nnvm::NodeAttrs& attrs, const OpContext& ctx, diff --git a/src/operator/numpy/np_matrix_op.cc b/src/operator/numpy/np_matrix_op.cc index 7bcd6ad27b52..541be4b9c1ad 100644 --- a/src/operator/numpy/np_matrix_op.cc +++ b/src/operator/numpy/np_matrix_op.cc @@ -24,6 +24,7 @@ */ #include +#include #include "./np_matrix_op-inl.h" #include "../nn/concat-inl.h" @@ -447,6 +448,161 @@ Examples:: .add_argument("data", "NDArray-or-Symbol[]", "List of arrays to stack") .add_arguments(StackParam::__FIELDS__()); +void dbg_print_shape(TShape &shape, 
const char *msg = nullptr) { + using namespace std; + if (msg) + cout << "dbg_print_shape: " << msg << " "; + else + cout << "dbg_print_shape: "; + for (int i = 0; i < shape.ndim(); ++i) + cout << shape[i] << " "; + cout << endl; +} + +bool NumpyColumnStackType(const nnvm::NodeAttrs& attrs, + std::vector *in_type, + std::vector *out_type) { + const NumpyColumnStackParam& param = nnvm::get(attrs.parsed); + CHECK_EQ(in_type->size(), param.num_args); + CHECK_EQ(out_type->size(), 1); + int dtype = -1; + for (int i = 0; i < param.num_args; i++) { + if (dtype == -1) { + dtype = in_type->at(i); + } + } + if (dtype == -1) { + dtype = out_type->at(0); + } + for (int i = 0; i < param.num_args; i++) { + TYPE_ASSIGN_CHECK(*in_type, i, dtype); + } + TYPE_ASSIGN_CHECK(*out_type, 0, dtype); + return dtype != -1; +} + +bool NumpyColumnStackShape(const nnvm::NodeAttrs& attrs, + mxnet::ShapeVector* in_attrs, + mxnet::ShapeVector* out_attrs) { + + CHECK_EQ(out_attrs->size(), 1U); + const NumpyColumnStackParam& param = nnvm::get(attrs.parsed); + CHECK_EQ(in_attrs->size(), param.num_args); + std::vector in_attrs_tmp(param.num_args); + TShape dshape; + for (int i = 0; i < param.num_args; i++) { + if ((*in_attrs)[i].ndim() == 0) { + in_attrs_tmp[i] = TShape(2, 1); + } else if ((*in_attrs)[i].ndim() == 1) { + in_attrs_tmp[i] = TShape(2, 1); + in_attrs_tmp[i][0] = (*in_attrs)[i][0]; + } else { + in_attrs_tmp[i] = (*in_attrs)[i]; + } + TShape tmp(in_attrs_tmp[i].ndim(), -1); + shape_assign(&dshape, tmp); + } + TShape tmp((*out_attrs)[0].ndim(), -1); + shape_assign(&dshape, tmp); + for (int i = 0; i < param.num_args; i++) { + SHAPE_ASSIGN_CHECK(in_attrs_tmp, i, dshape) + } + SHAPE_ASSIGN_CHECK(*out_attrs, 0, dshape) + if (dshape.ndim() == -1) { + return false; + } + int cnt = 0, sum = 0, pos = -1; + for (int i = 0; i < param.num_args; i++) { + TShape tmp = in_attrs_tmp[i]; + if (!dim_size_is_known(tmp, 1)) { + cnt++; + pos = i; + } else { + sum += tmp[1]; + } + tmp[1] = -1; + shape_assign(&dshape, tmp); + } + tmp = out_attrs->at(0); + if (!dim_size_is_known(tmp, 1)) { + cnt++; + pos = -1; + } else { + sum += tmp[1]; + } + tmp[1] = -1; + shape_assign(&dshape, tmp); + for (int i = 0; i < param.num_args; i++) { + SHAPE_ASSIGN_CHECK(in_attrs_tmp, i, dshape) + } + SHAPE_ASSIGN_CHECK(*out_attrs, 0, dshape)\ + dshape[1] = 0; + if (!shape_is_known(dshape)) { + return false; + } + + dshape[1] = sum; + if (cnt == 0) { + SHAPE_ASSIGN_CHECK(*out_attrs, 0, dshape); + } else if (cnt == 1) { + if (pos >= 0) { + in_attrs_tmp[pos][1] = out_attrs->at(0)[1] - sum; + } else { + out_attrs->at(0)[1] = sum; + } + } else { + return false; + } + for (int i = 0; i < param.num_args; i++) { + if (in_attrs->at(i).ndim() == 1) { + in_attrs->at(i)[0] = in_attrs_tmp[i][1]; + } else if (in_attrs->at(i).ndim() >= 2) { + in_attrs->at(i) = in_attrs_tmp[i]; + } + } + + return true; +} + +DMLC_REGISTER_PARAMETER(NumpyColumnStackParam); + +NNVM_REGISTER_OP(_npi_column_stack) +.describe(R"code()code" ADD_FILELINE) +.set_attr_parser(ParamParser) +.set_num_inputs([](const nnvm::NodeAttrs& attrs) { + const NumpyColumnStackParam& param = dmlc::get(attrs.parsed); + return static_cast(param.num_args); +}) +.set_num_outputs(1) +.set_attr("FListInputNames", + [](const nnvm::NodeAttrs& attrs) { + int num_args = dmlc::get(attrs.parsed).num_args; + std::vector ret; + for (int i = 0; i < num_args; ++i) { + ret.push_back(std::string("arg") + std::to_string(i)); + } + return ret; + }) +.set_attr("key_var_num_args", "num_args") +.set_attr("FInferShape", 
NumpyColumnStackShape) +.set_attr("FInferType", NumpyColumnStackType) +.set_attr("FCompute", NumpyColumnStackForward) +.set_attr("FGradient", ElemwiseGradUseNone{"_backward_np_column_stack"}) +.add_argument("data", "NDArray-or-Symbol[]", "List of arrays to column_stack") +.add_arguments(NumpyColumnStackParam::__FIELDS__()); + +NNVM_REGISTER_OP(_backward_np_column_stack) +.set_attr_parser(ParamParser) +.set_num_inputs(1) +.set_num_outputs([](const nnvm::NodeAttrs& attrs) { + const NumpyColumnStackParam& param = dmlc::get(attrs.parsed); + return static_cast(param.num_args); +}) +.set_attr("TIsBackward", true) +.set_attr("FCompute", NumpyColumnStackBackward); + +DMLC_REGISTER_PARAMETER(NumpyVstackParam); + bool NumpyVstackType(const nnvm::NodeAttrs& attrs, std::vector *in_type, std::vector *out_type) { @@ -552,8 +708,6 @@ bool NumpyVstackShape(const nnvm::NodeAttrs& attrs, return true; } -DMLC_REGISTER_PARAMETER(NumpyVstackParam); - NNVM_REGISTER_OP(_npi_vstack) .describe(R"code()code" ADD_FILELINE) .set_attr_parser(ParamParser) diff --git a/src/operator/numpy/np_matrix_op.cu b/src/operator/numpy/np_matrix_op.cu index 8c8301bb3bbf..194c5aa39033 100644 --- a/src/operator/numpy/np_matrix_op.cu +++ b/src/operator/numpy/np_matrix_op.cu @@ -59,6 +59,12 @@ NNVM_REGISTER_OP(_npi_dstack) NNVM_REGISTER_OP(_backward_np_dstack) .set_attr("FCompute", DStackGradCompute); +NNVM_REGISTER_OP(_npi_column_stack) +.set_attr("FCompute", NumpyColumnStackForward); + +NNVM_REGISTER_OP(_backward_np_column_stack) +.set_attr("FCompute", NumpyColumnStackBackward); + NNVM_REGISTER_OP(_np_roll) .set_attr("FCompute", NumpyRollCompute); diff --git a/tests/python/unittest/test_numpy_op.py b/tests/python/unittest/test_numpy_op.py index 4f01fa9ed4fa..91e5830e6f47 100644 --- a/tests/python/unittest/test_numpy_op.py +++ b/tests/python/unittest/test_numpy_op.py @@ -3542,6 +3542,62 @@ def test_np_true_divide(): assert_almost_equal(out_mx.asnumpy(), out_np, rtol=1e-3, atol=1e-3, use_broadcast=False) +@with_seed() +@use_np +def test_np_column_stack(): + class TestColumnStack(HybridBlock): + def __init__(self): + super(TestColumnStack, self).__init__() + + def hybrid_forward(self, F, a, *args): + return F.np.column_stack([a] + list(args)) + + def g(data): + return _np.ones_like(data) + + configs = [ + ((), (), ()), + ((2), (2), (2)), + ((1, 3), (1, 3), (1, 3)), + ((0), (0), (0)), + ((2, 2), (2, 1), (2, 3)), + ((4, 3), (4, 4), (4, 1)), + ((2, 2, 2), (2, 4, 2), (2, 2, 2)), + ((0, 1, 1), (0, 1, 1), (0, 1, 1)), + ((2, 1), (2, 2), (2, 2)) + ] + types = ['float16', 'float32', 'float64', 'int8', 'int32', 'int64'] + for config in configs: + for hybridize in [True, False]: + for dtype in types: + test_column_stack = TestColumnStack() + if hybridize: + test_column_stack.hybridize() + rtol = 1e-3 + atol = 1e-5 + v = [] + v_np = [] + for i in range(3): + v_np.append(_np.array(_np.random.uniform(-10.0, 10.0, config[i]), dtype=dtype)) + v.append(mx.nd.array(v_np[i]).as_np_ndarray()) + v[i].attach_grad() + expected_np = _np.column_stack(v_np) + with mx.autograd.record(): + mx_out = test_column_stack(*v) + assert mx_out.shape == expected_np.shape + assert_almost_equal(mx_out.asnumpy(), expected_np, rtol=rtol, atol=atol) + + # Test gradient + mx_out.backward() + for i in range(3): + expected_grad = g(v_np[i]) + assert_almost_equal(v[i].grad.asnumpy(), expected_grad, rtol=rtol, atol=atol) + + # Test imperative once again + mx_out = np.column_stack(v) + expected_np = _np.column_stack(v_np) + assert_almost_equal(mx_out.asnumpy(), expected_np, 
rtol=rtol, atol=atol) + if __name__ == '__main__': import nose nose.runmodule() From f92682f1ceca5ef8cb974f6b790113ed9099cee8 Mon Sep 17 00:00:00 2001 From: Ubuntu Date: Wed, 23 Oct 2019 15:03:59 +0000 Subject: [PATCH 02/23] cpplint --- src/operator/numpy/np_matrix_op.cc | 13 ------------- 1 file changed, 13 deletions(-) diff --git a/src/operator/numpy/np_matrix_op.cc b/src/operator/numpy/np_matrix_op.cc index 541be4b9c1ad..1a2a36126728 100644 --- a/src/operator/numpy/np_matrix_op.cc +++ b/src/operator/numpy/np_matrix_op.cc @@ -24,7 +24,6 @@ */ #include -#include #include "./np_matrix_op-inl.h" #include "../nn/concat-inl.h" @@ -448,17 +447,6 @@ Examples:: .add_argument("data", "NDArray-or-Symbol[]", "List of arrays to stack") .add_arguments(StackParam::__FIELDS__()); -void dbg_print_shape(TShape &shape, const char *msg = nullptr) { - using namespace std; - if (msg) - cout << "dbg_print_shape: " << msg << " "; - else - cout << "dbg_print_shape: "; - for (int i = 0; i < shape.ndim(); ++i) - cout << shape[i] << " "; - cout << endl; -} - bool NumpyColumnStackType(const nnvm::NodeAttrs& attrs, std::vector *in_type, std::vector *out_type) { @@ -484,7 +472,6 @@ bool NumpyColumnStackType(const nnvm::NodeAttrs& attrs, bool NumpyColumnStackShape(const nnvm::NodeAttrs& attrs, mxnet::ShapeVector* in_attrs, mxnet::ShapeVector* out_attrs) { - CHECK_EQ(out_attrs->size(), 1U); const NumpyColumnStackParam& param = nnvm::get(attrs.parsed); CHECK_EQ(in_attrs->size(), param.num_args); From 3bd75114cab250d0a66c5084ff83d310263585ee Mon Sep 17 00:00:00 2001 From: Ubuntu Date: Wed, 23 Oct 2019 15:49:13 +0000 Subject: [PATCH 03/23] remove column_stack from numpy interoperability test temporarily --- python/mxnet/numpy_dispatch_protocol.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/python/mxnet/numpy_dispatch_protocol.py b/python/mxnet/numpy_dispatch_protocol.py index a2b06edf2275..592698df111c 100644 --- a/python/mxnet/numpy_dispatch_protocol.py +++ b/python/mxnet/numpy_dispatch_protocol.py @@ -119,7 +119,7 @@ def _run_with_array_ufunc_proto(*args, **kwargs): 'var', 'vdot', 'vstack', - 'column_stack', + # 'column_stack', 'zeros_like', 'linalg.norm', 'trace', From 5da9f188434b3fa6241d72f14df22fc2de11a54e Mon Sep 17 00:00:00 2001 From: Ubuntu Date: Thu, 24 Oct 2019 05:36:56 +0000 Subject: [PATCH 04/23] style and test fix --- python/mxnet/ndarray/numpy/_op.py | 76 +++++++++--------- python/mxnet/numpy/multiarray.py | 79 ++++++++++--------- python/mxnet/numpy_dispatch_protocol.py | 2 +- python/mxnet/symbol/numpy/_symbol.py | 78 +++++++++--------- src/operator/numpy/np_matrix_op-inl.h | 16 ++-- src/operator/numpy/np_matrix_op.cc | 8 +- .../unittest/test_numpy_interoperability.py | 6 ++ tests/python/unittest/test_numpy_op.py | 8 +- 8 files changed, 142 insertions(+), 131 deletions(-) diff --git a/python/mxnet/ndarray/numpy/_op.py b/python/mxnet/ndarray/numpy/_op.py index 042dfab3951b..249b783f5167 100644 --- a/python/mxnet/ndarray/numpy/_op.py +++ b/python/mxnet/ndarray/numpy/_op.py @@ -34,11 +34,11 @@ 'log1p', 'rint', 'radians', 'reciprocal', 'square', 'negative', 'fix', 'ceil', 'floor', 'trunc', 'logical_not', 'arcsinh', 'arccosh', 'arctanh', 'tensordot', 'histogram', 'eye', 'linspace', 'logspace', 'expand_dims', 'tile', 'arange', 'split', 'vsplit', 'concatenate', - 'stack', 'vstack', 'dstack', 'mean', 'maximum', 'minimum', 'swapaxes', 'clip', 'argmax', + 'stack', 'vstack', 'column_stack', 'dstack', 'mean', 'maximum', 'minimum', 'swapaxes', 'clip', 'argmax', 'std', 'var', 'indices', 'copysign', 
'ravel', 'hanning', 'hamming', 'blackman', 'flip', 'around', 'hypot', 'rad2deg', 'deg2rad', 'unique', 'lcm', 'tril', 'identity', 'take', 'ldexp', 'vdot', 'inner', 'outer', 'equal', 'not_equal', 'greater', 'less', 'greater_equal', 'less_equal', - 'hsplit', 'rot90', 'einsum', 'true_divide', 'column_stack'] + 'hsplit', 'rot90', 'einsum', 'true_divide'] @set_module('mxnet.ndarray.numpy') def zeros(shape, dtype=_np.float32, order='C', ctx=None): @@ -3003,6 +3003,43 @@ def get_list(arrays): return _npi.vstack(*arrays) +@set_module('mxnet.ndarray.numpy') +def column_stack(tup): + """ column_stack(*args, **kwargs) + + Stack 1-D arrays as columns into a 2-D array. + + Take a sequence of 1-D arrays and stack them as columns + to make a single 2-D array. 2-D arrays are stacked as-is, + just like with `hstack`. 1-D arrays are turned into 2-D columns + first. + + Parameters + ---------- + tup : sequence of 1-D or 2-D arrays. + Arrays to stack. All of them must have the same first dimension. + + Returns + ------- + stacked : 2-D array + The array formed by stacking the given arrays. + + See Also + -------- + stack, hstack, vstack, concatenate + + Examples + -------- + >>> a = np.array((1,2,3)) + >>> b = np.array((2,3,4)) + >>> np.column_stack((a,b)) + array([[1, 2], + [2, 3], + [3, 4]]) + """ + return _npi.column_stack(*tup) + + @set_module('mxnet.ndarray.numpy') def dstack(arrays): """ @@ -4761,38 +4798,3 @@ def einsum(*operands, **kwargs): operands = operands[1:] return _npi.einsum(*operands, subscripts=subscripts, out=out, optimize=int(optimize_arg)) -@set_module('mxnet.ndarray.numpy') -def column_stack(tup): - """ column_stack(*args, **kwargs) - - Stack 1-D arrays as columns into a 2-D array. - - Take a sequence of 1-D arrays and stack them as columns - to make a single 2-D array. 2-D arrays are stacked as-is, - just like with `hstack`. 1-D arrays are turned into 2-D columns - first. - - Parameters - ---------- - tup : sequence of 1-D or 2-D arrays. - Arrays to stack. All of them must have the same first dimension. - - Returns - ------- - stacked : 2-D array - The array formed by stacking the given arrays. 
- - See Also - -------- - stack, hstack, vstack, concatenate - - Examples - -------- - >>> a = np.array((1,2,3)) - >>> b = np.array((2,3,4)) - >>> np.column_stack((a,b)) - array([[1, 2], - [2, 3], - [3, 4]]) - """ - return _npi.column_stack(*tup) diff --git a/python/mxnet/numpy/multiarray.py b/python/mxnet/numpy/multiarray.py index cdc593738aab..6477d88ff510 100644 --- a/python/mxnet/numpy/multiarray.py +++ b/python/mxnet/numpy/multiarray.py @@ -50,12 +50,12 @@ 'sqrt', 'cbrt', 'abs', 'absolute', 'exp', 'expm1', 'arcsin', 'arccos', 'arctan', 'sign', 'log', 'degrees', 'log2', 'log1p', 'rint', 'radians', 'reciprocal', 'square', 'negative', 'fix', 'ceil', 'floor', 'trunc', 'logical_not', 'arcsinh', 'arccosh', 'arctanh', - 'tensordot', 'histogram', 'eye', 'linspace', 'logspace', 'expand_dims', 'tile', 'arange', - 'split', 'vsplit', 'concatenate', 'stack', 'vstack', 'dstack', 'mean', 'maximum', 'minimum', + 'tensordot', 'histogram', 'eye', 'linspace', 'logspace', 'expand_dims', 'tile', 'arange', 'split', + 'vsplit', 'concatenate', 'stack', 'vstack', 'column_stack', 'dstack', 'mean', 'maximum', 'minimum', 'swapaxes', 'clip', 'argmax', 'std', 'var', 'indices', 'copysign', 'ravel', 'hanning', 'hamming', 'blackman', 'flip', 'around', 'arctan2', 'hypot', 'rad2deg', 'deg2rad', 'unique', 'lcm', 'tril', 'identity', 'take', 'ldexp', 'vdot', 'inner', 'outer', 'equal', 'not_equal', 'greater', 'less', - 'greater_equal', 'less_equal', 'hsplit', 'rot90', 'einsum', 'true_divide', 'column_stack'] + 'greater_equal', 'less_equal', 'hsplit', 'rot90', 'einsum', 'true_divide'] # Return code for dispatching indexing function call _NDARRAY_UNSUPPORTED_INDEXING = -1 @@ -4619,6 +4619,43 @@ def vstack(arrays, out=None): return _mx_nd_np.vstack(arrays) +@set_module('mxnet.numpy') +def column_stack(tup): + """ column_stack(*args, **kwargs) + + Stack 1-D arrays as columns into a 2-D array. + + Take a sequence of 1-D arrays and stack them as columns + to make a single 2-D array. 2-D arrays are stacked as-is, + just like with `hstack`. 1-D arrays are turned into 2-D columns + first. + + Parameters + ---------- + tup : sequence of 1-D or 2-D arrays. + Arrays to stack. All of them must have the same first dimension. + + Returns + ------- + stacked : 2-D array + The array formed by stacking the given arrays. + + See Also + -------- + stack, hstack, vstack, concatenate + + Examples + -------- + >>> a = np.array((1,2,3)) + >>> b = np.array((2,3,4)) + >>> np.column_stack((a,b)) + array([[1, 2], + [2, 3], + [3, 4]]) + """ + return _mx_nd_np.column_stack(tup) + + @set_module('mxnet.numpy') def dstack(arrays): """ @@ -6419,39 +6456,3 @@ def einsum(*operands, **kwargs): ... np.einsum('ijk,ilm,njm,nlk,abc->',a,a,a,a,a, optimize=True) """ return _mx_nd_np.einsum(*operands, **kwargs) - -@set_module('mxnet.numpy') -def column_stack(tup): - """ column_stack(*args, **kwargs) - - Stack 1-D arrays as columns into a 2-D array. - - Take a sequence of 1-D arrays and stack them as columns - to make a single 2-D array. 2-D arrays are stacked as-is, - just like with `hstack`. 1-D arrays are turned into 2-D columns - first. - - Parameters - ---------- - tup : sequence of 1-D or 2-D arrays. - Arrays to stack. All of them must have the same first dimension. - - Returns - ------- - stacked : 2-D array - The array formed by stacking the given arrays. 
- - See Also - -------- - stack, hstack, vstack, concatenate - - Examples - -------- - >>> a = np.array((1,2,3)) - >>> b = np.array((2,3,4)) - >>> np.column_stack((a,b)) - array([[1, 2], - [2, 3], - [3, 4]]) - """ - return _mx_nd_np.column_stack(tup) diff --git a/python/mxnet/numpy_dispatch_protocol.py b/python/mxnet/numpy_dispatch_protocol.py index 592698df111c..a2b06edf2275 100644 --- a/python/mxnet/numpy_dispatch_protocol.py +++ b/python/mxnet/numpy_dispatch_protocol.py @@ -119,7 +119,7 @@ def _run_with_array_ufunc_proto(*args, **kwargs): 'var', 'vdot', 'vstack', - # 'column_stack', + 'column_stack', 'zeros_like', 'linalg.norm', 'trace', diff --git a/python/mxnet/symbol/numpy/_symbol.py b/python/mxnet/symbol/numpy/_symbol.py index 288af9bc9239..277813eabe3e 100644 --- a/python/mxnet/symbol/numpy/_symbol.py +++ b/python/mxnet/symbol/numpy/_symbol.py @@ -36,11 +36,11 @@ 'rint', 'radians', 'reciprocal', 'square', 'negative', 'fix', 'ceil', 'floor', 'trunc', 'logical_not', 'arcsinh', 'arccosh', 'arctanh', 'tensordot', 'histogram', 'eye', 'linspace', 'logspace', 'expand_dims', 'tile', 'arange', 'split', 'vsplit', 'concatenate', - 'stack', 'vstack', 'dstack', 'mean', 'maximum', 'minimum', 'swapaxes', 'clip', 'argmax', - 'std', 'var', 'indices', 'copysign', 'ravel', 'hanning', 'hamming', 'blackman', 'flip', + 'stack', 'vstack', 'column_stack', 'dstack', 'mean', 'maximum', 'minimum', 'swapaxes', 'clip', + 'argmax', 'std', 'var', 'indices', 'copysign', 'ravel', 'hanning', 'hamming', 'blackman', 'flip', 'around', 'hypot', 'rad2deg', 'deg2rad', 'unique', 'lcm', 'tril', 'identity', 'take', 'ldexp', 'vdot', 'inner', 'outer', 'equal', 'not_equal', 'greater', 'less', 'greater_equal', - 'less_equal', 'hsplit', 'rot90', 'einsum', 'true_divide', 'column_stack'] + 'less_equal', 'hsplit', 'rot90', 'einsum', 'true_divide'] def _num_outputs(sym): @@ -3075,6 +3075,43 @@ def get_list(arrays): return _npi.vstack(*arrays) +@set_module('mxnet.symbol.numpy') +def column_stack(tup): + """ column_stack(*args, **kwargs) + + Stack 1-D arrays as columns into a 2-D array. + + Take a sequence of 1-D arrays and stack them as columns + to make a single 2-D array. 2-D arrays are stacked as-is, + just like with `hstack`. 1-D arrays are turned into 2-D columns + first. + + Parameters + ---------- + tup : sequence of 1-D or 2-D arrays. + Arrays to stack. All of them must have the same first dimension. + + Returns + ------- + stacked : 2-D array + The array formed by stacking the given arrays. + + See Also + -------- + stack, hstack, vstack, concatenate + + Examples + -------- + >>> a = np.array((1,2,3)) + >>> b = np.array((2,3,4)) + >>> np.column_stack((a,b)) + array([[1, 2], + [2, 3], + [3, 4]]) + """ + return _npi.column_stack(*tup) + + @set_module('mxnet.symbol.numpy') def dstack(arrays): """ @@ -4554,40 +4591,5 @@ def einsum(*operands, **kwargs): operands = operands[1:] return _npi.einsum(*operands, subscripts=subscripts, out=out, optimize=int(optimize_arg)) -@set_module('mxnet.symbol.numpy') -def column_stack(tup): - """ column_stack(*args, **kwargs) - - Stack 1-D arrays as columns into a 2-D array. - - Take a sequence of 1-D arrays and stack them as columns - to make a single 2-D array. 2-D arrays are stacked as-is, - just like with `hstack`. 1-D arrays are turned into 2-D columns - first. - - Parameters - ---------- - tup : sequence of 1-D or 2-D arrays. - Arrays to stack. All of them must have the same first dimension. - - Returns - ------- - stacked : 2-D array - The array formed by stacking the given arrays. 
- - See Also - -------- - stack, hstack, vstack, concatenate - - Examples - -------- - >>> a = np.array((1,2,3)) - >>> b = np.array((2,3,4)) - >>> np.column_stack((a,b)) - array([[1, 2], - [2, 3], - [3, 4]]) - """ - return _npi.column_stack(*tup) _set_np_symbol_class(_Symbol) diff --git a/src/operator/numpy/np_matrix_op-inl.h b/src/operator/numpy/np_matrix_op-inl.h index edda278edc21..b357f7406206 100644 --- a/src/operator/numpy/np_matrix_op-inl.h +++ b/src/operator/numpy/np_matrix_op-inl.h @@ -81,10 +81,10 @@ void NumpyTranspose(const nnvm::NodeAttrs& attrs, template void NumpyColumnStackForward(const nnvm::NodeAttrs& attrs, - const OpContext& ctx, - const std::vector& inputs, - const std::vector& req, - const std::vector& outputs) { + const OpContext& ctx, + const std::vector& inputs, + const std::vector& req, + const std::vector& outputs) { using namespace mshadow; using namespace mshadow_op; @@ -119,10 +119,10 @@ void NumpyColumnStackForward(const nnvm::NodeAttrs& attrs, template void NumpyColumnStackBackward(const nnvm::NodeAttrs& attrs, - const OpContext& ctx, - const std::vector& inputs, - const std::vector& req, - const std::vector& outputs) { + const OpContext& ctx, + const std::vector& inputs, + const std::vector& req, + const std::vector& outputs) { using namespace mshadow; using namespace mshadow_op; diff --git a/src/operator/numpy/np_matrix_op.cc b/src/operator/numpy/np_matrix_op.cc index 1a2a36126728..ea6e7da4cce3 100644 --- a/src/operator/numpy/np_matrix_op.cc +++ b/src/operator/numpy/np_matrix_op.cc @@ -448,8 +448,8 @@ Examples:: .add_arguments(StackParam::__FIELDS__()); bool NumpyColumnStackType(const nnvm::NodeAttrs& attrs, - std::vector *in_type, - std::vector *out_type) { + std::vector *in_type, + std::vector *out_type) { const NumpyColumnStackParam& param = nnvm::get(attrs.parsed); CHECK_EQ(in_type->size(), param.num_args); CHECK_EQ(out_type->size(), 1); @@ -470,8 +470,8 @@ bool NumpyColumnStackType(const nnvm::NodeAttrs& attrs, } bool NumpyColumnStackShape(const nnvm::NodeAttrs& attrs, - mxnet::ShapeVector* in_attrs, - mxnet::ShapeVector* out_attrs) { + mxnet::ShapeVector* in_attrs, + mxnet::ShapeVector* out_attrs) { CHECK_EQ(out_attrs->size(), 1U); const NumpyColumnStackParam& param = nnvm::get(attrs.parsed); CHECK_EQ(in_attrs->size(), param.num_args); diff --git a/tests/python/unittest/test_numpy_interoperability.py b/tests/python/unittest/test_numpy_interoperability.py index 9e8156f3239c..6f6f15ee2b71 100644 --- a/tests/python/unittest/test_numpy_interoperability.py +++ b/tests/python/unittest/test_numpy_interoperability.py @@ -970,6 +970,11 @@ def _add_workload_vstack(array_pool): OpArgMngr.add_workload('vstack', array_pool['4x1']) OpArgMngr.add_workload('vstack', array_pool['1x1x0']) +def _add_workload_column_stack(): + OpArgMngr.add_workload('column_stack', (np.array([1, 2, 3]), np.array([2, 3, 4]))) + OpArgMngr.add_workload('column_stack', (np.array([[1], [2], [3]]), np.array([[2], [3], [4]]))) + OpArgMngr.add_workload('column_stack', [np.array(_np.arange(3)) for _ in range(2)]) + def _add_workload_equal(array_pool): # TODO(junwu): fp16 does not work yet with TVM generated ops @@ -1122,6 +1127,7 @@ def _prepare_workloads(): _add_workload_logical_not(array_pool) _add_workload_vdot() _add_workload_vstack(array_pool) + _add_workload_column_stack() _add_workload_equal(array_pool) _add_workload_not_equal(array_pool) _add_workload_greater(array_pool) diff --git a/tests/python/unittest/test_numpy_op.py b/tests/python/unittest/test_numpy_op.py index 
91e5830e6f47..07ac877a961d 100644 --- a/tests/python/unittest/test_numpy_op.py +++ b/tests/python/unittest/test_numpy_op.py @@ -3558,13 +3558,12 @@ def g(data): configs = [ ((), (), ()), ((2), (2), (2)), - ((1, 3), (1, 3), (1, 3)), ((0), (0), (0)), + ((0, 3, 0), (0, 0, 0), (0, 1, 0)), ((2, 2), (2, 1), (2, 3)), - ((4, 3), (4, 4), (4, 1)), + ((4, 3), (4, 0), (4, 1)), ((2, 2, 2), (2, 4, 2), (2, 2, 2)), - ((0, 1, 1), (0, 1, 1), (0, 1, 1)), - ((2, 1), (2, 2), (2, 2)) + ((0, 1, 1), (0, 1, 1), (0, 1, 1)) ] types = ['float16', 'float32', 'float64', 'int8', 'int32', 'int64'] for config in configs: @@ -3598,6 +3597,7 @@ def g(data): expected_np = _np.column_stack(v_np) assert_almost_equal(mx_out.asnumpy(), expected_np, rtol=rtol, atol=atol) + if __name__ == '__main__': import nose nose.runmodule() From 46be8aa9beed732625d1e1bda1f70674af244164 Mon Sep 17 00:00:00 2001 From: Ubuntu Date: Thu, 24 Oct 2019 05:50:20 +0000 Subject: [PATCH 05/23] fix pylint and add interoperability test --- python/mxnet/ndarray/numpy/_op.py | 5 +-- python/mxnet/numpy/multiarray.py | 4 +- python/mxnet/symbol/numpy/_symbol.py | 4 +- tests/python/unittest/test_numpy_op.py | 56 +++++++++++++------------- 4 files changed, 33 insertions(+), 36 deletions(-) diff --git a/python/mxnet/ndarray/numpy/_op.py b/python/mxnet/ndarray/numpy/_op.py index 249b783f5167..97a5cca4e33b 100644 --- a/python/mxnet/ndarray/numpy/_op.py +++ b/python/mxnet/ndarray/numpy/_op.py @@ -3034,8 +3034,8 @@ def column_stack(tup): >>> b = np.array((2,3,4)) >>> np.column_stack((a,b)) array([[1, 2], - [2, 3], - [3, 4]]) + [2, 3], + [3, 4]]) """ return _npi.column_stack(*tup) @@ -4797,4 +4797,3 @@ def einsum(*operands, **kwargs): subscripts = operands[0] operands = operands[1:] return _npi.einsum(*operands, subscripts=subscripts, out=out, optimize=int(optimize_arg)) - diff --git a/python/mxnet/numpy/multiarray.py b/python/mxnet/numpy/multiarray.py index 6477d88ff510..ef6affab2d20 100644 --- a/python/mxnet/numpy/multiarray.py +++ b/python/mxnet/numpy/multiarray.py @@ -4650,8 +4650,8 @@ def column_stack(tup): >>> b = np.array((2,3,4)) >>> np.column_stack((a,b)) array([[1, 2], - [2, 3], - [3, 4]]) + [2, 3], + [3, 4]]) """ return _mx_nd_np.column_stack(tup) diff --git a/python/mxnet/symbol/numpy/_symbol.py b/python/mxnet/symbol/numpy/_symbol.py index 277813eabe3e..5e31db966480 100644 --- a/python/mxnet/symbol/numpy/_symbol.py +++ b/python/mxnet/symbol/numpy/_symbol.py @@ -3106,8 +3106,8 @@ def column_stack(tup): >>> b = np.array((2,3,4)) >>> np.column_stack((a,b)) array([[1, 2], - [2, 3], - [3, 4]]) + [2, 3], + [3, 4]]) """ return _npi.column_stack(*tup) diff --git a/tests/python/unittest/test_numpy_op.py b/tests/python/unittest/test_numpy_op.py index 07ac877a961d..c01f3c94e58d 100644 --- a/tests/python/unittest/test_numpy_op.py +++ b/tests/python/unittest/test_numpy_op.py @@ -3566,36 +3566,34 @@ def g(data): ((0, 1, 1), (0, 1, 1), (0, 1, 1)) ] types = ['float16', 'float32', 'float64', 'int8', 'int32', 'int64'] - for config in configs: - for hybridize in [True, False]: - for dtype in types: - test_column_stack = TestColumnStack() - if hybridize: - test_column_stack.hybridize() - rtol = 1e-3 - atol = 1e-5 - v = [] - v_np = [] - for i in range(3): - v_np.append(_np.array(_np.random.uniform(-10.0, 10.0, config[i]), dtype=dtype)) - v.append(mx.nd.array(v_np[i]).as_np_ndarray()) - v[i].attach_grad() - expected_np = _np.column_stack(v_np) - with mx.autograd.record(): - mx_out = test_column_stack(*v) - assert mx_out.shape == expected_np.shape - 
assert_almost_equal(mx_out.asnumpy(), expected_np, rtol=rtol, atol=atol) - - # Test gradient - mx_out.backward() - for i in range(3): - expected_grad = g(v_np[i]) - assert_almost_equal(v[i].grad.asnumpy(), expected_grad, rtol=rtol, atol=atol) + for config, hybridize, dtype in itertools.product(configs, [True, False], types): + test_column_stack = TestColumnStack() + if hybridize: + test_column_stack.hybridize() + rtol = 1e-3 + atol = 1e-5 + v = [] + v_np = [] + for i in range(3): + v_np.append(_np.array(_np.random.uniform(-10.0, 10.0, config[i]), dtype=dtype)) + v.append(mx.nd.array(v_np[i]).as_np_ndarray()) + v[i].attach_grad() + expected_np = _np.column_stack(v_np) + with mx.autograd.record(): + mx_out = test_column_stack(*v) + assert mx_out.shape == expected_np.shape + assert_almost_equal(mx_out.asnumpy(), expected_np, rtol=rtol, atol=atol) - # Test imperative once again - mx_out = np.column_stack(v) - expected_np = _np.column_stack(v_np) - assert_almost_equal(mx_out.asnumpy(), expected_np, rtol=rtol, atol=atol) + # Test gradient + mx_out.backward() + for i in range(3): + expected_grad = g(v_np[i]) + assert_almost_equal(v[i].grad.asnumpy(), expected_grad, rtol=rtol, atol=atol) + + # Test imperative once again + mx_out = np.column_stack(v) + expected_np = _np.column_stack(v_np) + assert_almost_equal(mx_out.asnumpy(), expected_np, rtol=rtol, atol=atol) if __name__ == '__main__': From b5fc7f22a6eb6a5001ad730a3b94f42962c43bca Mon Sep 17 00:00:00 2001 From: Ubuntu Date: Thu, 24 Oct 2019 09:33:35 +0000 Subject: [PATCH 06/23] fix doc string, add comment, remove dead code --- python/mxnet/ndarray/numpy/_op.py | 9 ++++----- python/mxnet/numpy/multiarray.py | 9 ++++----- python/mxnet/symbol/numpy/_symbol.py | 9 ++++----- src/operator/numpy/np_matrix_op-inl.h | 2 -- src/operator/numpy/np_matrix_op.cc | 5 ++++- 5 files changed, 16 insertions(+), 18 deletions(-) diff --git a/python/mxnet/ndarray/numpy/_op.py b/python/mxnet/ndarray/numpy/_op.py index 97a5cca4e33b..ea42b73cb24e 100644 --- a/python/mxnet/ndarray/numpy/_op.py +++ b/python/mxnet/ndarray/numpy/_op.py @@ -3005,8 +3005,7 @@ def get_list(arrays): @set_module('mxnet.ndarray.numpy') def column_stack(tup): - """ column_stack(*args, **kwargs) - + """ Stack 1-D arrays as columns into a 2-D array. Take a sequence of 1-D arrays and stack them as columns @@ -3033,9 +3032,9 @@ def column_stack(tup): >>> a = np.array((1,2,3)) >>> b = np.array((2,3,4)) >>> np.column_stack((a,b)) - array([[1, 2], - [2, 3], - [3, 4]]) + array([[1., 2.], + [2., 3.], + [3., 4.]]) """ return _npi.column_stack(*tup) diff --git a/python/mxnet/numpy/multiarray.py b/python/mxnet/numpy/multiarray.py index ef6affab2d20..61426904da12 100644 --- a/python/mxnet/numpy/multiarray.py +++ b/python/mxnet/numpy/multiarray.py @@ -4621,8 +4621,7 @@ def vstack(arrays, out=None): @set_module('mxnet.numpy') def column_stack(tup): - """ column_stack(*args, **kwargs) - + """ Stack 1-D arrays as columns into a 2-D array. 
Take a sequence of 1-D arrays and stack them as columns @@ -4649,9 +4648,9 @@ def column_stack(tup): >>> a = np.array((1,2,3)) >>> b = np.array((2,3,4)) >>> np.column_stack((a,b)) - array([[1, 2], - [2, 3], - [3, 4]]) + array([[1., 2.], + [2., 3.], + [3., 4.]]) """ return _mx_nd_np.column_stack(tup) diff --git a/python/mxnet/symbol/numpy/_symbol.py b/python/mxnet/symbol/numpy/_symbol.py index 5e31db966480..fefb443922d8 100644 --- a/python/mxnet/symbol/numpy/_symbol.py +++ b/python/mxnet/symbol/numpy/_symbol.py @@ -3077,8 +3077,7 @@ def get_list(arrays): @set_module('mxnet.symbol.numpy') def column_stack(tup): - """ column_stack(*args, **kwargs) - + """ Stack 1-D arrays as columns into a 2-D array. Take a sequence of 1-D arrays and stack them as columns @@ -3105,9 +3104,9 @@ def column_stack(tup): >>> a = np.array((1,2,3)) >>> b = np.array((2,3,4)) >>> np.column_stack((a,b)) - array([[1, 2], - [2, 3], - [3, 4]]) + array([[1., 2.], + [2., 3.], + [3., 4.]]) """ return _npi.column_stack(*tup) diff --git a/src/operator/numpy/np_matrix_op-inl.h b/src/operator/numpy/np_matrix_op-inl.h index b357f7406206..233500019e31 100644 --- a/src/operator/numpy/np_matrix_op-inl.h +++ b/src/operator/numpy/np_matrix_op-inl.h @@ -97,7 +97,6 @@ void NumpyColumnStackForward(const nnvm::NodeAttrs& attrs, std::vector data(param.num_args); for (int i = 0; i < param.num_args; i++) { if (inputs[i].shape_.ndim() == 0 || inputs[i].shape_.ndim() == 1) { - // TShape shape = Shape2(1, inputs[i].shape_.Size()); TShape shape = Shape2(inputs[i].shape_.Size(), 1); data[i] = inputs[i].reshape(shape); } else { @@ -108,7 +107,6 @@ void NumpyColumnStackForward(const nnvm::NodeAttrs& attrs, // initialize ConcatOp ConcatParam cparam; cparam.num_args = param.num_args; - // cparam.dim = 0; cparam.dim = 1; MSHADOW_TYPE_SWITCH(inputs[0].type_flag_, DType, { ConcatOp op; diff --git a/src/operator/numpy/np_matrix_op.cc b/src/operator/numpy/np_matrix_op.cc index ea6e7da4cce3..8ceaf0ce1efc 100644 --- a/src/operator/numpy/np_matrix_op.cc +++ b/src/operator/numpy/np_matrix_op.cc @@ -477,10 +477,12 @@ bool NumpyColumnStackShape(const nnvm::NodeAttrs& attrs, CHECK_EQ(in_attrs->size(), param.num_args); std::vector in_attrs_tmp(param.num_args); TShape dshape; + // For each array in the input, reshape to 2D if ndim < 2. for (int i = 0; i < param.num_args; i++) { if ((*in_attrs)[i].ndim() == 0) { in_attrs_tmp[i] = TShape(2, 1); } else if ((*in_attrs)[i].ndim() == 1) { + // Transpose 1D row into a column. in_attrs_tmp[i] = TShape(2, 1); in_attrs_tmp[i][0] = (*in_attrs)[i][0]; } else { @@ -498,6 +500,7 @@ bool NumpyColumnStackShape(const nnvm::NodeAttrs& attrs, if (dshape.ndim() == -1) { return false; } + // Accumulate along column axis. 
int cnt = 0, sum = 0, pos = -1; for (int i = 0; i < param.num_args; i++) { TShape tmp = in_attrs_tmp[i]; @@ -527,11 +530,11 @@ bool NumpyColumnStackShape(const nnvm::NodeAttrs& attrs, if (!shape_is_known(dshape)) { return false; } - dshape[1] = sum; if (cnt == 0) { SHAPE_ASSIGN_CHECK(*out_attrs, 0, dshape); } else if (cnt == 1) { + // Infer missing dimension if only one column dimension of the input is missing if (pos >= 0) { in_attrs_tmp[pos][1] = out_attrs->at(0)[1] - sum; } else { From a319b771e5b6e5500b39496bc70454207910ce88 Mon Sep 17 00:00:00 2001 From: Ubuntu Date: Thu, 24 Oct 2019 10:00:01 +0000 Subject: [PATCH 07/23] pylint --- python/mxnet/ndarray/numpy/_op.py | 2 +- python/mxnet/numpy/multiarray.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/python/mxnet/ndarray/numpy/_op.py b/python/mxnet/ndarray/numpy/_op.py index ea42b73cb24e..b7bd651bf383 100644 --- a/python/mxnet/ndarray/numpy/_op.py +++ b/python/mxnet/ndarray/numpy/_op.py @@ -3005,7 +3005,7 @@ def get_list(arrays): @set_module('mxnet.ndarray.numpy') def column_stack(tup): - """ + """ Stack 1-D arrays as columns into a 2-D array. Take a sequence of 1-D arrays and stack them as columns diff --git a/python/mxnet/numpy/multiarray.py b/python/mxnet/numpy/multiarray.py index 61426904da12..c62117bf4444 100644 --- a/python/mxnet/numpy/multiarray.py +++ b/python/mxnet/numpy/multiarray.py @@ -4621,7 +4621,7 @@ def vstack(arrays, out=None): @set_module('mxnet.numpy') def column_stack(tup): - """ + """ Stack 1-D arrays as columns into a 2-D array. Take a sequence of 1-D arrays and stack them as columns From cafc11adfebd86e1a3a87ddd5e9f1479074e015e Mon Sep 17 00:00:00 2001 From: Ubuntu Date: Fri, 25 Oct 2019 04:11:51 +0000 Subject: [PATCH 08/23] ci --- python/mxnet/numpy/multiarray.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/python/mxnet/numpy/multiarray.py b/python/mxnet/numpy/multiarray.py index c62117bf4444..77233ab6b09e 100644 --- a/python/mxnet/numpy/multiarray.py +++ b/python/mxnet/numpy/multiarray.py @@ -4635,7 +4635,7 @@ def column_stack(tup): Arrays to stack. All of them must have the same first dimension. Returns - ------- + -------- stacked : 2-D array The array formed by stacking the given arrays. From 627081b4841f2fb229bd132bcae496c7fab22322 Mon Sep 17 00:00:00 2001 From: Ubuntu Date: Fri, 25 Oct 2019 08:19:50 +0000 Subject: [PATCH 09/23] ci --- python/mxnet/ndarray/numpy/_op.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/python/mxnet/ndarray/numpy/_op.py b/python/mxnet/ndarray/numpy/_op.py index b7bd651bf383..58d619bdc596 100644 --- a/python/mxnet/ndarray/numpy/_op.py +++ b/python/mxnet/ndarray/numpy/_op.py @@ -3019,7 +3019,7 @@ def column_stack(tup): Arrays to stack. All of them must have the same first dimension. Returns - ------- + -------- stacked : 2-D array The array formed by stacking the given arrays. 
From 1f703c3a0e327281a1b1e01b7ddb3e2c1377ee9d Mon Sep 17 00:00:00 2001 From: Ubuntu Date: Wed, 30 Oct 2019 07:39:19 +0000 Subject: [PATCH 10/23] ci --- src/operator/numpy/np_matrix_op-inl.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/operator/numpy/np_matrix_op-inl.h b/src/operator/numpy/np_matrix_op-inl.h index fd83d5739e17..2545adcb3555 100644 --- a/src/operator/numpy/np_matrix_op-inl.h +++ b/src/operator/numpy/np_matrix_op-inl.h @@ -58,7 +58,7 @@ struct NumpyColumnStackParam : public dmlc::Parameter { DMLC_DECLARE_FIELD(num_args).set_lower_bound(1) .describe("Number of inputs to be column stacked"); } -} +}; struct NumpyReshapeParam : public dmlc::Parameter { mxnet::TShape newshape; From 27bddf8f1667cd7161f0c41ecabe6123736e403d Mon Sep 17 00:00:00 2001 From: Zhiqiang Xie Date: Wed, 30 Oct 2019 17:34:50 +0800 Subject: [PATCH 11/23] [Numpy] Numpy operator diff (#15906) * numpy diff operator implemented append and prepend not supported yet remove the prepend and append checking interface from the backend refine the code, enrich the test set and all tests passed registered the diff operator into npi scope all tests passed comments and minor modification format codes and fix warning for sanity check minor modification for sanity check fix sanity fix the tolerance bound of testing np.diff resolve minor coding style issue replace the given tests by random picking minor fix * interoperability test added --- python/mxnet/ndarray/numpy/_op.py | 50 +++- python/mxnet/numpy/multiarray.py | 51 +++- python/mxnet/numpy_dispatch_protocol.py | 1 + python/mxnet/symbol/numpy/_symbol.py | 51 +++- src/operator/numpy/np_diff-inl.h | 220 ++++++++++++++++++ src/operator/numpy/np_diff.cc | 109 +++++++++ src/operator/numpy/np_diff.cu | 37 +++ .../unittest/test_numpy_interoperability.py | 24 ++ tests/python/unittest/test_numpy_op.py | 52 +++++ 9 files changed, 592 insertions(+), 3 deletions(-) create mode 100644 src/operator/numpy/np_diff-inl.h create mode 100644 src/operator/numpy/np_diff.cc create mode 100644 src/operator/numpy/np_diff.cu diff --git a/python/mxnet/ndarray/numpy/_op.py b/python/mxnet/ndarray/numpy/_op.py index 84aa4a1572d9..297c40b1431e 100644 --- a/python/mxnet/ndarray/numpy/_op.py +++ b/python/mxnet/ndarray/numpy/_op.py @@ -38,7 +38,7 @@ 'std', 'var', 'indices', 'copysign', 'ravel', 'hanning', 'hamming', 'blackman', 'flip', 'around', 'hypot', 'rad2deg', 'deg2rad', 'unique', 'lcm', 'tril', 'identity', 'take', 'ldexp', 'vdot', 'inner', 'outer', 'equal', 'not_equal', 'greater', 'less', 'greater_equal', 'less_equal', - 'hsplit', 'rot90', 'einsum', 'true_divide', 'nonzero', 'shares_memory', 'may_share_memory'] + 'hsplit', 'rot90', 'einsum', 'true_divide', 'nonzero', 'shares_memory', 'may_share_memory', 'diff'] @set_module('mxnet.ndarray.numpy') @@ -4983,3 +4983,51 @@ def may_share_memory(a, b, max_work=None): - Actually it is same as `shares_memory` in MXNet DeepNumPy """ return _npi.share_memory(a, b).item() + + +def diff(a, n=1, axis=-1, prepend=None, append=None): + r""" + numpy.diff(a, n=1, axis=-1, prepend=, append=) + + Calculate the n-th discrete difference along the given axis. + + Parameters + ---------- + a : ndarray + Input array + n : int, optional + The number of times values are differenced. If zero, the input is returned as-is. + axis : int, optional + The axis along which the difference is taken, default is the last axis. + prepend, append : ndarray, optional + Not supported yet + + Returns + ------- + diff : ndarray + The n-th differences. 
+ The shape of the output is the same as a except along axis where the dimension is smaller by n. + The type of the output is the same as the type of the difference between any two elements of a. + + Examples + -------- + >>> x = np.array([1, 2, 4, 7, 0]) + >>> np.diff(x) + array([ 1, 2, 3, -7]) + >>> np.diff(x, n=2) + array([ 1, 1, -10]) + + >>> x = np.array([[1, 3, 6, 10], [0, 5, 6, 8]]) + >>> np.diff(x) + array([[2, 3, 4], + [5, 1, 2]]) + >>> np.diff(x, axis=0) + array([[-1, 2, 0, -2]]) + + Notes + ----- + Optional inputs `prepend` and `append` are not supported yet + """ + if (prepend or append): + raise NotImplementedError('prepend and append options are not supported yet') + return _npi.diff(a, n=n, axis=axis) diff --git a/python/mxnet/numpy/multiarray.py b/python/mxnet/numpy/multiarray.py index ef88638c857e..a6d90881da4f 100644 --- a/python/mxnet/numpy/multiarray.py +++ b/python/mxnet/numpy/multiarray.py @@ -56,7 +56,7 @@ 'blackman', 'flip', 'around', 'arctan2', 'hypot', 'rad2deg', 'deg2rad', 'unique', 'lcm', 'tril', 'identity', 'take', 'ldexp', 'vdot', 'inner', 'outer', 'equal', 'not_equal', 'greater', 'less', 'greater_equal', 'less_equal', 'hsplit', 'rot90', 'einsum', 'true_divide', 'nonzero', 'shares_memory', - 'may_share_memory'] + 'may_share_memory', 'diff'] # Return code for dispatching indexing function call _NDARRAY_UNSUPPORTED_INDEXING = -1 @@ -6975,3 +6975,52 @@ def may_share_memory(a, b, max_work=None): - Actually it is same as `shares_memory` in MXNet DeepNumPy """ return _mx_nd_np.may_share_memory(a, b, max_work) + + +def diff(a, n=1, axis=-1, prepend=None, append=None): + r""" + numpy.diff(a, n=1, axis=-1, prepend=, append=) + + Calculate the n-th discrete difference along the given axis. + + Parameters + ---------- + a : ndarray + Input array + n : int, optional + The number of times values are differenced. If zero, the input is returned as-is. + axis : int, optional + The axis along which the difference is taken, default is the last axis. + prepend, append : ndarray, optional + Not supported yet + + Returns + ------- + diff : ndarray + The n-th differences. + The shape of the output is the same as a except along axis where the dimension is smaller by n. + The type of the output is the same as the type of the difference between any two elements of a. + This is the same as the type of a in most cases. 
+ + Examples + -------- + >>> x = np.array([1, 2, 4, 7, 0]) + >>> np.diff(x) + array([ 1, 2, 3, -7]) + >>> np.diff(x, n=2) + array([ 1, 1, -10]) + + >>> x = np.array([[1, 3, 6, 10], [0, 5, 6, 8]]) + >>> np.diff(x) + array([[2, 3, 4], + [5, 1, 2]]) + >>> np.diff(x, axis=0) + array([[-1, 2, 0, -2]]) + + Notes + ----- + Optional inputs `prepend` and `append` are not supported yet + """ + if (prepend or append): + raise NotImplementedError('prepend and append options are not supported yet') + return _mx_nd_np.diff(a, n=n, axis=axis) diff --git a/python/mxnet/numpy_dispatch_protocol.py b/python/mxnet/numpy_dispatch_protocol.py index 6a5f166a70eb..2411f51b7aa6 100644 --- a/python/mxnet/numpy_dispatch_protocol.py +++ b/python/mxnet/numpy_dispatch_protocol.py @@ -130,6 +130,7 @@ def _run_with_array_ufunc_proto(*args, **kwargs): 'einsum', 'shares_memory', 'may_share_memory', + 'diff', ] diff --git a/python/mxnet/symbol/numpy/_symbol.py b/python/mxnet/symbol/numpy/_symbol.py index 2e6d41446930..6f7f912d6e36 100644 --- a/python/mxnet/symbol/numpy/_symbol.py +++ b/python/mxnet/symbol/numpy/_symbol.py @@ -40,7 +40,7 @@ 'std', 'var', 'indices', 'copysign', 'ravel', 'hanning', 'hamming', 'blackman', 'flip', 'around', 'hypot', 'rad2deg', 'deg2rad', 'unique', 'lcm', 'tril', 'identity', 'take', 'ldexp', 'vdot', 'inner', 'outer', 'equal', 'not_equal', 'greater', 'less', 'greater_equal', - 'less_equal', 'hsplit', 'rot90', 'einsum', 'true_divide', 'shares_memory', 'may_share_memory'] + 'less_equal', 'hsplit', 'rot90', 'einsum', 'true_divide', 'shares_memory', 'may_share_memory', 'diff'] def _num_outputs(sym): @@ -4629,4 +4629,53 @@ def may_share_memory(a, b, max_work=None): return _npi.share_memory(a, b) +def diff(a, n=1, axis=-1, prepend=None, append=None): + r""" + numpy.diff(a, n=1, axis=-1, prepend=, append=) + + Calculate the n-th discrete difference along the given axis. + + Parameters + ---------- + a : ndarray + Input array + n : int, optional + The number of times values are differenced. If zero, the input is returned as-is. + axis : int, optional + The axis along which the difference is taken, default is the last axis. + prepend, append : ndarray, optional + Not supported yet + + Returns + ------- + diff : ndarray + The n-th differences. + The shape of the output is the same as a except along axis where the dimension is smaller by n. + The type of the output is the same as the type of the difference between any two elements of a. + This is the same as the type of a in most cases. + + Examples + -------- + >>> x = np.array([1, 2, 4, 7, 0]) + >>> np.diff(x) + array([ 1, 2, 3, -7]) + >>> np.diff(x, n=2) + array([ 1, 1, -10]) + + >>> x = np.array([[1, 3, 6, 10], [0, 5, 6, 8]]) + >>> np.diff(x) + array([[2, 3, 4], + [5, 1, 2]]) + >>> np.diff(x, axis=0) + array([[-1, 2, 0, -2]]) + + Notes + ----- + Optional inputs `prepend` and `append` are not supported yet + """ + if (prepend or append): + raise NotImplementedError('prepend and append options are not supported yet') + return _npi.diff(a, n=n, axis=axis) + + _set_np_symbol_class(_Symbol) diff --git a/src/operator/numpy/np_diff-inl.h b/src/operator/numpy/np_diff-inl.h new file mode 100644 index 000000000000..69f175e802dd --- /dev/null +++ b/src/operator/numpy/np_diff-inl.h @@ -0,0 +1,220 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/*! + * \file np_diff-inl.h + * \brief Function definition of numpy-compatible diff operator + */ + +#ifndef MXNET_OPERATOR_NUMPY_NP_DIFF_INL_H_ +#define MXNET_OPERATOR_NUMPY_NP_DIFF_INL_H_ + +#include +#include +#include +#include "../mxnet_op.h" +#include "../operator_common.h" +#include "../tensor/broadcast_reduce_op.h" + +namespace mxnet { +namespace op { + +struct DiffParam : public dmlc::Parameter { + int n, axis; + dmlc::optional prepend; + dmlc::optional append; + DMLC_DECLARE_PARAMETER(DiffParam) { + DMLC_DECLARE_FIELD(n).set_default(1).describe( + "The number of times values are differenced." + " If zero, the input is returned as-is."); + DMLC_DECLARE_FIELD(axis).set_default(-1).describe( + "Axis along which the cumulative sum is computed." + " The default (None) is to compute the diff over the flattened array."); + } +}; + +inline void YanghuiTri(std::vector* buffer, int n) { + // apply basic yanghui's triangular to calculate the factors + (*buffer)[0] = 1; + for (int i = 1; i <= n; ++i) { + (*buffer)[i] = 1; + for (int j = i - 1; j > 0; --j) { + (*buffer)[j] += (*buffer)[j - 1]; + } + } +} + +struct diff_forward { + template + MSHADOW_XINLINE static void Map(int i, int* diffFactor, OType* out, + const IType* in, const int n, + const int stride, + const mshadow::Shape oshape, + const mshadow::Shape ishape) { + using namespace broadcast; + + // j represent the memory index of the corresponding input entry + int j = ravel(unravel(i, oshape), ishape); + int indicator = 1; + out[i] = 0; + for (int k = n; k >= 0; --k) { + out[i] += in[j + stride * k] * indicator * diffFactor[k]; + indicator *= -1; + } + } +}; + +template +void DiffForwardImpl(const OpContext& ctx, const TBlob& in, const TBlob& out, + const int n, const int axis) { + using namespace mshadow; + using namespace mxnet_op; + + // undefined behavior for n < 0 + CHECK_GE(n, 0); + int axis_checked = CheckAxis(axis, in.ndim()); + // nothing in the output + if (n >= in.shape_[axis_checked]) return; + // stride for elements on the given axis, same in input and output + int stride = 1; + for (int i = in.ndim() - 1; i > axis_checked; --i) { + stride *= in.shape_[i]; + } + + Stream* s = ctx.get_stream(); + std::vector buffer(n+1, 0); + YanghuiTri(&buffer, n); + Tensor diffFactor = + ctx.requested[0].get_space_typed(Shape1(n + 1), s); + Copy(diffFactor, Tensor(&buffer[0], Shape1(n + 1), 0), s); + + MSHADOW_TYPE_SWITCH(in.type_flag_, IType, { + MSHADOW_TYPE_SWITCH(out.type_flag_, OType, { + MXNET_NDIM_SWITCH(in.ndim(), ndim, { + Kernel::Launch( + s, out.Size(), diffFactor.dptr_, + out.dptr(), in.dptr(), + n, stride, out.shape_.get(), + in.shape_.get()); + }); + }); + }); +} + +template +void DiffForward(const nnvm::NodeAttrs& attrs, const OpContext& ctx, + const std::vector& inputs, + const std::vector& req, + const std::vector& outputs) { + using namespace mshadow; + using namespace mxnet_op; + CHECK_EQ(inputs.size(), 1U); + 
CHECK_EQ(req.size(), 1U); + CHECK_EQ(outputs.size(), 1U); + const DiffParam& param = nnvm::get(attrs.parsed); + + DiffForwardImpl(ctx, inputs[0], outputs[0], param.n, param.axis); +} + +struct diff_backward { + template + MSHADOW_XINLINE static void Map(int i, int* diffFactor, OType* igrad, + const IType* ograd, const int n, + const int stride, const int axis, + const mshadow::Shape oshape, + const mshadow::Shape ishape) { + using namespace broadcast; + if (n == 0) { + igrad[i] = ograd[i]; + return; + } + + Shape coor = unravel(i, oshape); + // one head thread for a whole sequence along the axis + if (coor[axis] != 0) return; + int j = ravel(coor, ishape); + // initialize the elements of output array + for (int k = 0; k < oshape[axis]; ++k) igrad[i + k * stride] = 0; + for (int k = 0; k < ishape[axis]; ++k) { + int indicator = 1; + for (int m = n; m >= 0; --m) { + igrad[i + (m + k) * stride] += + ograd[j + k * stride] * indicator * diffFactor[m]; + indicator *= -1; + } + } + } +}; + +template +void DiffBackwardImpl(const OpContext& ctx, const TBlob& ograd, + const TBlob& igrad, const int n, const int axis) { + using namespace mshadow; + using namespace mxnet_op; + + // undefined behavior for n < 0 + CHECK_GE(n, 0); + int axis_checked = CheckAxis(axis, igrad.ndim()); + // nothing in the ograd and igrad + if (n >= igrad.shape_[axis_checked]) return; + // stride for elements on the given axis, same in input and output + int stride = 1; + for (int i = igrad.ndim() - 1; i > axis_checked; --i) { + stride *= igrad.shape_[i]; + } + + Stream* s = ctx.get_stream(); + std::vector buffer(n+1, 0); + YanghuiTri(&buffer, n); + Tensor diffFactor = + ctx.requested[0].get_space_typed(Shape1(n + 1), s); + Copy(diffFactor, Tensor(&buffer[0], Shape1(n + 1), 0), s); + + MSHADOW_TYPE_SWITCH(ograd.type_flag_, IType, { + MSHADOW_TYPE_SWITCH(igrad.type_flag_, OType, { + MXNET_NDIM_SWITCH(igrad.ndim(), ndim, { + Kernel::Launch( + s, igrad.Size(), diffFactor.dptr_, + igrad.dptr(), ograd.dptr(), + n, stride, axis_checked, + igrad.shape_.get(), ograd.shape_.get()); + }); + }); + }); +} + +template +void DiffBackward(const nnvm::NodeAttrs& attrs, const OpContext& ctx, + const std::vector& inputs, + const std::vector& req, + const std::vector& outputs) { + using namespace mshadow; + using namespace mxnet_op; + CHECK_EQ(inputs.size(), 1U); + CHECK_EQ(req.size(), 1U); + CHECK_EQ(outputs.size(), 1U); + const DiffParam& param = nnvm::get(attrs.parsed); + + DiffBackwardImpl(ctx, inputs[0], outputs[0], param.n, param.axis); +} + +} // namespace op +} // namespace mxnet + +#endif // MXNET_OPERATOR_NUMPY_NP_DIFF_INL_H_ diff --git a/src/operator/numpy/np_diff.cc b/src/operator/numpy/np_diff.cc new file mode 100644 index 000000000000..a3dae332d842 --- /dev/null +++ b/src/operator/numpy/np_diff.cc @@ -0,0 +1,109 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/*! + * \file np_diff.cc + * \brief CPU implementation of numpy-compatible diff operator + */ + +#include "./np_diff-inl.h" + +namespace mxnet { +namespace op { + +inline TShape NumpyDiffShapeImpl(const TShape& ishape, + const int n, + const int axis) { + CHECK_GE(n, 0); + int axis_checked = CheckAxis(axis, ishape.ndim()); + + TShape oshape = ishape; + if (n >= ishape[axis_checked]) { + oshape[axis_checked] = 0; + } else { + oshape[axis_checked] -= n; + } + return oshape; +} + +inline bool DiffShape(const nnvm::NodeAttrs& attrs, + std::vector* in_attrs, + std::vector* out_attrs) { + CHECK_EQ(in_attrs->size(), 1U); + CHECK_EQ(out_attrs->size(), 1U); + if (!shape_is_known(in_attrs->at(0))) { + return false; + } + const DiffParam& param = nnvm::get(attrs.parsed); + SHAPE_ASSIGN_CHECK(*out_attrs, 0, + NumpyDiffShapeImpl((*in_attrs)[0], param.n, param.axis)); + return shape_is_known(out_attrs->at(0)); +} + +inline bool DiffType(const nnvm::NodeAttrs& attrs, + std::vector* in_attrs, + std::vector* out_attrs) { + CHECK_EQ(in_attrs->size(), 1U); + CHECK_EQ(out_attrs->size(), 1U); + + TYPE_ASSIGN_CHECK(*out_attrs, 0, in_attrs->at(0)); + TYPE_ASSIGN_CHECK(*in_attrs, 0, out_attrs->at(0)); + + return out_attrs->at(0) != -1 && in_attrs->at(0) != -1; +} + +DMLC_REGISTER_PARAMETER(DiffParam); + +NNVM_REGISTER_OP(_npi_diff) +.set_attr_parser(ParamParser) +.set_num_inputs(1) +.set_num_outputs(1) +.set_attr("FListInputNames", + [](const NodeAttrs& attrs) { + return std::vector{"a"}; + }) +.set_attr("FInferShape", DiffShape) +.set_attr("FInferType", DiffType) +.set_attr("FResourceRequest", + [](const NodeAttrs& attrs) { + return std::vector{ResourceRequest::kTempSpace}; + }) +.set_attr("FCompute", DiffForward) +.set_attr("FGradient", + ElemwiseGradUseNone{"_backward_npi_diff"}) +.set_attr("FInplaceOption", + [](const NodeAttrs& attrs) { + return std::vector >{{0, 0}}; + }) +.add_argument("a", "NDArray-or-Symbol", "Input ndarray") +.add_arguments(DiffParam::__FIELDS__()); + +NNVM_REGISTER_OP(_backward_npi_diff) +.set_attr_parser(ParamParser) +.set_num_inputs(1) +.set_num_outputs(1) +.set_attr("TIsBackward", true) +.set_attr("FResourceRequest", + [](const NodeAttrs& attrs) { + return std::vector{ResourceRequest::kTempSpace}; + }) +.set_attr("FCompute", DiffBackward); + +} // namespace op +} // namespace mxnet diff --git a/src/operator/numpy/np_diff.cu b/src/operator/numpy/np_diff.cu new file mode 100644 index 000000000000..daea6e368e05 --- /dev/null +++ b/src/operator/numpy/np_diff.cu @@ -0,0 +1,37 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/*! 
+ * \file np_diff.cu + * \brief GPU implementation of numpy-compatible diff operator + */ + +#include "./np_diff-inl.h" + +namespace mxnet { +namespace op { + +NNVM_REGISTER_OP(_npi_diff) +.set_attr("FCompute", DiffForward); + +NNVM_REGISTER_OP(_backward_npi_diff) +.set_attr("FCompute", DiffBackward); + +} // namespace op +} // namespace mxnet diff --git a/tests/python/unittest/test_numpy_interoperability.py b/tests/python/unittest/test_numpy_interoperability.py index 624fc0a107b0..103f2c117ea6 100644 --- a/tests/python/unittest/test_numpy_interoperability.py +++ b/tests/python/unittest/test_numpy_interoperability.py @@ -1085,6 +1085,29 @@ def _add_workload_nonzero(): OpArgMngr.add_workload('nonzero', np.array([True, False, False], dtype=np.bool_)) +def _add_workload_diff(): + x = np.array([1, 4, 6, 7, 12]) + OpArgMngr.add_workload('diff', x) + OpArgMngr.add_workload('diff', x, 2) + OpArgMngr.add_workload('diff', x, 3) + OpArgMngr.add_workload('diff', np.array([1.1, 2.2, 3.0, -0.2, -0.1])) + x = np.zeros((10, 20, 30)) + x[:, 1::2, :] = 1 + OpArgMngr.add_workload('diff', x) + OpArgMngr.add_workload('diff', x, axis=-1) + OpArgMngr.add_workload('diff', x, axis=0) + OpArgMngr.add_workload('diff', x, axis=1) + OpArgMngr.add_workload('diff', x, axis=-2) + x = 20 * np.random.uniform(size=(10,20,30)) + OpArgMngr.add_workload('diff', x) + OpArgMngr.add_workload('diff', x, n=2) + OpArgMngr.add_workload('diff', x, axis=0) + OpArgMngr.add_workload('diff', x, n=2, axis=0) + x = np.array([list(range(3))]) + for n in range(1, 5): + OpArgMngr.add_workload('diff', x, n=n) + + @use_np def _prepare_workloads(): array_pool = { @@ -1190,6 +1213,7 @@ def _prepare_workloads(): _add_workload_greater_equal(array_pool) _add_workload_less(array_pool) _add_workload_less_equal(array_pool) + _add_workload_diff() _prepare_workloads() diff --git a/tests/python/unittest/test_numpy_op.py b/tests/python/unittest/test_numpy_op.py index bfe6c3d43b50..67c1ede6cc1a 100644 --- a/tests/python/unittest/test_numpy_op.py +++ b/tests/python/unittest/test_numpy_op.py @@ -3764,6 +3764,58 @@ def test_np_share_memory(): assert not op(np.ones((5, 0), dtype=dt), np.ones((0, 3, 0), dtype=adt)) +@with_seed() +@use_np +def test_np_diff(): + def np_diff_backward(ograd, n, axis): + res = ograd + for i in range(n): + res = _np.negative(_np.diff(res, n=1, axis=axis, prepend=0, append=0)) + return res + + class TestDiff(HybridBlock): + def __init__(self, n=1, axis=-1): + super(TestDiff, self).__init__() + self._n = n + self._axis = axis + + def hybrid_forward(self, F, a): + return F.np.diff(a, n=self._n, axis=self._axis) + + shapes = [tuple(random.randrange(10) for i in range(random.randrange(6))) for j in range(5)] + for hybridize in [True, False]: + for shape in shapes: + for axis in [i for i in range(-len(shape), len(shape))]: + for n in [i for i in range(0, shape[axis]+1)]: + test_np_diff = TestDiff(n=n, axis=axis) + if hybridize: + test_np_diff.hybridize() + for itype in [_np.float16, _np.float32, _np.float64]: + # note the tolerance shall be scaled by the input n + if itype == _np.float16: + rtol = atol = 1e-2*len(shape)*n + else: + rtol = atol = 1e-5*len(shape)*n + x = rand_ndarray(shape).astype(itype).as_np_ndarray() + x.attach_grad() + np_out = _np.diff(x.asnumpy(), n=n, axis=axis) + with mx.autograd.record(): + mx_out = test_np_diff(x) + assert mx_out.shape == np_out.shape + assert_almost_equal(mx_out.asnumpy(), np_out, rtol=rtol, atol=atol) + mx_out.backward() + if (np_out.size == 0): + np_backward = _np.zeros(shape) + else: + 
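+                        # np_diff_backward applies the negated first difference with
+                        # zero padding (prepend=0, append=0) once per differencing
+                        # step, which is the adjoint (vector-Jacobian product) of
+                        # np.diff and therefore the expected input gradient.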
np_backward = np_diff_backward(_np.ones(np_out.shape, dtype=itype), n=n, axis=axis) + assert x.grad.shape == np_backward.shape + assert_almost_equal(x.grad.asnumpy(), np_backward, rtol=rtol, atol=atol) + + mx_out = np.diff(x, n=n, axis=axis) + np_out = _np.diff(x.asnumpy(), n=n, axis=axis) + assert_almost_equal(mx_out.asnumpy(), np_out, rtol=rtol, atol=atol) + + if __name__ == '__main__': import nose nose.runmodule() From 9465e06ff5a53065ee40bcdfba958edef329b971 Mon Sep 17 00:00:00 2001 From: Ubuntu Date: Wed, 23 Oct 2019 14:19:12 +0000 Subject: [PATCH 12/23] implement np.column_stack --- python/mxnet/ndarray/numpy/_op.py | 33 ++++- python/mxnet/numpy/multiarray.py | 33 ++++- python/mxnet/numpy_dispatch_protocol.py | 1 + python/mxnet/symbol/numpy/_symbol.py | 33 ++++- src/operator/numpy/np_matrix_op-inl.h | 78 ++++++++++++ src/operator/numpy/np_matrix_op.cc | 158 +++++++++++++++++++++++- src/operator/numpy/np_matrix_op.cu | 6 + tests/python/unittest/test_numpy_op.py | 54 ++++++++ 8 files changed, 391 insertions(+), 5 deletions(-) diff --git a/python/mxnet/ndarray/numpy/_op.py b/python/mxnet/ndarray/numpy/_op.py index 297c40b1431e..c8991b46594b 100644 --- a/python/mxnet/ndarray/numpy/_op.py +++ b/python/mxnet/ndarray/numpy/_op.py @@ -34,7 +34,7 @@ 'log1p', 'rint', 'radians', 'reciprocal', 'square', 'negative', 'fix', 'ceil', 'floor', 'trunc', 'logical_not', 'arcsinh', 'arccosh', 'arctanh', 'tensordot', 'histogram', 'eye', 'linspace', 'logspace', 'expand_dims', 'tile', 'arange', 'split', 'vsplit', 'concatenate', - 'stack', 'vstack', 'dstack', 'mean', 'maximum', 'minimum', 'swapaxes', 'clip', 'argmax', 'argmin', + 'stack', 'vstack', 'column_stack', 'dstack', 'mean', 'maximum', 'minimum', 'swapaxes', 'clip', 'argmax', 'argmin', 'std', 'var', 'indices', 'copysign', 'ravel', 'hanning', 'hamming', 'blackman', 'flip', 'around', 'hypot', 'rad2deg', 'deg2rad', 'unique', 'lcm', 'tril', 'identity', 'take', 'ldexp', 'vdot', 'inner', 'outer', 'equal', 'not_equal', 'greater', 'less', 'greater_equal', 'less_equal', @@ -3004,6 +3004,37 @@ def get_list(arrays): return _npi.vstack(*arrays) +@set_module('mxnet.ndarray.numpy') +def column_stack(tup): + """ + Stack 1-D arrays as columns into a 2-D array. + Take a sequence of 1-D arrays and stack them as columns + to make a single 2-D array. 2-D arrays are stacked as-is, + just like with `hstack`. 1-D arrays are turned into 2-D columns + first. + Parameters + ---------- + tup : sequence of 1-D or 2-D arrays. + Arrays to stack. All of them must have the same first dimension. + Returns + -------- + stacked : 2-D array + The array formed by stacking the given arrays. 
+ See Also + -------- + stack, hstack, vstack, concatenate + Examples + -------- + >>> a = np.array((1,2,3)) + >>> b = np.array((2,3,4)) + >>> np.column_stack((a,b)) + array([[1., 2.], + [2., 3.], + [3., 4.]]) + """ + return _npi.column_stack(*tup) + + @set_module('mxnet.ndarray.numpy') def dstack(arrays): """ diff --git a/python/mxnet/numpy/multiarray.py b/python/mxnet/numpy/multiarray.py index a6d90881da4f..e854aab759ca 100644 --- a/python/mxnet/numpy/multiarray.py +++ b/python/mxnet/numpy/multiarray.py @@ -51,7 +51,7 @@ 'degrees', 'log2', 'log1p', 'rint', 'radians', 'reciprocal', 'square', 'negative', 'fix', 'ceil', 'floor', 'trunc', 'logical_not', 'arcsinh', 'arccosh', 'arctanh', 'tensordot', 'histogram', 'eye', 'linspace', 'logspace', 'expand_dims', 'tile', 'arange', - 'split', 'vsplit', 'concatenate', 'stack', 'vstack', 'dstack', 'mean', 'maximum', 'minimum', + 'split', 'vsplit', 'concatenate', 'stack', 'vstack', 'column_stack', 'dstack', 'mean', 'maximum', 'minimum', 'swapaxes', 'clip', 'argmax', 'argmin', 'std', 'var', 'indices', 'copysign', 'ravel', 'hanning', 'hamming', 'blackman', 'flip', 'around', 'arctan2', 'hypot', 'rad2deg', 'deg2rad', 'unique', 'lcm', 'tril', 'identity', 'take', 'ldexp', 'vdot', 'inner', 'outer', 'equal', 'not_equal', 'greater', 'less', @@ -4899,6 +4899,37 @@ def vstack(arrays, out=None): return _mx_nd_np.vstack(arrays) +@set_module('mxnet.numpy') +def column_stack(tup): + """ + Stack 1-D arrays as columns into a 2-D array. + Take a sequence of 1-D arrays and stack them as columns + to make a single 2-D array. 2-D arrays are stacked as-is, + just like with `hstack`. 1-D arrays are turned into 2-D columns + first. + Parameters + ---------- + tup : sequence of 1-D or 2-D arrays. + Arrays to stack. All of them must have the same first dimension. + Returns + -------- + stacked : 2-D array + The array formed by stacking the given arrays. 
+ See Also + -------- + stack, hstack, vstack, concatenate + Examples + -------- + >>> a = np.array((1,2,3)) + >>> b = np.array((2,3,4)) + >>> np.column_stack((a,b)) + array([[1., 2.], + [2., 3.], + [3., 4.]]) + """ + return _mx_nd_np.column_stack(tup) + + @set_module('mxnet.numpy') def dstack(arrays): """ diff --git a/python/mxnet/numpy_dispatch_protocol.py b/python/mxnet/numpy_dispatch_protocol.py index 2411f51b7aa6..cfab2a49699d 100644 --- a/python/mxnet/numpy_dispatch_protocol.py +++ b/python/mxnet/numpy_dispatch_protocol.py @@ -121,6 +121,7 @@ def _run_with_array_ufunc_proto(*args, **kwargs): 'var', 'vdot', 'vstack', + 'column_stack', 'zeros_like', 'linalg.norm', 'trace', diff --git a/python/mxnet/symbol/numpy/_symbol.py b/python/mxnet/symbol/numpy/_symbol.py index 6f7f912d6e36..87543574c502 100644 --- a/python/mxnet/symbol/numpy/_symbol.py +++ b/python/mxnet/symbol/numpy/_symbol.py @@ -36,7 +36,7 @@ 'rint', 'radians', 'reciprocal', 'square', 'negative', 'fix', 'ceil', 'floor', 'trunc', 'logical_not', 'arcsinh', 'arccosh', 'arctanh', 'tensordot', 'histogram', 'eye', 'linspace', 'logspace', 'expand_dims', 'tile', 'arange', 'split', 'vsplit', 'concatenate', - 'stack', 'vstack', 'dstack', 'mean', 'maximum', 'minimum', 'swapaxes', 'clip', 'argmax', 'argmin', + 'stack', 'vstack', 'column_stack', 'dstack', 'mean', 'maximum', 'minimum', 'swapaxes', 'clip', 'argmax', 'argmin', 'std', 'var', 'indices', 'copysign', 'ravel', 'hanning', 'hamming', 'blackman', 'flip', 'around', 'hypot', 'rad2deg', 'deg2rad', 'unique', 'lcm', 'tril', 'identity', 'take', 'ldexp', 'vdot', 'inner', 'outer', 'equal', 'not_equal', 'greater', 'less', 'greater_equal', @@ -4590,6 +4590,37 @@ def einsum(*operands, **kwargs): return _npi.einsum(*operands, subscripts=subscripts, out=out, optimize=int(optimize_arg)) +@set_module('mxnet.symbol.numpy') +def column_stack(tup): + """ + Stack 1-D arrays as columns into a 2-D array. + Take a sequence of 1-D arrays and stack them as columns + to make a single 2-D array. 2-D arrays are stacked as-is, + just like with `hstack`. 1-D arrays are turned into 2-D columns + first. + Parameters + ---------- + tup : sequence of 1-D or 2-D arrays. + Arrays to stack. All of them must have the same first dimension. + Returns + ------- + stacked : 2-D array + The array formed by stacking the given arrays. + See Also + -------- + stack, hstack, vstack, concatenate + Examples + -------- + >>> a = np.array((1,2,3)) + >>> b = np.array((2,3,4)) + >>> np.column_stack((a,b)) + array([[1., 2.], + [2., 3.], + [3., 4.]]) + """ + return _npi.column_stack(*tup) + + @set_module('mxnet.symbol.numpy') def shares_memory(a, b, max_work=None): """ diff --git a/src/operator/numpy/np_matrix_op-inl.h b/src/operator/numpy/np_matrix_op-inl.h index 9ce84835f1a8..c1f5a1b1d8dc 100644 --- a/src/operator/numpy/np_matrix_op-inl.h +++ b/src/operator/numpy/np_matrix_op-inl.h @@ -101,6 +101,12 @@ struct NumpyXReshapeParam : public dmlc::Parameter { " back to the first axis index changing slowest." 
" Note that currently only C-like order is" " supported"); + +struct NumpyColumnStackParam : public dmlc::Parameter { + int num_args; + DMLC_DECLARE_PARAMETER(NumpyColumnStackParam) { + DMLC_DECLARE_FIELD(num_args).set_lower_bound(1) + .describe("Number of inputs to be column stacked"); } }; @@ -124,6 +130,78 @@ void NumpyTranspose(const nnvm::NodeAttrs& attrs, } } +template +void NumpyColumnStackForward(const nnvm::NodeAttrs& attrs, + const OpContext& ctx, + const std::vector& inputs, + const std::vector& req, + const std::vector& outputs) { + using namespace mshadow; + using namespace mshadow_op; + + const NumpyColumnStackParam& param = nnvm::get(attrs.parsed); + CHECK_EQ(inputs.size(), param.num_args); + CHECK_EQ(outputs.size(), 1); + CHECK_EQ(req.size(), 1); + + // reshape if necessary + std::vector data(param.num_args); + for (int i = 0; i < param.num_args; i++) { + if (inputs[i].shape_.ndim() == 0 || inputs[i].shape_.ndim() == 1) { + TShape shape = Shape2(inputs[i].shape_.Size(), 1); + data[i] = inputs[i].reshape(shape); + } else { + data[i] = inputs[i]; + } + } + + // initialize ConcatOp + ConcatParam cparam; + cparam.num_args = param.num_args; + cparam.dim = 1; + MSHADOW_TYPE_SWITCH(inputs[0].type_flag_, DType, { + ConcatOp op; + op.Init(cparam); + op.Forward(ctx, data, req, outputs); + }); +} + +template +void NumpyColumnStackBackward(const nnvm::NodeAttrs& attrs, + const OpContext& ctx, + const std::vector& inputs, + const std::vector& req, + const std::vector& outputs) { + using namespace mshadow; + using namespace mshadow_op; + + const NumpyColumnStackParam& param = nnvm::get(attrs.parsed); + CHECK_EQ(inputs.size(), 1); + CHECK_EQ(outputs.size(), param.num_args); + CHECK_EQ(req.size(), param.num_args); + + // reshape if necessary + std::vector data(param.num_args); + for (int i = 0; i < param.num_args; i++) { + if (outputs[i].shape_.ndim() == 0 || outputs[i].shape_.ndim() == 1) { + TShape shape = Shape2(outputs[i].shape_.Size(), 1); + data[i] = outputs[i].reshape(shape); + } else { + data[i] = outputs[i]; + } + } + + // initialize ConcatOp + ConcatParam cparam; + cparam.num_args = param.num_args; + cparam.dim = 1; + MSHADOW_TYPE_SWITCH(inputs[0].type_flag_, DType, { + ConcatOp op; + op.Init(cparam); + op.Backward(ctx, inputs[0], req, data); + }); +} + template void NumpyVstackForward(const nnvm::NodeAttrs& attrs, const OpContext& ctx, diff --git a/src/operator/numpy/np_matrix_op.cc b/src/operator/numpy/np_matrix_op.cc index 0a6f9a150d8b..173c905c7590 100644 --- a/src/operator/numpy/np_matrix_op.cc +++ b/src/operator/numpy/np_matrix_op.cc @@ -24,6 +24,7 @@ */ #include +#include #include "./np_matrix_op-inl.h" #include "../nn/concat-inl.h" @@ -613,6 +614,161 @@ Examples:: .add_argument("data", "NDArray-or-Symbol[]", "List of arrays to stack") .add_arguments(StackParam::__FIELDS__()); +void dbg_print_shape(TShape &shape, const char *msg = nullptr) { + using namespace std; + if (msg) + cout << "dbg_print_shape: " << msg << " "; + else + cout << "dbg_print_shape: "; + for (int i = 0; i < shape.ndim(); ++i) + cout << shape[i] << " "; + cout << endl; +} + +bool NumpyColumnStackType(const nnvm::NodeAttrs& attrs, + std::vector *in_type, + std::vector *out_type) { + const NumpyColumnStackParam& param = nnvm::get(attrs.parsed); + CHECK_EQ(in_type->size(), param.num_args); + CHECK_EQ(out_type->size(), 1); + int dtype = -1; + for (int i = 0; i < param.num_args; i++) { + if (dtype == -1) { + dtype = in_type->at(i); + } + } + if (dtype == -1) { + dtype = out_type->at(0); + } + for (int i = 0; 
i < param.num_args; i++) { + TYPE_ASSIGN_CHECK(*in_type, i, dtype); + } + TYPE_ASSIGN_CHECK(*out_type, 0, dtype); + return dtype != -1; +} + +bool NumpyColumnStackShape(const nnvm::NodeAttrs& attrs, + mxnet::ShapeVector* in_attrs, + mxnet::ShapeVector* out_attrs) { + + CHECK_EQ(out_attrs->size(), 1U); + const NumpyColumnStackParam& param = nnvm::get(attrs.parsed); + CHECK_EQ(in_attrs->size(), param.num_args); + std::vector in_attrs_tmp(param.num_args); + TShape dshape; + for (int i = 0; i < param.num_args; i++) { + if ((*in_attrs)[i].ndim() == 0) { + in_attrs_tmp[i] = TShape(2, 1); + } else if ((*in_attrs)[i].ndim() == 1) { + in_attrs_tmp[i] = TShape(2, 1); + in_attrs_tmp[i][0] = (*in_attrs)[i][0]; + } else { + in_attrs_tmp[i] = (*in_attrs)[i]; + } + TShape tmp(in_attrs_tmp[i].ndim(), -1); + shape_assign(&dshape, tmp); + } + TShape tmp((*out_attrs)[0].ndim(), -1); + shape_assign(&dshape, tmp); + for (int i = 0; i < param.num_args; i++) { + SHAPE_ASSIGN_CHECK(in_attrs_tmp, i, dshape) + } + SHAPE_ASSIGN_CHECK(*out_attrs, 0, dshape) + if (dshape.ndim() == -1) { + return false; + } + int cnt = 0, sum = 0, pos = -1; + for (int i = 0; i < param.num_args; i++) { + TShape tmp = in_attrs_tmp[i]; + if (!dim_size_is_known(tmp, 1)) { + cnt++; + pos = i; + } else { + sum += tmp[1]; + } + tmp[1] = -1; + shape_assign(&dshape, tmp); + } + tmp = out_attrs->at(0); + if (!dim_size_is_known(tmp, 1)) { + cnt++; + pos = -1; + } else { + sum += tmp[1]; + } + tmp[1] = -1; + shape_assign(&dshape, tmp); + for (int i = 0; i < param.num_args; i++) { + SHAPE_ASSIGN_CHECK(in_attrs_tmp, i, dshape) + } + SHAPE_ASSIGN_CHECK(*out_attrs, 0, dshape)\ + dshape[1] = 0; + if (!shape_is_known(dshape)) { + return false; + } + + dshape[1] = sum; + if (cnt == 0) { + SHAPE_ASSIGN_CHECK(*out_attrs, 0, dshape); + } else if (cnt == 1) { + if (pos >= 0) { + in_attrs_tmp[pos][1] = out_attrs->at(0)[1] - sum; + } else { + out_attrs->at(0)[1] = sum; + } + } else { + return false; + } + for (int i = 0; i < param.num_args; i++) { + if (in_attrs->at(i).ndim() == 1) { + in_attrs->at(i)[0] = in_attrs_tmp[i][1]; + } else if (in_attrs->at(i).ndim() >= 2) { + in_attrs->at(i) = in_attrs_tmp[i]; + } + } + + return true; +} + +DMLC_REGISTER_PARAMETER(NumpyColumnStackParam); + +NNVM_REGISTER_OP(_npi_column_stack) +.describe(R"code()code" ADD_FILELINE) +.set_attr_parser(ParamParser) +.set_num_inputs([](const nnvm::NodeAttrs& attrs) { + const NumpyColumnStackParam& param = dmlc::get(attrs.parsed); + return static_cast(param.num_args); +}) +.set_num_outputs(1) +.set_attr("FListInputNames", + [](const nnvm::NodeAttrs& attrs) { + int num_args = dmlc::get(attrs.parsed).num_args; + std::vector ret; + for (int i = 0; i < num_args; ++i) { + ret.push_back(std::string("arg") + std::to_string(i)); + } + return ret; + }) +.set_attr("key_var_num_args", "num_args") +.set_attr("FInferShape", NumpyColumnStackShape) +.set_attr("FInferType", NumpyColumnStackType) +.set_attr("FCompute", NumpyColumnStackForward) +.set_attr("FGradient", ElemwiseGradUseNone{"_backward_np_column_stack"}) +.add_argument("data", "NDArray-or-Symbol[]", "List of arrays to column_stack") +.add_arguments(NumpyColumnStackParam::__FIELDS__()); + +NNVM_REGISTER_OP(_backward_np_column_stack) +.set_attr_parser(ParamParser) +.set_num_inputs(1) +.set_num_outputs([](const nnvm::NodeAttrs& attrs) { + const NumpyColumnStackParam& param = dmlc::get(attrs.parsed); + return static_cast(param.num_args); +}) +.set_attr("TIsBackward", true) +.set_attr("FCompute", NumpyColumnStackBackward); + 
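+// Note: the forward/backward kernels in np_matrix_op-inl.h implement
+// column_stack by reshaping every 0-D/1-D input to a (size, 1) column and then
+// delegating to ConcatOp with dim = 1, so the backward pass is the concat
+// gradient split back into the per-input columns.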
+DMLC_REGISTER_PARAMETER(NumpyVstackParam); + bool NumpyVstackType(const nnvm::NodeAttrs& attrs, std::vector *in_type, std::vector *out_type) { @@ -718,8 +874,6 @@ bool NumpyVstackShape(const nnvm::NodeAttrs& attrs, return true; } -DMLC_REGISTER_PARAMETER(NumpyVstackParam); - NNVM_REGISTER_OP(_npi_vstack) .describe(R"code()code" ADD_FILELINE) .set_attr_parser(ParamParser) diff --git a/src/operator/numpy/np_matrix_op.cu b/src/operator/numpy/np_matrix_op.cu index 6b4f7a11a9a2..fccc8f257e64 100644 --- a/src/operator/numpy/np_matrix_op.cu +++ b/src/operator/numpy/np_matrix_op.cu @@ -59,6 +59,12 @@ NNVM_REGISTER_OP(_npi_dstack) NNVM_REGISTER_OP(_backward_np_dstack) .set_attr("FCompute", DStackGradCompute); +NNVM_REGISTER_OP(_npi_column_stack) +.set_attr("FCompute", NumpyColumnStackForward); + +NNVM_REGISTER_OP(_backward_np_column_stack) +.set_attr("FCompute", NumpyColumnStackBackward); + NNVM_REGISTER_OP(_np_roll) .set_attr("FCompute", NumpyRollCompute); diff --git a/tests/python/unittest/test_numpy_op.py b/tests/python/unittest/test_numpy_op.py index 67c1ede6cc1a..0c8068cc2261 100644 --- a/tests/python/unittest/test_numpy_op.py +++ b/tests/python/unittest/test_numpy_op.py @@ -3816,6 +3816,60 @@ def hybrid_forward(self, F, a): assert_almost_equal(mx_out.asnumpy(), np_out, rtol=rtol, atol=atol) +@with_seed() +@use_np +def test_np_column_stack(): + class TestColumnStack(HybridBlock): + def __init__(self): + super(TestColumnStack, self).__init__() + + def hybrid_forward(self, F, a, *args): + return F.np.column_stack([a] + list(args)) + + def g(data): + return _np.ones_like(data) + + configs = [ + ((), (), ()), + ((2), (2), (2)), + ((0), (0), (0)), + ((0, 3, 0), (0, 0, 0), (0, 1, 0)), + ((2, 2), (2, 1), (2, 3)), + ((4, 3), (4, 0), (4, 1)), + ((2, 2, 2), (2, 4, 2), (2, 2, 2)), + ((0, 1, 1), (0, 1, 1), (0, 1, 1)) + ] + types = ['float16', 'float32', 'float64', 'int8', 'int32', 'int64'] + for config, hybridize, dtype in itertools.product(configs, [True, False], types): + test_column_stack = TestColumnStack() + if hybridize: + test_column_stack.hybridize() + rtol = 1e-3 + atol = 1e-5 + v = [] + v_np = [] + for i in range(3): + v_np.append(_np.array(_np.random.uniform(-10.0, 10.0, config[i]), dtype=dtype)) + v.append(mx.nd.array(v_np[i]).as_np_ndarray()) + v[i].attach_grad() + expected_np = _np.column_stack(v_np) + with mx.autograd.record(): + mx_out = test_column_stack(*v) + assert mx_out.shape == expected_np.shape + assert_almost_equal(mx_out.asnumpy(), expected_np, rtol=rtol, atol=atol) + + # Test gradient + mx_out.backward() + for i in range(3): + expected_grad = g(v_np[i]) + assert_almost_equal(v[i].grad.asnumpy(), expected_grad, rtol=rtol, atol=atol) + + # Test imperative once again + mx_out = np.column_stack(v) + expected_np = _np.column_stack(v_np) + assert_almost_equal(mx_out.asnumpy(), expected_np, rtol=rtol, atol=atol) + + if __name__ == '__main__': import nose nose.runmodule() From 7aa379728c07ee2baf19412c8c3c94c8a498fea9 Mon Sep 17 00:00:00 2001 From: Ubuntu Date: Wed, 23 Oct 2019 15:03:59 +0000 Subject: [PATCH 13/23] cpplint --- src/operator/numpy/np_matrix_op.cc | 13 ------------- 1 file changed, 13 deletions(-) diff --git a/src/operator/numpy/np_matrix_op.cc b/src/operator/numpy/np_matrix_op.cc index 173c905c7590..27c94e02210a 100644 --- a/src/operator/numpy/np_matrix_op.cc +++ b/src/operator/numpy/np_matrix_op.cc @@ -24,7 +24,6 @@ */ #include -#include #include "./np_matrix_op-inl.h" #include "../nn/concat-inl.h" @@ -614,17 +613,6 @@ Examples:: .add_argument("data", 
"NDArray-or-Symbol[]", "List of arrays to stack") .add_arguments(StackParam::__FIELDS__()); -void dbg_print_shape(TShape &shape, const char *msg = nullptr) { - using namespace std; - if (msg) - cout << "dbg_print_shape: " << msg << " "; - else - cout << "dbg_print_shape: "; - for (int i = 0; i < shape.ndim(); ++i) - cout << shape[i] << " "; - cout << endl; -} - bool NumpyColumnStackType(const nnvm::NodeAttrs& attrs, std::vector *in_type, std::vector *out_type) { @@ -650,7 +638,6 @@ bool NumpyColumnStackType(const nnvm::NodeAttrs& attrs, bool NumpyColumnStackShape(const nnvm::NodeAttrs& attrs, mxnet::ShapeVector* in_attrs, mxnet::ShapeVector* out_attrs) { - CHECK_EQ(out_attrs->size(), 1U); const NumpyColumnStackParam& param = nnvm::get(attrs.parsed); CHECK_EQ(in_attrs->size(), param.num_args); From 48fc7288f871e8b00d0721a7681a4715c040ee76 Mon Sep 17 00:00:00 2001 From: Ubuntu Date: Wed, 23 Oct 2019 15:49:13 +0000 Subject: [PATCH 14/23] remove column_stack from numpy interoperability test temporarily --- python/mxnet/numpy_dispatch_protocol.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/python/mxnet/numpy_dispatch_protocol.py b/python/mxnet/numpy_dispatch_protocol.py index cfab2a49699d..b2c7419b36b2 100644 --- a/python/mxnet/numpy_dispatch_protocol.py +++ b/python/mxnet/numpy_dispatch_protocol.py @@ -121,7 +121,7 @@ def _run_with_array_ufunc_proto(*args, **kwargs): 'var', 'vdot', 'vstack', - 'column_stack', + # 'column_stack', 'zeros_like', 'linalg.norm', 'trace', From b819cfd12fe755ff17e73907987c669c5ceefad9 Mon Sep 17 00:00:00 2001 From: Ubuntu Date: Thu, 24 Oct 2019 05:36:56 +0000 Subject: [PATCH 15/23] style and test fix --- python/mxnet/ndarray/numpy/_op.py | 5 +- python/mxnet/numpy/multiarray.py | 19 +++++++- python/mxnet/numpy_dispatch_protocol.py | 2 +- python/mxnet/symbol/numpy/_symbol.py | 48 +++++++++++++++++++ src/operator/numpy/np_matrix_op.cc | 8 ++-- .../unittest/test_numpy_interoperability.py | 6 +++ tests/python/unittest/test_numpy_op.py | 1 + 7 files changed, 81 insertions(+), 8 deletions(-) diff --git a/python/mxnet/ndarray/numpy/_op.py b/python/mxnet/ndarray/numpy/_op.py index c8991b46594b..0747990d7bdf 100644 --- a/python/mxnet/ndarray/numpy/_op.py +++ b/python/mxnet/ndarray/numpy/_op.py @@ -3016,13 +3016,16 @@ def column_stack(tup): ---------- tup : sequence of 1-D or 2-D arrays. Arrays to stack. All of them must have the same first dimension. + Returns - -------- + ------- stacked : 2-D array The array formed by stacking the given arrays. 
+ See Also -------- stack, hstack, vstack, concatenate + Examples -------- >>> a = np.array((1,2,3)) diff --git a/python/mxnet/numpy/multiarray.py b/python/mxnet/numpy/multiarray.py index e854aab759ca..669e6c015ce6 100644 --- a/python/mxnet/numpy/multiarray.py +++ b/python/mxnet/numpy/multiarray.py @@ -50,6 +50,7 @@ 'sqrt', 'cbrt', 'abs', 'absolute', 'exp', 'expm1', 'arcsin', 'arccos', 'arctan', 'sign', 'log', 'degrees', 'log2', 'log1p', 'rint', 'radians', 'reciprocal', 'square', 'negative', 'fix', 'ceil', 'floor', 'trunc', 'logical_not', 'arcsinh', 'arccosh', 'arctanh', +<<<<<<< 48fc7288f871e8b00d0721a7681a4715c040ee76 'tensordot', 'histogram', 'eye', 'linspace', 'logspace', 'expand_dims', 'tile', 'arange', 'split', 'vsplit', 'concatenate', 'stack', 'vstack', 'column_stack', 'dstack', 'mean', 'maximum', 'minimum', 'swapaxes', 'clip', 'argmax', 'argmin', 'std', 'var', 'indices', 'copysign', 'ravel', 'hanning', 'hamming', @@ -57,6 +58,14 @@ 'identity', 'take', 'ldexp', 'vdot', 'inner', 'outer', 'equal', 'not_equal', 'greater', 'less', 'greater_equal', 'less_equal', 'hsplit', 'rot90', 'einsum', 'true_divide', 'nonzero', 'shares_memory', 'may_share_memory', 'diff'] +======= + 'tensordot', 'histogram', 'eye', 'linspace', 'logspace', 'expand_dims', 'tile', 'arange', 'split', + 'vsplit', 'concatenate', 'stack', 'vstack', 'column_stack', 'dstack', 'mean', 'maximum', 'minimum', + 'swapaxes', 'clip', 'argmax', 'std', 'var', 'indices', 'copysign', 'ravel', 'hanning', 'hamming', + 'blackman', 'flip', 'around', 'arctan2', 'hypot', 'rad2deg', 'deg2rad', 'unique', 'lcm', 'tril', + 'identity', 'take', 'ldexp', 'vdot', 'inner', 'outer', 'equal', 'not_equal', 'greater', 'less', + 'greater_equal', 'less_equal', 'hsplit', 'rot90', 'einsum', 'true_divide'] +>>>>>>> style and test fix # Return code for dispatching indexing function call _NDARRAY_UNSUPPORTED_INDEXING = -1 @@ -4901,23 +4910,29 @@ def vstack(arrays, out=None): @set_module('mxnet.numpy') def column_stack(tup): - """ + """ column_stack(*args, **kwargs) + Stack 1-D arrays as columns into a 2-D array. + Take a sequence of 1-D arrays and stack them as columns to make a single 2-D array. 2-D arrays are stacked as-is, just like with `hstack`. 1-D arrays are turned into 2-D columns first. + Parameters ---------- tup : sequence of 1-D or 2-D arrays. Arrays to stack. All of them must have the same first dimension. + Returns - -------- + ------- stacked : 2-D array The array formed by stacking the given arrays. 
+ See Also -------- stack, hstack, vstack, concatenate + Examples -------- >>> a = np.array((1,2,3)) diff --git a/python/mxnet/numpy_dispatch_protocol.py b/python/mxnet/numpy_dispatch_protocol.py index b2c7419b36b2..cfab2a49699d 100644 --- a/python/mxnet/numpy_dispatch_protocol.py +++ b/python/mxnet/numpy_dispatch_protocol.py @@ -121,7 +121,7 @@ def _run_with_array_ufunc_proto(*args, **kwargs): 'var', 'vdot', 'vstack', - # 'column_stack', + 'column_stack', 'zeros_like', 'linalg.norm', 'trace', diff --git a/python/mxnet/symbol/numpy/_symbol.py b/python/mxnet/symbol/numpy/_symbol.py index 87543574c502..283f5ca9f4cc 100644 --- a/python/mxnet/symbol/numpy/_symbol.py +++ b/python/mxnet/symbol/numpy/_symbol.py @@ -36,11 +36,19 @@ 'rint', 'radians', 'reciprocal', 'square', 'negative', 'fix', 'ceil', 'floor', 'trunc', 'logical_not', 'arcsinh', 'arccosh', 'arctanh', 'tensordot', 'histogram', 'eye', 'linspace', 'logspace', 'expand_dims', 'tile', 'arange', 'split', 'vsplit', 'concatenate', +<<<<<<< 48fc7288f871e8b00d0721a7681a4715c040ee76 'stack', 'vstack', 'column_stack', 'dstack', 'mean', 'maximum', 'minimum', 'swapaxes', 'clip', 'argmax', 'argmin', 'std', 'var', 'indices', 'copysign', 'ravel', 'hanning', 'hamming', 'blackman', 'flip', 'around', 'hypot', 'rad2deg', 'deg2rad', 'unique', 'lcm', 'tril', 'identity', 'take', 'ldexp', 'vdot', 'inner', 'outer', 'equal', 'not_equal', 'greater', 'less', 'greater_equal', 'less_equal', 'hsplit', 'rot90', 'einsum', 'true_divide', 'shares_memory', 'may_share_memory', 'diff'] +======= + 'stack', 'vstack', 'column_stack', 'dstack', 'mean', 'maximum', 'minimum', 'swapaxes', 'clip', + 'argmax', 'std', 'var', 'indices', 'copysign', 'ravel', 'hanning', 'hamming', 'blackman', 'flip', + 'around', 'hypot', 'rad2deg', 'deg2rad', 'unique', 'lcm', 'tril', 'identity', 'take', + 'ldexp', 'vdot', 'inner', 'outer', 'equal', 'not_equal', 'greater', 'less', 'greater_equal', + 'less_equal', 'hsplit', 'rot90', 'einsum', 'true_divide'] +>>>>>>> style and test fix def _num_outputs(sym): @@ -3072,6 +3080,43 @@ def get_list(arrays): return _npi.vstack(*arrays) +@set_module('mxnet.symbol.numpy') +def column_stack(tup): + """ column_stack(*args, **kwargs) + + Stack 1-D arrays as columns into a 2-D array. + + Take a sequence of 1-D arrays and stack them as columns + to make a single 2-D array. 2-D arrays are stacked as-is, + just like with `hstack`. 1-D arrays are turned into 2-D columns + first. + + Parameters + ---------- + tup : sequence of 1-D or 2-D arrays. + Arrays to stack. All of them must have the same first dimension. + + Returns + ------- + stacked : 2-D array + The array formed by stacking the given arrays. 
+ + See Also + -------- + stack, hstack, vstack, concatenate + + Examples + -------- + >>> a = np.array((1,2,3)) + >>> b = np.array((2,3,4)) + >>> np.column_stack((a,b)) + array([[1, 2], + [2, 3], + [3, 4]]) + """ + return _npi.column_stack(*tup) + + @set_module('mxnet.symbol.numpy') def dstack(arrays): """ @@ -4589,6 +4634,7 @@ def einsum(*operands, **kwargs): operands = operands[1:] return _npi.einsum(*operands, subscripts=subscripts, out=out, optimize=int(optimize_arg)) +<<<<<<< 48fc7288f871e8b00d0721a7681a4715c040ee76 @set_module('mxnet.symbol.numpy') def column_stack(tup): @@ -4619,6 +4665,8 @@ def column_stack(tup): [3., 4.]]) """ return _npi.column_stack(*tup) +======= +>>>>>>> style and test fix @set_module('mxnet.symbol.numpy') diff --git a/src/operator/numpy/np_matrix_op.cc b/src/operator/numpy/np_matrix_op.cc index 27c94e02210a..587068b7593a 100644 --- a/src/operator/numpy/np_matrix_op.cc +++ b/src/operator/numpy/np_matrix_op.cc @@ -614,8 +614,8 @@ Examples:: .add_arguments(StackParam::__FIELDS__()); bool NumpyColumnStackType(const nnvm::NodeAttrs& attrs, - std::vector *in_type, - std::vector *out_type) { + std::vector *in_type, + std::vector *out_type) { const NumpyColumnStackParam& param = nnvm::get(attrs.parsed); CHECK_EQ(in_type->size(), param.num_args); CHECK_EQ(out_type->size(), 1); @@ -636,8 +636,8 @@ bool NumpyColumnStackType(const nnvm::NodeAttrs& attrs, } bool NumpyColumnStackShape(const nnvm::NodeAttrs& attrs, - mxnet::ShapeVector* in_attrs, - mxnet::ShapeVector* out_attrs) { + mxnet::ShapeVector* in_attrs, + mxnet::ShapeVector* out_attrs) { CHECK_EQ(out_attrs->size(), 1U); const NumpyColumnStackParam& param = nnvm::get(attrs.parsed); CHECK_EQ(in_attrs->size(), param.num_args); diff --git a/tests/python/unittest/test_numpy_interoperability.py b/tests/python/unittest/test_numpy_interoperability.py index 103f2c117ea6..68783fb2330b 100644 --- a/tests/python/unittest/test_numpy_interoperability.py +++ b/tests/python/unittest/test_numpy_interoperability.py @@ -1020,6 +1020,11 @@ def _add_workload_vstack(array_pool): OpArgMngr.add_workload('vstack', array_pool['4x1']) OpArgMngr.add_workload('vstack', array_pool['1x1x0']) +def _add_workload_column_stack(): + OpArgMngr.add_workload('column_stack', (np.array([1, 2, 3]), np.array([2, 3, 4]))) + OpArgMngr.add_workload('column_stack', (np.array([[1], [2], [3]]), np.array([[2], [3], [4]]))) + OpArgMngr.add_workload('column_stack', [np.array(_np.arange(3)) for _ in range(2)]) + def _add_workload_equal(array_pool): # TODO(junwu): fp16 does not work yet with TVM generated ops @@ -1207,6 +1212,7 @@ def _prepare_workloads(): _add_workload_logical_not(array_pool) _add_workload_vdot() _add_workload_vstack(array_pool) + _add_workload_column_stack() _add_workload_equal(array_pool) _add_workload_not_equal(array_pool) _add_workload_greater(array_pool) diff --git a/tests/python/unittest/test_numpy_op.py b/tests/python/unittest/test_numpy_op.py index 0c8068cc2261..406a840a0a29 100644 --- a/tests/python/unittest/test_numpy_op.py +++ b/tests/python/unittest/test_numpy_op.py @@ -3870,6 +3870,7 @@ def g(data): assert_almost_equal(mx_out.asnumpy(), expected_np, rtol=rtol, atol=atol) + if __name__ == '__main__': import nose nose.runmodule() From a5c52020baeb8cf6e567190040c2ac76576ab6cb Mon Sep 17 00:00:00 2001 From: Ubuntu Date: Thu, 24 Oct 2019 05:50:20 +0000 Subject: [PATCH 16/23] fix pylint and add interoperability test --- python/mxnet/ndarray/numpy/_op.py | 6 ++++++ python/mxnet/numpy/multiarray.py | 15 ++++++--------- 
python/mxnet/symbol/numpy/_symbol.py | 4 ++-- tests/python/unittest/test_numpy_op.py | 1 - 4 files changed, 14 insertions(+), 12 deletions(-) diff --git a/python/mxnet/ndarray/numpy/_op.py b/python/mxnet/ndarray/numpy/_op.py index 0747990d7bdf..8caadd6e61ff 100644 --- a/python/mxnet/ndarray/numpy/_op.py +++ b/python/mxnet/ndarray/numpy/_op.py @@ -3031,9 +3031,15 @@ def column_stack(tup): >>> a = np.array((1,2,3)) >>> b = np.array((2,3,4)) >>> np.column_stack((a,b)) +<<<<<<< b819cfd12fe755ff17e73907987c669c5ceefad9 array([[1., 2.], [2., 3.], [3., 4.]]) +======= + array([[1, 2], + [2, 3], + [3, 4]]) +>>>>>>> fix pylint and add interoperability test """ return _npi.column_stack(*tup) diff --git a/python/mxnet/numpy/multiarray.py b/python/mxnet/numpy/multiarray.py index 669e6c015ce6..1a9eccebf3b9 100644 --- a/python/mxnet/numpy/multiarray.py +++ b/python/mxnet/numpy/multiarray.py @@ -50,7 +50,6 @@ 'sqrt', 'cbrt', 'abs', 'absolute', 'exp', 'expm1', 'arcsin', 'arccos', 'arctan', 'sign', 'log', 'degrees', 'log2', 'log1p', 'rint', 'radians', 'reciprocal', 'square', 'negative', 'fix', 'ceil', 'floor', 'trunc', 'logical_not', 'arcsinh', 'arccosh', 'arctanh', -<<<<<<< 48fc7288f871e8b00d0721a7681a4715c040ee76 'tensordot', 'histogram', 'eye', 'linspace', 'logspace', 'expand_dims', 'tile', 'arange', 'split', 'vsplit', 'concatenate', 'stack', 'vstack', 'column_stack', 'dstack', 'mean', 'maximum', 'minimum', 'swapaxes', 'clip', 'argmax', 'argmin', 'std', 'var', 'indices', 'copysign', 'ravel', 'hanning', 'hamming', @@ -58,14 +57,6 @@ 'identity', 'take', 'ldexp', 'vdot', 'inner', 'outer', 'equal', 'not_equal', 'greater', 'less', 'greater_equal', 'less_equal', 'hsplit', 'rot90', 'einsum', 'true_divide', 'nonzero', 'shares_memory', 'may_share_memory', 'diff'] -======= - 'tensordot', 'histogram', 'eye', 'linspace', 'logspace', 'expand_dims', 'tile', 'arange', 'split', - 'vsplit', 'concatenate', 'stack', 'vstack', 'column_stack', 'dstack', 'mean', 'maximum', 'minimum', - 'swapaxes', 'clip', 'argmax', 'std', 'var', 'indices', 'copysign', 'ravel', 'hanning', 'hamming', - 'blackman', 'flip', 'around', 'arctan2', 'hypot', 'rad2deg', 'deg2rad', 'unique', 'lcm', 'tril', - 'identity', 'take', 'ldexp', 'vdot', 'inner', 'outer', 'equal', 'not_equal', 'greater', 'less', - 'greater_equal', 'less_equal', 'hsplit', 'rot90', 'einsum', 'true_divide'] ->>>>>>> style and test fix # Return code for dispatching indexing function call _NDARRAY_UNSUPPORTED_INDEXING = -1 @@ -4938,9 +4929,15 @@ def column_stack(tup): >>> a = np.array((1,2,3)) >>> b = np.array((2,3,4)) >>> np.column_stack((a,b)) +<<<<<<< b819cfd12fe755ff17e73907987c669c5ceefad9 array([[1., 2.], [2., 3.], [3., 4.]]) +======= + array([[1, 2], + [2, 3], + [3, 4]]) +>>>>>>> fix pylint and add interoperability test """ return _mx_nd_np.column_stack(tup) diff --git a/python/mxnet/symbol/numpy/_symbol.py b/python/mxnet/symbol/numpy/_symbol.py index 283f5ca9f4cc..bbf4323cfde9 100644 --- a/python/mxnet/symbol/numpy/_symbol.py +++ b/python/mxnet/symbol/numpy/_symbol.py @@ -3111,8 +3111,8 @@ def column_stack(tup): >>> b = np.array((2,3,4)) >>> np.column_stack((a,b)) array([[1, 2], - [2, 3], - [3, 4]]) + [2, 3], + [3, 4]]) """ return _npi.column_stack(*tup) diff --git a/tests/python/unittest/test_numpy_op.py b/tests/python/unittest/test_numpy_op.py index 406a840a0a29..e8462d462e77 100644 --- a/tests/python/unittest/test_numpy_op.py +++ b/tests/python/unittest/test_numpy_op.py @@ -3868,7 +3868,6 @@ def g(data): mx_out = np.column_stack(v) expected_np = _np.column_stack(v_np) 
assert_almost_equal(mx_out.asnumpy(), expected_np, rtol=rtol, atol=atol) - if __name__ == '__main__': From c81d15339f38b79a617cd01c08d2ee62f07780f1 Mon Sep 17 00:00:00 2001 From: Ubuntu Date: Thu, 24 Oct 2019 09:33:35 +0000 Subject: [PATCH 17/23] fix doc string, add comment, remove dead code --- python/mxnet/ndarray/numpy/_op.py | 6 ------ python/mxnet/numpy/multiarray.py | 9 +-------- python/mxnet/symbol/numpy/_symbol.py | 9 ++++----- src/operator/numpy/np_matrix_op.cc | 5 ++++- 4 files changed, 9 insertions(+), 20 deletions(-) diff --git a/python/mxnet/ndarray/numpy/_op.py b/python/mxnet/ndarray/numpy/_op.py index 8caadd6e61ff..0747990d7bdf 100644 --- a/python/mxnet/ndarray/numpy/_op.py +++ b/python/mxnet/ndarray/numpy/_op.py @@ -3031,15 +3031,9 @@ def column_stack(tup): >>> a = np.array((1,2,3)) >>> b = np.array((2,3,4)) >>> np.column_stack((a,b)) -<<<<<<< b819cfd12fe755ff17e73907987c669c5ceefad9 array([[1., 2.], [2., 3.], [3., 4.]]) -======= - array([[1, 2], - [2, 3], - [3, 4]]) ->>>>>>> fix pylint and add interoperability test """ return _npi.column_stack(*tup) diff --git a/python/mxnet/numpy/multiarray.py b/python/mxnet/numpy/multiarray.py index 1a9eccebf3b9..392f6280bcfc 100644 --- a/python/mxnet/numpy/multiarray.py +++ b/python/mxnet/numpy/multiarray.py @@ -4901,8 +4901,7 @@ def vstack(arrays, out=None): @set_module('mxnet.numpy') def column_stack(tup): - """ column_stack(*args, **kwargs) - + """ Stack 1-D arrays as columns into a 2-D array. Take a sequence of 1-D arrays and stack them as columns @@ -4929,15 +4928,9 @@ def column_stack(tup): >>> a = np.array((1,2,3)) >>> b = np.array((2,3,4)) >>> np.column_stack((a,b)) -<<<<<<< b819cfd12fe755ff17e73907987c669c5ceefad9 array([[1., 2.], [2., 3.], [3., 4.]]) -======= - array([[1, 2], - [2, 3], - [3, 4]]) ->>>>>>> fix pylint and add interoperability test """ return _mx_nd_np.column_stack(tup) diff --git a/python/mxnet/symbol/numpy/_symbol.py b/python/mxnet/symbol/numpy/_symbol.py index bbf4323cfde9..a91278f2e9ce 100644 --- a/python/mxnet/symbol/numpy/_symbol.py +++ b/python/mxnet/symbol/numpy/_symbol.py @@ -3082,8 +3082,7 @@ def get_list(arrays): @set_module('mxnet.symbol.numpy') def column_stack(tup): - """ column_stack(*args, **kwargs) - + """ Stack 1-D arrays as columns into a 2-D array. Take a sequence of 1-D arrays and stack them as columns @@ -3110,9 +3109,9 @@ def column_stack(tup): >>> a = np.array((1,2,3)) >>> b = np.array((2,3,4)) >>> np.column_stack((a,b)) - array([[1, 2], - [2, 3], - [3, 4]]) + array([[1., 2.], + [2., 3.], + [3., 4.]]) """ return _npi.column_stack(*tup) diff --git a/src/operator/numpy/np_matrix_op.cc b/src/operator/numpy/np_matrix_op.cc index 587068b7593a..18594cd9cff1 100644 --- a/src/operator/numpy/np_matrix_op.cc +++ b/src/operator/numpy/np_matrix_op.cc @@ -643,10 +643,12 @@ bool NumpyColumnStackShape(const nnvm::NodeAttrs& attrs, CHECK_EQ(in_attrs->size(), param.num_args); std::vector in_attrs_tmp(param.num_args); TShape dshape; + // For each array in the input, reshape to 2D if ndim < 2. for (int i = 0; i < param.num_args; i++) { if ((*in_attrs)[i].ndim() == 0) { in_attrs_tmp[i] = TShape(2, 1); } else if ((*in_attrs)[i].ndim() == 1) { + // Transpose 1D row into a column. in_attrs_tmp[i] = TShape(2, 1); in_attrs_tmp[i][0] = (*in_attrs)[i][0]; } else { @@ -664,6 +666,7 @@ bool NumpyColumnStackShape(const nnvm::NodeAttrs& attrs, if (dshape.ndim() == -1) { return false; } + // Accumulate along column axis. 
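+  // `cnt` counts shapes whose column width is still unknown, `sum` accumulates
+  // the widths that are known, and `pos` records which input (or -1, meaning
+  // the output) is the unknown one, so a single missing width can be inferred.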
int cnt = 0, sum = 0, pos = -1; for (int i = 0; i < param.num_args; i++) { TShape tmp = in_attrs_tmp[i]; @@ -693,11 +696,11 @@ bool NumpyColumnStackShape(const nnvm::NodeAttrs& attrs, if (!shape_is_known(dshape)) { return false; } - dshape[1] = sum; if (cnt == 0) { SHAPE_ASSIGN_CHECK(*out_attrs, 0, dshape); } else if (cnt == 1) { + // Infer missing dimension if only one column dimension of the input is missing if (pos >= 0) { in_attrs_tmp[pos][1] = out_attrs->at(0)[1] - sum; } else { From 884428d4a7c9624817bdfa4f654b8d738e6c6d75 Mon Sep 17 00:00:00 2001 From: Ubuntu Date: Thu, 24 Oct 2019 10:00:01 +0000 Subject: [PATCH 18/23] pylint --- python/mxnet/numpy/multiarray.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/python/mxnet/numpy/multiarray.py b/python/mxnet/numpy/multiarray.py index 392f6280bcfc..d7c91b8d2fb3 100644 --- a/python/mxnet/numpy/multiarray.py +++ b/python/mxnet/numpy/multiarray.py @@ -4901,7 +4901,7 @@ def vstack(arrays, out=None): @set_module('mxnet.numpy') def column_stack(tup): - """ + """ Stack 1-D arrays as columns into a 2-D array. Take a sequence of 1-D arrays and stack them as columns From f7f1f0430d6965d507ab6b602fe83144ccc976dd Mon Sep 17 00:00:00 2001 From: Ubuntu Date: Fri, 25 Oct 2019 04:11:51 +0000 Subject: [PATCH 19/23] ci --- python/mxnet/numpy/multiarray.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/python/mxnet/numpy/multiarray.py b/python/mxnet/numpy/multiarray.py index d7c91b8d2fb3..8af764db6e72 100644 --- a/python/mxnet/numpy/multiarray.py +++ b/python/mxnet/numpy/multiarray.py @@ -4915,7 +4915,7 @@ def column_stack(tup): Arrays to stack. All of them must have the same first dimension. Returns - ------- + -------- stacked : 2-D array The array formed by stacking the given arrays. From 08db148d3e16ebf02e02a67cf3bb47cbfab90c4f Mon Sep 17 00:00:00 2001 From: Ubuntu Date: Fri, 25 Oct 2019 08:19:50 +0000 Subject: [PATCH 20/23] ci --- python/mxnet/ndarray/numpy/_op.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/python/mxnet/ndarray/numpy/_op.py b/python/mxnet/ndarray/numpy/_op.py index 0747990d7bdf..8b1e9fb210e4 100644 --- a/python/mxnet/ndarray/numpy/_op.py +++ b/python/mxnet/ndarray/numpy/_op.py @@ -3018,7 +3018,7 @@ def column_stack(tup): Arrays to stack. All of them must have the same first dimension. Returns - ------- + -------- stacked : 2-D array The array formed by stacking the given arrays. 
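For reference, the behaviour described by the column_stack docstrings above, and reproduced by the C++ operator, can be written as a minimal NumPy-level sketch. This assumes plain NumPy only; ref_column_stack is an illustrative helper, not part of this patch set:

import numpy as np

def ref_column_stack(tup):
    # 0-D/1-D inputs become (size, 1) columns; arrays with 2 or more dims are
    # kept as-is. Everything is then joined along axis 1, mirroring how the
    # operator reshapes its inputs and delegates to Concat with dim = 1.
    cols = []
    for a in tup:
        a = np.asarray(a)
        cols.append(a.reshape(a.size, 1) if a.ndim < 2 else a)
    return np.concatenate(cols, axis=1)

a = np.array([1, 2, 3])
b = np.array([[2, 5], [3, 6], [4, 7]])
assert (ref_column_stack((a, b)) == np.column_stack((a, b))).all()

Because every output element is a copy of exactly one input element, the gradient of column_stack with respect to each input is an array of ones, which is what the g helper in test_np_column_stack checks.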
From 761149ab245f0ab84551272732497fde19bceda3 Mon Sep 17 00:00:00 2001 From: Ubuntu Date: Wed, 30 Oct 2019 07:39:19 +0000 Subject: [PATCH 21/23] ci --- src/operator/numpy/np_matrix_op-inl.h | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/src/operator/numpy/np_matrix_op-inl.h b/src/operator/numpy/np_matrix_op-inl.h index c1f5a1b1d8dc..622bf613ed69 100644 --- a/src/operator/numpy/np_matrix_op-inl.h +++ b/src/operator/numpy/np_matrix_op-inl.h @@ -52,6 +52,14 @@ struct NumpyVstackParam : public dmlc::Parameter { } }; +struct NumpyColumnStackParam : public dmlc::Parameter { + int num_args; + DMLC_DECLARE_PARAMETER(NumpyColumnStackParam) { + DMLC_DECLARE_FIELD(num_args).set_lower_bound(1) + .describe("Number of inputs to be column stacked"); + } +}; + struct NumpyReshapeParam : public dmlc::Parameter { mxnet::TShape newshape; std::string order; @@ -102,14 +110,6 @@ struct NumpyXReshapeParam : public dmlc::Parameter { " Note that currently only C-like order is" " supported"); -struct NumpyColumnStackParam : public dmlc::Parameter { - int num_args; - DMLC_DECLARE_PARAMETER(NumpyColumnStackParam) { - DMLC_DECLARE_FIELD(num_args).set_lower_bound(1) - .describe("Number of inputs to be column stacked"); - } -}; - template void NumpyTranspose(const nnvm::NodeAttrs& attrs, const OpContext& ctx, From 173c1f1c68baf749da7df6aab7acf39a4aca810b Mon Sep 17 00:00:00 2001 From: Ubuntu Date: Wed, 30 Oct 2019 11:48:57 +0000 Subject: [PATCH 22/23] rebase resolve conflicts --- python/mxnet/symbol/numpy/_symbol.py | 16 ---------------- src/operator/numpy/np_matrix_op-inl.h | 2 ++ 2 files changed, 2 insertions(+), 16 deletions(-) diff --git a/python/mxnet/symbol/numpy/_symbol.py b/python/mxnet/symbol/numpy/_symbol.py index 8a58fd93d9dd..ae69a94a73af 100644 --- a/python/mxnet/symbol/numpy/_symbol.py +++ b/python/mxnet/symbol/numpy/_symbol.py @@ -36,24 +36,11 @@ 'rint', 'radians', 'reciprocal', 'square', 'negative', 'fix', 'ceil', 'floor', 'trunc', 'logical_not', 'arcsinh', 'arccosh', 'arctanh', 'tensordot', 'histogram', 'eye', 'linspace', 'logspace', 'expand_dims', 'tile', 'arange', 'split', 'vsplit', 'concatenate', -<<<<<<< HEAD -<<<<<<< 48fc7288f871e8b00d0721a7681a4715c040ee76 'stack', 'vstack', 'column_stack', 'dstack', 'mean', 'maximum', 'minimum', 'swapaxes', 'clip', 'argmax', 'argmin', 'std', 'var', 'indices', 'copysign', 'ravel', 'hanning', 'hamming', 'blackman', 'flip', -======= - 'stack', 'vstack', 'column_stack', 'dstack', 'mean', 'maximum', 'minimum', 'swapaxes', 'clip', 'argmax', - 'argmin', 'std', 'var', 'indices', 'copysign', 'ravel', 'hanning', 'hamming', 'blackman', 'flip', ->>>>>>> 1f703c3a0e327281a1b1e01b7ddb3e2c1377ee9d 'around', 'hypot', 'rad2deg', 'deg2rad', 'unique', 'lcm', 'tril', 'identity', 'take', 'ldexp', 'vdot', 'inner', 'outer', 'equal', 'not_equal', 'greater', 'less', 'greater_equal', 'less_equal', 'hsplit', 'rot90', 'einsum', 'true_divide', 'shares_memory', 'may_share_memory', 'diff'] -======= - 'stack', 'vstack', 'column_stack', 'dstack', 'mean', 'maximum', 'minimum', 'swapaxes', 'clip', - 'argmax', 'std', 'var', 'indices', 'copysign', 'ravel', 'hanning', 'hamming', 'blackman', 'flip', - 'around', 'hypot', 'rad2deg', 'deg2rad', 'unique', 'lcm', 'tril', 'identity', 'take', - 'ldexp', 'vdot', 'inner', 'outer', 'equal', 'not_equal', 'greater', 'less', 'greater_equal', - 'less_equal', 'hsplit', 'rot90', 'einsum', 'true_divide'] ->>>>>>> style and test fix def _num_outputs(sym): @@ -4638,7 +4625,6 @@ def einsum(*operands, **kwargs): operands = operands[1:] 
return _npi.einsum(*operands, subscripts=subscripts, out=out, optimize=int(optimize_arg)) -<<<<<<< 48fc7288f871e8b00d0721a7681a4715c040ee76 @set_module('mxnet.symbol.numpy') def column_stack(tup): @@ -4669,8 +4655,6 @@ def column_stack(tup): [3., 4.]]) """ return _npi.column_stack(*tup) -======= ->>>>>>> style and test fix @set_module('mxnet.symbol.numpy') diff --git a/src/operator/numpy/np_matrix_op-inl.h b/src/operator/numpy/np_matrix_op-inl.h index 622bf613ed69..2545adcb3555 100644 --- a/src/operator/numpy/np_matrix_op-inl.h +++ b/src/operator/numpy/np_matrix_op-inl.h @@ -109,6 +109,8 @@ struct NumpyXReshapeParam : public dmlc::Parameter { " back to the first axis index changing slowest." " Note that currently only C-like order is" " supported"); + } +}; template void NumpyTranspose(const nnvm::NodeAttrs& attrs, From 209acc265ba5bcfaf9a5f90b82a1797d2c711fcb Mon Sep 17 00:00:00 2001 From: Ubuntu Date: Wed, 30 Oct 2019 12:23:53 +0000 Subject: [PATCH 23/23] pylint --- python/mxnet/ndarray/numpy/_op.py | 4 ++-- python/mxnet/symbol/numpy/_symbol.py | 35 ++-------------------------- 2 files changed, 4 insertions(+), 35 deletions(-) diff --git a/python/mxnet/ndarray/numpy/_op.py b/python/mxnet/ndarray/numpy/_op.py index 3afc80878ce6..256cfb7d5708 100644 --- a/python/mxnet/ndarray/numpy/_op.py +++ b/python/mxnet/ndarray/numpy/_op.py @@ -34,8 +34,8 @@ 'log1p', 'rint', 'radians', 'reciprocal', 'square', 'negative', 'fix', 'ceil', 'floor', 'trunc', 'logical_not', 'arcsinh', 'arccosh', 'arctanh', 'tensordot', 'histogram', 'eye', 'linspace', 'logspace', 'expand_dims', 'tile', 'arange', 'split', 'vsplit', 'concatenate', - 'stack', 'vstack', 'column_stack', 'dstack', 'mean', 'maximum', 'minimum', 'swapaxes', 'clip', 'argmax', 'argmin', - 'std', 'var', 'indices', 'copysign', 'ravel', 'hanning', 'hamming', 'blackman', 'flip', + 'stack', 'vstack', 'column_stack', 'dstack', 'mean', 'maximum', 'minimum', 'swapaxes', 'clip', 'argmax', + 'argmin', 'std', 'var', 'indices', 'copysign', 'ravel', 'hanning', 'hamming', 'blackman', 'flip', 'around', 'hypot', 'rad2deg', 'deg2rad', 'unique', 'lcm', 'tril', 'identity', 'take', 'ldexp', 'vdot', 'inner', 'outer', 'equal', 'not_equal', 'greater', 'less', 'greater_equal', 'less_equal', 'hsplit', 'rot90', 'einsum', 'true_divide', 'nonzero', 'shares_memory', 'may_share_memory', 'diff'] diff --git a/python/mxnet/symbol/numpy/_symbol.py b/python/mxnet/symbol/numpy/_symbol.py index ae69a94a73af..7469875f267a 100644 --- a/python/mxnet/symbol/numpy/_symbol.py +++ b/python/mxnet/symbol/numpy/_symbol.py @@ -36,8 +36,8 @@ 'rint', 'radians', 'reciprocal', 'square', 'negative', 'fix', 'ceil', 'floor', 'trunc', 'logical_not', 'arcsinh', 'arccosh', 'arctanh', 'tensordot', 'histogram', 'eye', 'linspace', 'logspace', 'expand_dims', 'tile', 'arange', 'split', 'vsplit', 'concatenate', - 'stack', 'vstack', 'column_stack', 'dstack', 'mean', 'maximum', 'minimum', 'swapaxes', 'clip', 'argmax', 'argmin', - 'std', 'var', 'indices', 'copysign', 'ravel', 'hanning', 'hamming', 'blackman', 'flip', + 'stack', 'vstack', 'column_stack', 'dstack', 'mean', 'maximum', 'minimum', 'swapaxes', 'clip', 'argmax', + 'argmin', 'std', 'var', 'indices', 'copysign', 'ravel', 'hanning', 'hamming', 'blackman', 'flip', 'around', 'hypot', 'rad2deg', 'deg2rad', 'unique', 'lcm', 'tril', 'identity', 'take', 'ldexp', 'vdot', 'inner', 'outer', 'equal', 'not_equal', 'greater', 'less', 'greater_equal', 'less_equal', 'hsplit', 'rot90', 'einsum', 'true_divide', 'shares_memory', 'may_share_memory', 'diff'] @@ -4626,37 +4626,6 
@@ def einsum(*operands, **kwargs): return _npi.einsum(*operands, subscripts=subscripts, out=out, optimize=int(optimize_arg)) -@set_module('mxnet.symbol.numpy') -def column_stack(tup): - """ - Stack 1-D arrays as columns into a 2-D array. - Take a sequence of 1-D arrays and stack them as columns - to make a single 2-D array. 2-D arrays are stacked as-is, - just like with `hstack`. 1-D arrays are turned into 2-D columns - first. - Parameters - ---------- - tup : sequence of 1-D or 2-D arrays. - Arrays to stack. All of them must have the same first dimension. - Returns - ------- - stacked : 2-D array - The array formed by stacking the given arrays. - See Also - -------- - stack, hstack, vstack, concatenate - Examples - -------- - >>> a = np.array((1,2,3)) - >>> b = np.array((2,3,4)) - >>> np.column_stack((a,b)) - array([[1., 2.], - [2., 3.], - [3., 4.]]) - """ - return _npi.column_stack(*tup) - - @set_module('mxnet.symbol.numpy') def shares_memory(a, b, max_work=None): """
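As a closing illustration of the gradient convention exercised by test_np_diff earlier in this series, the following self-contained check verifies that repeatedly applying the negated, zero-padded first difference (the np_diff_backward helper in the test) matches the vector-Jacobian product of np.diff. It is a sketch that assumes plain NumPy; diff_vjp and the dense Jacobian J are illustrative names only:

import numpy as np

def diff_vjp(ograd, n, axis=-1):
    # Adjoint of np.diff: one negated, zero-padded first difference per step.
    res = ograd
    for _ in range(n):
        res = -np.diff(res, n=1, axis=axis, prepend=0, append=0)
    return res

m, n = 7, 2
# Dense Jacobian of y = np.diff(x, n): column i is d(y)/d(x_i).
J = np.stack([np.diff(np.eye(m)[i], n=n) for i in range(m)], axis=1)
g = np.random.rand(m - n)
assert np.allclose(diff_vjp(g, n), g @ J)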