
add numpy op diagflat [numpy] #16813

Merged (8 commits) on Nov 21, 2019
39 changes: 38 additions & 1 deletion python/mxnet/ndarray/numpy/_op.py
@@ -29,7 +29,7 @@
from ..ndarray import NDArray

__all__ = ['shape', 'zeros', 'ones', 'full', 'add', 'subtract', 'multiply', 'divide', 'mod', 'remainder', 'power',
-           'arctan2', 'sin', 'cos', 'tan', 'sinh', 'cosh', 'tanh', 'log10', 'sqrt', 'cbrt', 'abs',
+           'arctan2', 'sin', 'cos', 'tan', 'diagflat', 'sinh', 'cosh', 'tanh', 'log10', 'sqrt', 'cbrt', 'abs',
'absolute', 'exp', 'expm1', 'arcsin', 'arccos', 'arctan', 'sign', 'log', 'degrees', 'log2',
'log1p', 'rint', 'radians', 'reciprocal', 'square', 'negative', 'fix', 'ceil', 'floor',
'trunc', 'logical_not', 'arcsinh', 'arccosh', 'arctanh', 'tensordot', 'histogram', 'eye',
@@ -1441,6 +1441,43 @@ def tanh(x, out=None, **kwargs):
return _unary_func_helper(x, _npi.tanh, _np.tanh, out=out, **kwargs)


@set_module('mxnet.ndarray.numpy')
def diagflat(arr, k=0):
"""
Create a two-dimensional array with the flattened input as a diagonal.
Parameters
----------
arr : ndarray
Input data, which is flattened and set as the `k`-th
diagonal of the output.
k : int, optional
Diagonal to set; 0, the default, corresponds to the "main" diagonal,
a positive (negative) `k` giving the number of the diagonal above
(below) the main.
Returns
-------
out : ndarray
The 2-D output array.
See Also
--------
diag : MATLAB work-alike for 1-D and 2-D arrays.
diagonal : Return specified diagonals.
trace : Sum along diagonals.
Examples
--------
>>> np.diagflat([[1,2], [3,4]])
array([[1, 0, 0, 0],
[0, 2, 0, 0],
[0, 0, 3, 0],
[0, 0, 0, 4]])
>>> np.diagflat([1,2], 1)
array([[0, 1, 0],
[0, 0, 2],
[0, 0, 0]])
"""
return _npi.diagflat(arr, k=k)
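For reference, here is a minimal pure-NumPy sketch of the semantics this wrapper delegates to the `_npi.diagflat` backend (an illustrative stand-in, not the actual kernel; `diagflat_ref` is a hypothetical helper name):

    import numpy as _np

    def diagflat_ref(arr, k=0):
        # Flatten the input, then place it on the k-th diagonal of a
        # square all-zeros output of side len(flat) + |k|.
        v = _np.asarray(arr).ravel()
        n = v.size + abs(k)
        out = _np.zeros((n, n), dtype=v.dtype)
        idx = _np.arange(v.size)
        out[idx + max(-k, 0), idx + max(k, 0)] = v
        return out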


@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def log10(x, out=None, **kwargs):
39 changes: 38 additions & 1 deletion python/mxnet/numpy/multiarray.py
@@ -47,7 +47,7 @@
from ..ndarray.ndarray import _storage_type

__all__ = ['ndarray', 'empty', 'array', 'shape', 'zeros', 'ones', 'full', 'add', 'subtract', 'multiply', 'divide',
-           'mod', 'remainder', 'power', 'arctan2', 'sin', 'cos', 'tan', 'sinh', 'cosh', 'tanh', 'log10',
+           'mod', 'remainder', 'power', 'arctan2', 'sin', 'cos', 'tan', 'diagflat', 'sinh', 'cosh', 'tanh', 'log10',
'sqrt', 'cbrt', 'abs', 'absolute', 'exp', 'expm1', 'arcsin', 'arccos', 'arctan', 'sign', 'log',
'degrees', 'log2', 'log1p', 'rint', 'radians', 'reciprocal', 'square', 'negative',
'fix', 'ceil', 'floor', 'trunc', 'logical_not', 'arcsinh', 'arccosh', 'arctanh', 'append',
@@ -2981,6 +2981,43 @@ def tanh(x, out=None, **kwargs):
return _mx_nd_np.tanh(x, out=out, **kwargs)


@set_module('mxnet.numpy')
def diagflat(arr, k=0):
"""
Create a two-dimensional array with the flattened input as a diagonal.
Parameters
----------
arr : ndarray
Input data, which is flattened and set as the `k`-th
diagonal of the output.
k : int, optional
Diagonal to set; 0, the default, corresponds to the "main" diagonal,
a positive (negative) `k` giving the number of the diagonal above
(below) the main.
Returns
-------
out : ndarray
The 2-D output array.
See Also
--------
diag : MATLAB work-alike for 1-D and 2-D arrays.
diagonal : Return specified diagonals.
trace : Sum along diagonals.
Examples
--------
>>> np.diagflat([[1,2], [3,4]])
array([[1, 0, 0, 0],
[0, 2, 0, 0],
[0, 0, 3, 0],
[0, 0, 0, 4]])
>>> np.diagflat([1,2], 1)
array([[0, 1, 0],
[0, 0, 2],
[0, 0, 0]])
"""
return _npi.diagflat(arr, k=k)


@set_module('mxnet.numpy')
@wrap_np_unary_func
def log10(x, out=None, **kwargs):
1 change: 1 addition & 0 deletions python/mxnet/numpy_dispatch_protocol.py
@@ -94,6 +94,7 @@ def _run_with_array_ufunc_proto(*args, **kwargs):
'copy',
'cumsum',
'diag',
'diagflat',
'dot',
'expand_dims',
'fix',
39 changes: 38 additions & 1 deletion python/mxnet/symbol/numpy/_symbol.py
@@ -33,7 +33,7 @@
__all__ = ['zeros', 'ones', 'add', 'subtract', 'multiply', 'divide', 'mod', 'remainder', 'power', 'arctan2',
'sin', 'cos', 'tan', 'sinh', 'cosh', 'tanh', 'log10', 'sqrt', 'cbrt', 'abs', 'absolute', 'exp',
'expm1', 'arcsin', 'arccos', 'arctan', 'sign', 'log', 'degrees', 'log2', 'log1p',
-           'rint', 'radians', 'reciprocal', 'square', 'negative', 'fix', 'ceil', 'floor',
+           'rint', 'radians', 'diagflat', 'reciprocal', 'square', 'negative', 'fix', 'ceil', 'floor',
'trunc', 'logical_not', 'arcsinh', 'arccosh', 'arctanh', 'tensordot', 'histogram', 'eye',
'linspace', 'logspace', 'expand_dims', 'tile', 'arange', 'split', 'vsplit', 'concatenate', 'append',
'stack', 'vstack', 'column_stack', 'dstack', 'mean', 'maximum', 'minimum', 'swapaxes', 'clip', 'argmax',
@@ -2270,6 +2270,43 @@ def radians(x, out=None, **kwargs):
return _unary_func_helper(x, _npi.radians, _np.radians, out=out, **kwargs)


@set_module('mxnet.symbol.numpy')
def diagflat(arr, k=0):
"""
Create a two-dimensional array with the flattened input as a diagonal.
Parameters
----------
arr : ndarray
Input data, which is flattened and set as the `k`-th
diagonal of the output.
k : int, optional
Diagonal to set; 0, the default, corresponds to the "main" diagonal,
a positive (negative) `k` giving the number of the diagonal above
(below) the main.
Returns
-------
out : ndarray
The 2-D output array.
See Also
--------
diag : MATLAB work-alike for 1-D and 2-D arrays.
diagonal : Return specified diagonals.
trace : Sum along diagonals.
Examples
--------
>>> np.diagflat([[1,2], [3,4]])
array([[1, 0, 0, 0],
[0, 2, 0, 0],
[0, 0, 3, 0],
[0, 0, 0, 4]])
>>> np.diagflat([1,2], 1)
array([[0, 1, 0],
[0, 0, 2],
[0, 0, 0]])
"""
return _npi.diagflat(arr, k=k)


@set_module('mxnet.symbol.numpy')
@wrap_np_unary_func
def deg2rad(x, out=None, **kwargs):
130 changes: 130 additions & 0 deletions src/operator/numpy/np_matrix_op-inl.h
@@ -1150,6 +1150,136 @@ void NumpyDiagOpBackward(const nnvm::NodeAttrs &attrs,
in_data.Size(), param.k, s, req[0]);
}

struct NumpyDiagflatParam : public dmlc::Parameter<NumpyDiagflatParam> {
int k;
DMLC_DECLARE_PARAMETER(NumpyDiagflatParam) {
DMLC_DECLARE_FIELD(k).set_default(0).describe("Diagonal in question. The default is 0. "
"Use k>0 for diagonals above the main diagonal, "
"and k<0 for diagonals below the main diagonal. ");
}
};

inline mxnet::TShape NumpyDiagflatShapeImpl(const mxnet::TShape& ishape, const int k) {
if (ishape.ndim() == 1) {
auto s = ishape[0] + std::abs(k);
return mxnet::TShape({s, s});
}

  if (ishape.ndim() >= 2) {
    // Output side length: the product of all axis extents greater than 1
    // (i.e. the total element count, assuming no degenerate axes), plus |k|.
    auto s = 1;
    for (int i = 0; i < ishape.ndim(); i++) {
      if (ishape[i] >= 2) {
        s = s * ishape[i];
      }
    }
    s = s + std::abs(k);
    return mxnet::TShape({s, s});
  }
  return mxnet::TShape({-1, -1});
}
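The shape rule above reduces to: the output is square, with side length equal to the flattened input's element count plus |k|. A short Python restatement of the same rule (a sketch assuming all axis extents are positive):

    def diagflat_out_shape(in_shape, k=0):
        n = 1
        for d in in_shape:
            n *= d             # total number of input elements
        side = n + abs(k)      # widen by |k| to fit the off-diagonal
        return (side, side)

    assert diagflat_out_shape((3, 2), k=2) == (8, 8)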

inline bool NumpyDiagflatOpShape(const nnvm::NodeAttrs& attrs,
mxnet::ShapeVector* in_attrs,
mxnet::ShapeVector* out_attrs) {
CHECK_EQ(in_attrs->size(), 1U);
CHECK_EQ(out_attrs->size(), 1U);

const mxnet::TShape& ishape = (*in_attrs)[0];
if (!mxnet::ndim_is_known(ishape)) {
return false;
}
const NumpyDiagflatParam& param = nnvm::get<NumpyDiagflatParam>(attrs.parsed);

  mxnet::TShape oshape = NumpyDiagflatShapeImpl(ishape, param.k);

if (shape_is_none(oshape)) {
LOG(FATAL) << "Diagonal does not exist.";
}
SHAPE_ASSIGN_CHECK(*out_attrs, 0, oshape);

return shape_is_known(out_attrs->at(0));
}

inline bool NumpyDiagflatOpType(const nnvm::NodeAttrs& attrs,
std::vector<int> *in_attrs,
std::vector<int> *out_attrs) {
CHECK_EQ(in_attrs->size(), 1U);
CHECK_EQ(out_attrs->size(), 1U);

TYPE_ASSIGN_CHECK(*out_attrs, 0, (*in_attrs)[0]);
TYPE_ASSIGN_CHECK(*in_attrs, 0, (*out_attrs)[0]);
return (*out_attrs)[0] != -1;
}

template<typename xpu, bool back>
void NumpyDiagflatOpImpl(const TBlob& in_data,
const TBlob& out_data,
const mxnet::TShape& ishape,
const mxnet::TShape& oshape,
index_t dsize,
const NumpyDiagflatParam& param,
mxnet_op::Stream<xpu> *s,
const std::vector<OpReqType>& req) {
using namespace mxnet_op;
using namespace mshadow;
MSHADOW_TYPE_SWITCH(out_data.type_flag_, DType, {
MXNET_ASSIGN_REQ_SWITCH(req[0], req_type, {
Kernel<diag_gen<req_type, back>, xpu>::Launch(s,
dsize,
out_data.dptr<DType>(),
in_data.dptr<DType>(),
Shape2(oshape[0], oshape[1]),
param.k);
});
});
}

template<typename xpu>
void NumpyDiagflatOpForward(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs) {
using namespace mxnet_op;
using namespace mshadow;
CHECK_EQ(inputs.size(), 1U);
CHECK_EQ(outputs.size(), 1U);
CHECK_EQ(req.size(), 1U);
CHECK_EQ(req[0], kWriteTo);
Stream<xpu> *s = ctx.get_stream<xpu>();
const TBlob& in_data = inputs[0];
const TBlob& out_data = outputs[0];
const mxnet::TShape& ishape = inputs[0].shape_;
const mxnet::TShape& oshape = outputs[0].shape_;
const NumpyDiagflatParam& param = nnvm::get<NumpyDiagflatParam>(attrs.parsed);

NumpyDiagflatOpImpl<xpu, false>(in_data, out_data, ishape,
oshape, out_data.Size(), param, s, req);
}

template<typename xpu>
void NumpyDiagflatOpBackward(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs) {
using namespace mxnet_op;
using namespace mshadow;
CHECK_EQ(inputs.size(), 1U);
CHECK_EQ(outputs.size(), 1U);
Stream<xpu> *s = ctx.get_stream<xpu>();

const TBlob& in_data = inputs[0];
const TBlob& out_data = outputs[0];
const mxnet::TShape& ishape = inputs[0].shape_;
const mxnet::TShape& oshape = outputs[0].shape_;
const NumpyDiagflatParam& param = nnvm::get<NumpyDiagflatParam>(attrs.parsed);

NumpyDiagflatOpImpl<xpu, true>(in_data, out_data, oshape,
ishape, in_data.Size(), param, s, req);
}
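The backward pass launches the same `diag_gen` kernel with `back = true`, the shapes swapped, and one thread per input element: because each input element lands in exactly one output cell, the input gradient is simply the `k`-th diagonal of the output gradient. In NumPy terms (an illustrative sketch, not the kernel itself):

    import numpy as np

    def diagflat_grad_ref(grad_out, in_shape, k=0):
        # Gather the k-th diagonal back and restore the input's shape.
        return np.diagonal(grad_out, offset=k).reshape(in_shape)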

} // namespace op
} // namespace mxnet

23 changes: 23 additions & 0 deletions src/operator/numpy/np_matrix_op.cc
@@ -37,6 +37,7 @@ DMLC_REGISTER_PARAMETER(NumpyRot90Param);
DMLC_REGISTER_PARAMETER(NumpyReshapeParam);
DMLC_REGISTER_PARAMETER(NumpyXReshapeParam);
DMLC_REGISTER_PARAMETER(NumpyDiagParam);
DMLC_REGISTER_PARAMETER(NumpyDiagflatParam);


bool NumpyTransposeShape(const nnvm::NodeAttrs& attrs,
@@ -1325,5 +1326,27 @@ NNVM_REGISTER_OP(_backward_np_diag)
.set_attr<nnvm::TIsBackward>("TIsBackward", true)
.set_attr<FCompute>("FCompute<cpu>", NumpyDiagOpBackward<cpu>);

NNVM_REGISTER_OP(_npi_diagflat)
.set_attr_parser(ParamParser<NumpyDiagflatParam>)
.set_num_inputs(1)
.set_num_outputs(1)
.set_attr<nnvm::FListInputNames>("FListInputNames",
[](const NodeAttrs& attrs) {
return std::vector<std::string>{"data"};
})
.set_attr<mxnet::FInferShape>("FInferShape", NumpyDiagflatOpShape)
.set_attr<nnvm::FInferType>("FInferType", NumpyDiagflatOpType)
.set_attr<FCompute>("FCompute<cpu>", NumpyDiagflatOpForward<cpu>)
.set_attr<nnvm::FGradient>("FGradient", ElemwiseGradUseNone{"_backward_npi_diagflat"})
.add_argument("data", "NDArray-or-Symbol", "Input ndarray")
.add_arguments(NumpyDiagflatParam::__FIELDS__());

NNVM_REGISTER_OP(_backward_npi_diagflat)
.set_attr_parser(ParamParser<NumpyDiagflatParam>)
.set_num_inputs(1)
.set_num_outputs(1)
.set_attr<nnvm::TIsBackward>("TIsBackward", true)
.set_attr<FCompute>("FCompute<cpu>", NumpyDiagflatOpBackward<cpu>);

} // namespace op
} // namespace mxnet
6 changes: 6 additions & 0 deletions src/operator/numpy/np_matrix_op.cu
@@ -124,5 +124,11 @@ NNVM_REGISTER_OP(_np_diag)
NNVM_REGISTER_OP(_backward_np_diag)
.set_attr<FCompute>("FCompute<gpu>", NumpyDiagOpBackward<gpu>);

NNVM_REGISTER_OP(_npi_diagflat)
.set_attr<FCompute>("FCompute<gpu>", NumpyDiagflatOpForward<gpu>);

NNVM_REGISTER_OP(_backward_npi_diagflat)
.set_attr<FCompute>("FCompute<gpu>", NumpyDiagflatOpBackward<gpu>);

} // namespace op
} // namespace mxnet
29 changes: 29 additions & 0 deletions tests/python/unittest/test_numpy_interoperability.py
@@ -1201,6 +1201,34 @@ def _add_workload_nonzero():
OpArgMngr.add_workload('nonzero', np.array([False, False, False], dtype=np.bool_))
OpArgMngr.add_workload('nonzero', np.array([True, False, False], dtype=np.bool_))

def _add_workload_diagflat():
def get_mat(n):
data = _np.arange(n)
        data = _np.add.outer(data, data)
return data

A = np.array([[1,2],[3,4],[5,6]])
vals = (100 * np.arange(5)).astype('l')
vals_c = (100 * np.array(get_mat(5)) + 1).astype('l')
vals_f = _np.array((100 * get_mat(5) + 1), order='F', dtype='l')
vals_f = np.array(vals_f)

OpArgMngr.add_workload('diagflat', A, k=2)
OpArgMngr.add_workload('diagflat', A, k=1)
OpArgMngr.add_workload('diagflat', A, k=0)
OpArgMngr.add_workload('diagflat', A, k=-1)
OpArgMngr.add_workload('diagflat', A, k=-2)
OpArgMngr.add_workload('diagflat', A, k=-3)
OpArgMngr.add_workload('diagflat', vals, k=0)
OpArgMngr.add_workload('diagflat', vals, k=2)
OpArgMngr.add_workload('diagflat', vals, k=-2)
OpArgMngr.add_workload('diagflat', vals_c, k=0)
OpArgMngr.add_workload('diagflat', vals_c, k=2)
OpArgMngr.add_workload('diagflat', vals_c, k=-2)
OpArgMngr.add_workload('diagflat', vals_f, k=0)
OpArgMngr.add_workload('diagflat', vals_f, k=2)
OpArgMngr.add_workload('diagflat', vals_f, k=-2)
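These workloads feed the interoperability harness, which runs each recorded call through both `mxnet.numpy` and official NumPy and compares the results. A hypothetical standalone equivalent of one such check:

    import numpy as onp
    from mxnet import np, npx
    npx.set_np()

    A = np.array([[1, 2], [3, 4], [5, 6]])
    expected = onp.diagflat(A.asnumpy(), k=2)
    assert onp.array_equal(np.diagflat(A, k=2).asnumpy(), expected)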


def _add_workload_shape():
OpArgMngr.add_workload('shape', np.random.uniform(size=()))
@@ -1263,6 +1291,7 @@ def _prepare_workloads():
_add_workload_cumsum()
_add_workload_ravel()
_add_workload_diag()
_add_workload_diagflat()
_add_workload_dot()
_add_workload_expand_dims()
_add_workload_fix()