added extraction/generation of diagonal and triangular matrices to linalg
Asmus Hetzel committed Mar 22, 2019
1 parent 39412b3 commit 7d5e1db
Showing 7 changed files with 483 additions and 2 deletions.
6 changes: 5 additions & 1 deletion docs/api/python/ndarray/linalg.md
@@ -51,10 +51,14 @@ In the rest of this document, we list routines provided by the `ndarray.linalg`
potri
trmm
trsm
sumlogdiag
syrk
gelqf
syevd
sumlogdiag
extractdiag
makediag
extracttrian
maketrian
```

## API Reference
6 changes: 5 additions & 1 deletion docs/api/python/symbol/linalg.md
@@ -51,10 +51,14 @@ In the rest of this document, we list routines provided by the `symbol.linalg`
potri
trmm
trsm
sumlogdiag
syrk
gelqf
syevd
sumlogdiag
extractdiag
makediag
extracttrian
maketrian
```

## API Reference
94 changes: 94 additions & 0 deletions src/operator/tensor/la_op-inl.h
@@ -229,6 +229,100 @@ struct sumlogdiag {
}
};

template<bool forward>
struct CopyDiag {
  template<typename DType>
  MSHADOW_XINLINE static void Map(int i, int k, int n, DType* A, DType* B) {
    // Index of the matrix from which the diagonal should be extracted.
    const int matrix(i / (n-abs(k)));
    // Index of the diagonal element that should be extracted.
    const int index(i % (n-abs(k)));
    // row/col that must be looked up.
    const int row(index-(k < 0 ? k : 0)), col(index+(k > 0 ? k : 0));
    if (forward) {
      B[i] = A[(matrix*n+row)*n+col];
    } else {
      B[(matrix*n+row)*n+col] = A[i];
    }
  }
};
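The diagonal index mapping above can be sanity-checked against a small NumPy reference. This is an illustrative sketch only; `extractdiag_ref` is a hypothetical helper, not part of this patch.

```python
import numpy as np

def extractdiag_ref(A, k=0):
    """Mimics CopyDiag<true>: A has shape (batch, n, n); returns the
    k-th diagonals as an array of shape (batch, n - |k|)."""
    batch, n, _ = A.shape
    length = n - abs(k)
    B = np.empty((batch, length), dtype=A.dtype)
    for i in range(batch * length):
        matrix, index = divmod(i, length)   # which matrix / which diagonal entry
        row = index - (k if k < 0 else 0)   # shift down for sub-diagonals (k < 0)
        col = index + (k if k > 0 else 0)   # shift right for super-diagonals (k > 0)
        B[matrix, index] = A[matrix, row, col]
    return B

A = np.arange(1.0, 9.0).reshape(2, 2, 2)
assert np.allclose(extractdiag_ref(A, 0), [[1.0, 4.0], [5.0, 8.0]])
assert np.allclose(extractdiag_ref(A, 1), [[2.0], [6.0]])
```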

struct copydiag {
  // Extracts diagonal from matrix.
  template<typename xpu, typename DType>
  static void op(const Tensor<xpu, 3, DType>& A, const Tensor<xpu, 2, DType>& B,
                 const OpContext& ctx, const nnvm::NodeAttrs& attrs) {
    using namespace mxnet_op;
    Stream<xpu> *s = ctx.get_stream<xpu>();
    const LaDiagParam& param = nnvm::get<LaDiagParam>(attrs.parsed);
    Kernel<CopyDiag<true>, xpu>::Launch(s, B.MSize(), param.offset, A.size(1), A.dptr_, B.dptr_);
  }
  // Sets diagonal in matrix.
  template<typename xpu, typename DType>
  static void op(const Tensor<xpu, 2, DType>& A, const Tensor<xpu, 3, DType>& B,
                 const OpContext& ctx, const nnvm::NodeAttrs& attrs) {
    using namespace mxnet_op;
    Stream<xpu> *s = ctx.get_stream<xpu>();
    const LaDiagParam& param = nnvm::get<LaDiagParam>(attrs.parsed);
    Kernel<set_zero, xpu>::Launch(s, B.MSize(), B.dptr_);
    Kernel<CopyDiag<false>, xpu>::Launch(s, A.MSize(), param.offset, B.size(1), A.dptr_, B.dptr_);
  }
};

template<bool forward>
struct CopyTrian {
  template<typename DType>
  MSHADOW_XINLINE static void Map(int i, bool lower, int k, int n, DType* A, DType* B) {
    // Matrix that this index belongs to.
    const int matrix(i/(n*n));
    // Row/Col that this index represents.
    int row((i/n)%n), col(i%n);
    if ((k > 0) || ((k == 0) && !lower)) {
      // When working on the upper triangle we switch to transposed coordinates for indexing.
      int tmp(row);
      row = col;
      col = tmp;
    }
    // Actual row inside the lower triangular matrix after offset adjustment.
    row -= abs(k);
    if (row >= col) {
      // Index in the 1-dimensional array that holds the values of the triangle.
      const int index((row*(row+1))/2+col);
      // Total number of entries in the triangle.
      const int m(((n-abs(k))*(n-abs(k)+1))/2);
      if (forward) {
        B[m*matrix+index] = A[i];
      } else {
        B[i] = A[m*matrix+index];
      }
    }
  }
};
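Likewise, the packed triangle layout used by CopyTrian (row-major storage of the selected triangle, index `(row*(row+1))/2 + col` after the offset shift) can be mirrored in NumPy. Again a sketch for illustration; `extracttrian_ref` is a hypothetical helper.

```python
import numpy as np

def extracttrian_ref(A, k=0, lower=True):
    """Mimics CopyTrian<true>: packs the selected triangle of each (n, n)
    matrix in A (shape (batch, n, n)) into one 1-D vector per matrix."""
    batch, n, _ = A.shape
    m = (n - abs(k)) * (n - abs(k) + 1) // 2   # entries per triangle
    B = np.zeros((batch, m), dtype=A.dtype)
    for b in range(batch):
        for row in range(n):
            for col in range(n):
                # Upper triangles are handled in transposed coordinates.
                r, c = (col, row) if (k > 0 or (k == 0 and not lower)) else (row, col)
                r -= abs(k)                    # shift past the skipped diagonals
                if r >= c:
                    B[b, (r * (r + 1)) // 2 + c] = A[b, row, col]
    return B

A = np.array([[[1.0, 2.0], [3.0, 4.0]]])
assert np.allclose(extracttrian_ref(A), [[1.0, 3.0, 4.0]])
assert np.allclose(extracttrian_ref(A, lower=False), [[1.0, 2.0, 4.0]])
assert np.allclose(extracttrian_ref(A, k=1), [[2.0]])
```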

struct copytrian {
  // Extracts triangle from matrix.
  template<typename xpu, typename DType>
  static void op(const Tensor<xpu, 3, DType>& A, const Tensor<xpu, 2, DType>& B,
                 const OpContext& ctx, const nnvm::NodeAttrs& attrs) {
    using namespace mxnet_op;
    Stream<xpu> *s = ctx.get_stream<xpu>();
    const LaTrianParam& param = nnvm::get<LaTrianParam>(attrs.parsed);
    Kernel<CopyTrian<true>, xpu>::Launch(s, A.MSize(), param.lower, param.offset,
                                         A.size(1), A.dptr_, B.dptr_);
  }
  // Sets triangle in matrix.
  template<typename xpu, typename DType>
  static void op(const Tensor<xpu, 2, DType>& A, const Tensor<xpu, 3, DType>& B,
                 const OpContext& ctx, const nnvm::NodeAttrs& attrs) {
    using namespace mxnet_op;
    Stream<xpu> *s = ctx.get_stream<xpu>();
    const LaTrianParam& param = nnvm::get<LaTrianParam>(attrs.parsed);
    Kernel<set_zero, xpu>::Launch(s, B.MSize(), B.dptr_);
    Kernel<CopyTrian<false>, xpu>::Launch(s, B.MSize(), param.lower, param.offset,
                                          B.size(1), A.dptr_, B.dptr_);
  }
};

// B = syrk(A)
struct syrk {
template<typename xpu, typename DType>
241 changes: 241 additions & 0 deletions src/operator/tensor/la_op.cc
@@ -33,6 +33,8 @@ DMLC_REGISTER_PARAMETER(LaMatrixMacParam);
DMLC_REGISTER_PARAMETER(LaMatrixMultParam);
DMLC_REGISTER_PARAMETER(LaCholeskyParam);
DMLC_REGISTER_PARAMETER(LaTriangMatrixMultParam);
DMLC_REGISTER_PARAMETER(LaDiagParam);
DMLC_REGISTER_PARAMETER(LaTrianParam);
DMLC_REGISTER_PARAMETER(LaSyrkParam);

NNVM_REGISTER_OP(_linalg_gemm)
@@ -461,6 +463,245 @@ NNVM_REGISTER_OP(_backward_linalg_sumlogdiag)
.set_attr<nnvm::TIsBackward>("TIsBackward", true)
.set_attr<FCompute>("FCompute<cpu>", LaOpBackward<cpu, 2, 2, 2, 1, sumlogdiag_backward>);

NNVM_REGISTER_OP(_linalg_extractdiag)
.add_alias("linalg_extractdiag")
.describe(R"code(Extracts the diagonal entries of a square matrix.
Input is a tensor *A* of dimension *n >= 2*.
If *n=2*, then *A* represents a single square matrix which diagonal elements get extracted as a 1-dimensional tensor.
If *n>2*, then *A* represents a batch of square matrices on the trailing two dimensions. The extracted diagonals are
returned as an *n-1*-dimensional tensor.
.. note:: The operator supports float32 and float64 data types only.
Examples::
// Single matrix diagonal extraction
A = [[1.0, 2.0],
[3.0, 4.0]]
extractdiag(A) = [1.0, 4.0]
extractdiag(A, 1) = [2.0]
// Batch matrix diagonal extraction
A = [[[1.0, 2.0],
[3.0, 4.0]],
[[5.0, 6.0],
[7.0, 8.0]]]
extractdiag(A) = [[1.0, 4.0],
[5.0, 8.0]]
)code" ADD_FILELINE)
.set_num_inputs(1)
.set_num_outputs(1)
.set_attr_parser(ParamParser<LaDiagParam>)
.set_attr<nnvm::FListInputNames>("FListInputNames", [](const NodeAttrs& attrs)
{ return std::vector<std::string>{"A"}; } )
.set_attr<mxnet::FInferShape>("FInferShape", LaDiagTrianShape<true, true>)
.set_attr<nnvm::FInferType>("FInferType", ElemwiseType<1, 1>)
.set_attr<FCompute>("FCompute<cpu>", LaOpForward<cpu, 2, 1, 1, 1, copydiag>)
.set_attr<nnvm::FGradient>("FGradient", ElemwiseGradUseNone{"_backward_linalg_extractdiag"})
.add_argument("A", "NDArray-or-Symbol", "Tensor of square matrices")
.add_arguments(LaDiagParam::__FIELDS__());
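A minimal usage sketch from the Python frontend, assuming the operator is exposed as `mx.nd.linalg.extractdiag` (as listed in the ndarray.linalg doc change above) with an `offset` keyword:

```python
import mxnet as mx

A = mx.nd.array([[1.0, 2.0],
                 [3.0, 4.0]])
print(mx.nd.linalg.extractdiag(A).asnumpy())            # [1. 4.]
print(mx.nd.linalg.extractdiag(A, offset=1).asnumpy())  # [2.]
```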

NNVM_REGISTER_OP(_backward_linalg_extractdiag)
.set_num_inputs(1)
.set_num_outputs(1)
.set_attr_parser(ParamParser<LaDiagParam>)
.set_attr<FResourceRequest>("FResourceRequest", [](const NodeAttrs& attrs)
{ return std::vector<ResourceRequest>{ResourceRequest::kTempSpace}; })
.set_attr<nnvm::TIsBackward>("TIsBackward", true)
.set_attr<FCompute>("FCompute<cpu>", LaOpBackward<cpu, 1, 2, 1, 1, copydiag>);

NNVM_REGISTER_OP(_linalg_makediag)
.add_alias("linalg_makediag")
.describe(R"code(Constructs a square matrix with the input as diagonal.
Input is a tensor *A* of dimension *n >= 1*.
If *n=1*, then *A* represents the diagonal entries of a single square matrix.
This matrix will be returned as a 2-dimensional tensor.
If *n>1*, then *A* represents a batch of diagonals of square matrices. The batch of diagonal
matrices will be returned as an *n+1*-dimensional tensor.
.. note:: The operator supports float32 and float64 data types only.
Examples::
// Single diagonal matrix construction
A = [1.0, 2.0]
makediag(A) = [[1.0, 0.0],
[0.0, 2.0]]
makediag(A, 1) = [[0.0, 1.0, 0.0],
[0.0, 0.0, 2.0],
[0.0, 0.0, 0.0]]
// Batch diagonal matrix construction
A = [[1.0, 2.0],
[3.0, 4.0]]
makediag(A) = [[[1.0, 0.0],
[0.0, 2.0]],
[[3.0, 0.0],
[0.0, 4.0]]]
)code" ADD_FILELINE)
.set_num_inputs(1)
.set_num_outputs(1)
.set_attr_parser(ParamParser<LaDiagParam>)
.set_attr<nnvm::FListInputNames>("FListInputNames", [](const NodeAttrs& attrs)
{ return std::vector<std::string>{"A"}; } )
.set_attr<mxnet::FInferShape>("FInferShape", LaDiagTrianShape<true, false>)
.set_attr<nnvm::FInferType>("FInferType", ElemwiseType<1, 1>)
.set_attr<FCompute>("FCompute<cpu>", LaOpForward<cpu, 1, 2, 1, 1, copydiag>)
.set_attr<nnvm::FGradient>("FGradient", ElemwiseGradUseNone{"_backward_linalg_makediag"})
.add_argument("A", "NDArray-or-Symbol", "Tensor of diagonal entries")
.add_arguments(LaDiagParam::__FIELDS__());
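Assuming the same Python exposure for `mx.nd.linalg.makediag`, a short round-trip sketch: applying extractdiag to the constructed matrix should recover the original diagonal.

```python
import mxnet as mx

d = mx.nd.array([1.0, 2.0])
D = mx.nd.linalg.makediag(d)                  # [[1. 0.], [0. 2.]]
print(mx.nd.linalg.extractdiag(D).asnumpy())  # [1. 2.]
```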

NNVM_REGISTER_OP(_backward_linalg_makediag)
.set_num_inputs(1)
.set_num_outputs(1)
.set_attr_parser(ParamParser<LaDiagParam>)
.set_attr<FResourceRequest>("FResourceRequest", [](const NodeAttrs& attrs)
{ return std::vector<ResourceRequest>{ResourceRequest::kTempSpace}; })
.set_attr<nnvm::TIsBackward>("TIsBackward", true)
.set_attr<FCompute>("FCompute<cpu>", LaOpBackward<cpu, 2, 1, 1, 1, copydiag>);

NNVM_REGISTER_OP(_linalg_extracttrian)
.add_alias("linalg_extracttrian")
.describe(R"code(Extracts a triangular sub-matrix from a square matrix.
Input is a tensor *A* of dimension *n >= 2*.
If *n=2*, then *A* represents a single square matrix from which a triangular sub-matrix is
extracted as a 1-dimensional tensor.
If *n>2*, then *A* represents a batch of square matrices on the trailing two dimensions. The
extracted triangular sub-matrices are returned as an *n-1*-dimensional tensor.
The *offset* and *lower* parameters determine the triangle to be extracted:
* When *offset = 0* either the lower or upper triangle with respect to the main
diagonal is extracted depending on the value of parameter *lower*.
* When *offset = k > 0* the upper triangle with respect to the k-th diagonal above the
main diagonal is extracted.
* When *offset = k < 0* the lower triangle with respect to the k-th diagonal below the
main diagonal is extracted.
.. note:: The operator supports float32 and float64 data types only.
Examples::
// Single triagonal extraction
A = [[1.0, 2.0],
[3.0, 4.0]]
extracttrian(A) = [1.0, 3.0, 4.0]
extracttrian(A, lower=False) = [1.0, 2.0, 4.0]
extracttrian(A, 1) = [2.0]
extracttrian(A, -1) = [3.0]
// Batch triagonal extraction
A = [[[1.0, 2.0],
[3.0, 4.0]],
[[5.0, 6.0],
[7.0, 8.0]]]
extracttrian(A) = [[1.0, 3.0, 4.0],
[5.0, 7.0, 8.0]]
)code" ADD_FILELINE)
.set_num_inputs(1)
.set_num_outputs(1)
.set_attr_parser(ParamParser<LaTrianParam>)
.set_attr<nnvm::FListInputNames>("FListInputNames", [](const NodeAttrs& attrs)
{ return std::vector<std::string>{"A"}; } )
.set_attr<mxnet::FInferShape>("FInferShape", LaDiagTrianShape<false, true>)
.set_attr<nnvm::FInferType>("FInferType", ElemwiseType<1, 1>)
.set_attr<FCompute>("FCompute<cpu>", LaOpForward<cpu, 2, 1, 1, 1, copytrian>)
.set_attr<nnvm::FGradient>("FGradient", ElemwiseGradUseNone{"_backward_linalg_extracttrian"})
.add_argument("A", "NDArray-or-Symbol", "Tensor of square matrices")
.add_arguments(LaTrianParam::__FIELDS__());
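A hedged usage sketch, assuming `mx.nd.linalg.extracttrian` is exposed with the `offset` and `lower` parameters described above:

```python
import mxnet as mx

A = mx.nd.array([[1.0, 2.0],
                 [3.0, 4.0]])
print(mx.nd.linalg.extracttrian(A).asnumpy())               # [1. 3. 4.]
print(mx.nd.linalg.extracttrian(A, lower=False).asnumpy())  # [1. 2. 4.]
print(mx.nd.linalg.extracttrian(A, offset=1).asnumpy())     # [2.]
```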

NNVM_REGISTER_OP(_backward_linalg_extracttrian)
.set_num_inputs(1)
.set_num_outputs(1)
.set_attr_parser(ParamParser<LaTrianParam>)
.set_attr<FResourceRequest>("FResourceRequest", [](const NodeAttrs& attrs)
{ return std::vector<ResourceRequest>{ResourceRequest::kTempSpace}; })
.set_attr<nnvm::TIsBackward>("TIsBackward", true)
.set_attr<FCompute>("FCompute<cpu>", LaOpBackward<cpu, 1, 2, 1, 1, copytrian>);

NNVM_REGISTER_OP(_linalg_maketrian)
.add_alias("linalg_maketrian")
.describe(R"code(Constructs a square matrix with the input representing a specific triangular sub-matrix.
This is basically the inverse of *linalg.extracttrian*. Input is a tensor *A* of dimension *n >= 1*.
If *n=1*, then *A* represents the entries of a triangular matrix which is lower triangular if *offset<0*
or *offset=0*, *lower=true*. The resulting matrix is derived by first constructing the square
matrix with the entries outside the triangle set to zero and then adding *offset*-times an additional
diagonal with zero entries to the square matrix.
If *n>1*, then *A* represents a batch of triangular sub-matrices. The batch of corresponding square
matrices is returned as an *n+1*-dimensional tensor.
.. note:: The operator supports float32 and float64 data types only.
Examples::
// Single matrix construction
A = [1.0, 2.0, 3.0]
maketrian(A) = [[1.0, 0.0],
[2.0, 3.0]]
maketrian(A, lower=false) = [[1.0, 2.0],
[0.0, 3.0]]
maketrian(A, offset=1) = [[0.0, 1.0, 2.0],
[0.0, 0.0, 3.0],
[0.0, 0.0, 0.0]]
maketrian(A, offset=-1) = [[0.0, 0.0, 0.0],
[1.0, 0.0, 0.0],
[2.0, 3.0, 0.0]]
// Batch matrix construction
A = [[1.0, 2.0, 3.0],
[4.0, 5.0, 6.0]]
maketrian(A) = [[[1.0, 0.0],
[2.0, 3.0]],
[[4.0, 0.0],
[5.0, 6.0]]]
maketrian(A, offset=1) = [[[0.0, 1.0, 2.0],
[0.0, 0.0, 3.0],
[0.0, 0.0, 0.0]],
[[0.0, 4.0, 5.0],
[0.0, 0.0, 6.0],
[0.0, 0.0, 0.0]]]
)code" ADD_FILELINE)
.set_num_inputs(1)
.set_num_outputs(1)
.set_attr_parser(ParamParser<LaTrianParam>)
.set_attr<nnvm::FListInputNames>("FListInputNames", [](const NodeAttrs& attrs)
{ return std::vector<std::string>{"A"}; } )
.set_attr<mxnet::FInferShape>("FInferShape", LaDiagTrianShape<false, false>)
.set_attr<nnvm::FInferType>("FInferType", ElemwiseType<1, 1>)
.set_attr<FCompute>("FCompute<cpu>", LaOpForward<cpu, 1, 2, 1, 1, copytrian>)
.set_attr<nnvm::FGradient>("FGradient", ElemwiseGradUseNone{"_backward_linalg_maketrian"})
.add_argument("A", "NDArray-or-Symbol", "Tensor of triangular matrices stored as vectors")
.add_arguments(LaTrianParam::__FIELDS__());
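And a corresponding round-trip sketch for `mx.nd.linalg.maketrian` (again assuming this Python exposure): extracttrian applied to the constructed matrix should recover the packed input for matching offset/lower settings.

```python
import mxnet as mx

v = mx.nd.array([1.0, 2.0, 3.0])
L = mx.nd.linalg.maketrian(v)                  # [[1. 0.], [2. 3.]]
print(mx.nd.linalg.extracttrian(L).asnumpy())  # [1. 2. 3.]
```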

NNVM_REGISTER_OP(_backward_linalg_maketrian)
.set_num_inputs(1)
.set_num_outputs(1)
.set_attr_parser(ParamParser<LaTrianParam>)
.set_attr<FResourceRequest>("FResourceRequest", [](const NodeAttrs& attrs)
{ return std::vector<ResourceRequest>{ResourceRequest::kTempSpace}; })
.set_attr<nnvm::TIsBackward>("TIsBackward", true)
.set_attr<FCompute>("FCompute<cpu>", LaOpBackward<cpu, 2, 1, 1, 1, copytrian>);

NNVM_REGISTER_OP(_linalg_syrk)
.add_alias("linalg_syrk")
.describe(R"code(Multiplication of matrix with its transpose.