Skip to content
This repository has been archived by the owner on Nov 17, 2023. It is now read-only.

Commit

Permalink
Add Gauss error function (erf) operator (#13229)
Browse files Browse the repository at this point in the history
* erf

register gpu

* add doc
  • Loading branch information
eric-haibin-lin authored and szha committed Nov 14, 2018
1 parent f79bb18 commit f5ba267
Show file tree
Hide file tree
Showing 8 changed files with 38 additions and 0 deletions.
1 change: 1 addition & 0 deletions docs/api/python/ndarray/ndarray.md
Original file line number Diff line number Diff line change
Expand Up @@ -656,6 +656,7 @@ The `ndarray` package provides several classes:
log_softmax
relu
sigmoid
erf
```

### More
Expand Down
1 change: 1 addition & 0 deletions docs/api/python/symbol/symbol.md
Original file line number Diff line number Diff line change
Expand Up @@ -657,6 +657,7 @@ Composite multiple symbols into a new one by an operator.
log_softmax
relu
sigmoid
erf
```

### More
Expand Down
2 changes: 2 additions & 0 deletions src/operator/math_functions-inl.h
Original file line number Diff line number Diff line change
Expand Up @@ -60,6 +60,8 @@ double name(double a, double b) { \
return ::name(a, b); \
}

// Gauss error function: erf(x) = 2/sqrt(pi) * integral_0..x exp(-t^2) dt.
// NOTE(review): presumably expands to a wrapper around ::erf from <cmath>
// (the unary macro body is above this hunk — confirm against full file).
MXNET_UNARY_MATH_FUNC(erf)

MXNET_UNARY_MATH_FUNC(exp)

MXNET_UNARY_MATH_FUNC(expm1)
Expand Down
4 changes: 4 additions & 0 deletions src/operator/mshadow_op.h
Original file line number Diff line number Diff line change
Expand Up @@ -169,6 +169,10 @@ struct softrelu : public mxnet_op::tunable {

MXNET_UNARY_MATH_OP(softrelu_grad, -math::expm1(-a));

// Derivative of the Gauss error function: erf'(a) = 2/sqrt(pi) * exp(-a^2).
// Used by the _backward_erf operator registered in elemwise_unary_op_basic.cc/.cu.
MXNET_UNARY_MATH_OP(erf_grad, 2.0 / math::sqrt(PI) * math::exp(-(a * a)));

// Forward element-wise Gauss error function.
MXNET_SIMPLE_UNARY_MATH_OP(erf);

MXNET_SIMPLE_UNARY_MATH_OP(exp);

MXNET_SIMPLE_UNARY_MATH_OP(expm1);
Expand Down
2 changes: 2 additions & 0 deletions src/operator/operator_tune.cc
Original file line number Diff line number Diff line change
Expand Up @@ -235,6 +235,8 @@ IMPLEMENT_UNARY_WORKLOAD_BWD(mxnet::op::mshadow_op::log2_grad); // NOLINT()
// Tuning-workload registrations. Keep each operator's FWD/BWD lines adjacent,
// matching the log2/log10/sinh pairs around this hunk — the erf lines were
// previously inserted between the sin FWD and sin_grad BWD registrations.
// These macros are independent registrations, so reordering is behavior-safe.
IMPLEMENT_UNARY_WORKLOAD_FWD(mxnet::op::mshadow_op::log10);  // NOLINT()
IMPLEMENT_UNARY_WORKLOAD_BWD(mxnet::op::mshadow_op::log10_grad);  // NOLINT()
IMPLEMENT_UNARY_WORKLOAD_FWD(mxnet::op::mshadow_op::erf);  // NOLINT()
IMPLEMENT_UNARY_WORKLOAD_BWD(mxnet::op::mshadow_op::erf_grad);  // NOLINT()
IMPLEMENT_UNARY_WORKLOAD_FWD(mxnet::op::mshadow_op::sin);  // NOLINT()
IMPLEMENT_UNARY_WORKLOAD_BWD(mxnet::op::mshadow_op::sin_grad);  // NOLINT()
IMPLEMENT_UNARY_WORKLOAD_FWD(mxnet::op::mshadow_op::sinh);  // NOLINT()
IMPLEMENT_UNARY_WORKLOAD_BWD(mxnet::op::mshadow_op::sinh_grad);  // NOLINT()
Expand Down
16 changes: 16 additions & 0 deletions src/operator/tensor/elemwise_unary_op_basic.cc
Original file line number Diff line number Diff line change
Expand Up @@ -886,6 +886,22 @@ The storage type of ``cbrt`` output depends upon the input storage type:
MXNET_OPERATOR_REGISTER_BINARY_WITH_SPARSE_CPU_DR(_backward_cbrt,
unary_bwd<mshadow_op::cube_root_grad>);

// erf
// Element-wise Gauss error function. Forward uses mshadow_op::erf; the
// gradient erf'(x) = 2/sqrt(pi) * exp(-x^2) is implemented by
// mshadow_op::erf_grad and wired through ElemwiseGradUseIn, since the
// backward pass needs the forward *input* x rather than the output.
// The docstring is reStructuredText: `Example::` needs a blank line and an
// indented literal block to render correctly on the website.
MXNET_OPERATOR_REGISTER_UNARY(erf)
.describe(R"code(Returns element-wise Gauss error function of the input.

Example::

   erf([0, -1., 10.]) = [0., -0.8427, 1.]

)code" ADD_FILELINE)
.set_attr<FCompute>("FCompute<cpu>", UnaryOp::Compute<cpu, mshadow_op::erf>)
.set_attr<nnvm::FGradient>("FGradient", ElemwiseGradUseIn{"_backward_erf"});

MXNET_OPERATOR_REGISTER_BINARY(_backward_erf)
.set_attr<FCompute>("FCompute<cpu>",
                    ElemwiseBinaryOp::Compute<cpu, unary_bwd<mshadow_op::erf_grad>>);

// rcbrt
MXNET_OPERATOR_REGISTER_UNARY(rcbrt)
.describe(R"code(Returns element-wise inverse cube-root value of the input.
Expand Down
8 changes: 8 additions & 0 deletions src/operator/tensor/elemwise_unary_op_basic.cu
Original file line number Diff line number Diff line change
Expand Up @@ -54,6 +54,14 @@ NNVM_REGISTER_OP(_backward_softsign)
.set_attr<FCompute>("FCompute<gpu>", ElemwiseBinaryOp::Compute<
gpu, unary_bwd<mshadow_op::softsign_grad>>);

// erf
// GPU compute kernels for the Gauss error function. The operator itself
// (docstring, gradient wiring) is registered on the CPU side in
// elemwise_unary_op_basic.cc; only the FCompute<gpu> attributes are added here.
NNVM_REGISTER_OP(erf)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::erf>);

// Backward: erf_grad (2/sqrt(pi) * exp(-x^2)) chained with the incoming gradient.
NNVM_REGISTER_OP(_backward_erf)
.set_attr<FCompute>("FCompute<gpu>",
ElemwiseBinaryOp::Compute<gpu, unary_bwd<mshadow_op::erf_grad>>);

// copy
NNVM_REGISTER_OP(_copy)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::IdentityCompute<gpu>)
Expand Down
4 changes: 4 additions & 0 deletions tests/python/unittest/test_operator.py
Original file line number Diff line number Diff line change
Expand Up @@ -3496,6 +3496,10 @@ def test_special_functions_using_scipy():
mathematical_core("gammaln", lambda x: mx.sym.gammaln(x), lambda x: scipy_special.gammaln(x),
lambda x: scipy_special.psi(x), 0.5, 0.5)

# erf: forward values checked against scipy.special.erf; the analytic
# gradient passed here is d/dx erf(x) = 2/sqrt(pi) * exp(-x^2).
mathematical_core("erf", lambda x: mx.sym.erf(x), lambda x: scipy_special.erf(x),
lambda x: 2.0 / math.sqrt(math.pi) * math.exp(-(x ** 2)), 0.5, 0.5)


def rounding(name, forward_mxnet_call, forward_numpy_call, data_init=5., grad_init=2.):
data = mx.symbol.Variable('data')
Expand Down

0 comments on commit f5ba267

Please sign in to comment.