[MXNET-853] Fix for smooth_l1 operator scalar default value (apache#12284)

* changed the smooth_l1 operator implementation to stop using the helper macros, since they do not provide enough support for checking arguments and setting custom default values

* added a test case for the smooth_l1 operator scalar default value

* fixed whitespace

* added curly braces for if/else to match mxnet style

* added more curly braces
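
With this change, calling smooth_l1 without the scalar argument falls back to a default of 1.0 instead of failing in the attribute parser (the removed code in the diff below reads attrs->dict["scalar"] with std::stod unconditionally). A minimal usage sketch of the fixed behavior, assuming the Python NDArray frontend; output comments are illustrative:

    import mxnet as mx

    x = mx.nd.array([1, 2, 3, 4])
    # Omitting `scalar` now behaves the same as passing scalar=1.0.
    y_default = mx.nd.smooth_l1(x)
    y_explicit = mx.nd.smooth_l1(x, scalar=1.0)
    print(y_default.asnumpy())   # [0.5 1.5 2.5 3.5]
    print(y_explicit.asnumpy())  # [0.5 1.5 2.5 3.5]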
samskalicky authored and anirudh2290 committed Sep 19, 2018
1 parent 5e3a7e5 commit da53bb2
Showing 2 changed files with 33 additions and 6 deletions.
35 changes: 29 additions & 6 deletions src/operator/tensor/elemwise_binary_scalar_op_extended.cc
@@ -83,7 +83,7 @@ MXNET_OPERATOR_REGISTER_BINARY(_backward_hypot_scalar)
 .set_attr<FCompute>("FCompute<cpu>", BinaryScalarOp::Backward<
   cpu, mshadow_op::hypot_grad_left>);
 
-MXNET_OPERATOR_REGISTER_BINARY_SCALAR(smooth_l1)
+NNVM_REGISTER_OP(smooth_l1)
 .describe(R"code(Calculate Smooth L1 Loss(lhs, scalar) by summing
 .. math::
@@ -98,17 +98,40 @@ where :math:`x` is an element of the tensor *lhs* and :math:`\sigma` is the scalar
 Example::
   smooth_l1([1, 2, 3, 4]) = [0.5, 1.5, 2.5, 3.5]
   smooth_l1([1, 2, 3, 4], scalar=1) = [0.5, 1.5, 2.5, 3.5]
 )code" ADD_FILELINE)
-.set_attr<FCompute>("FCompute<cpu>", BinaryScalarOp::Compute<
-                    cpu, mshadow_op::smooth_l1_loss>)
+.set_num_inputs(1)
+.set_num_outputs(1)
+.set_attr_parser([](NodeAttrs* attrs) {
+  if (attrs->dict.find("scalar") != attrs->dict.end()) {
+    attrs->parsed = std::stod(attrs->dict["scalar"]);
+  } else {
+    attrs->parsed = 1.0;
+  }
+})
+.set_attr<nnvm::FInferShape>("FInferShape", ElemwiseShape<1, 1>)
+.set_attr<nnvm::FInferType>("FInferType", ElemwiseType<1, 1>)
+.set_attr<nnvm::FInplaceOption>("FInplaceOption",
+  [](const NodeAttrs& attrs){
+    return std::vector<std::pair<int, int> >{{0, 0}};
+  })
+.add_argument("data", "NDArray-or-Symbol", "source input")
+.add_argument("scalar", "float", "scalar input")
+.set_attr<FCompute>("FCompute<cpu>", BinaryScalarOp::Compute<cpu, mshadow_op::smooth_l1_loss>)
 .set_attr<nnvm::FGradient>("FGradient", ElemwiseGradUseIn{ "_backward_smooth_l1" });
 
 MXNET_OPERATOR_REGISTER_BINARY(_backward_smooth_l1)
-.set_attr_parser([](NodeAttrs *attrs) { attrs->parsed = std::stod(attrs->dict["scalar"]); })
-.set_attr<FCompute>("FCompute<cpu>", BinaryScalarOp::Backward<
-                    cpu, mshadow_op::smooth_l1_gradient>);
+.set_attr_parser([](NodeAttrs *attrs) {
+  if (attrs->dict.find("scalar") != attrs->dict.end()) {
+    attrs->parsed = std::stod(attrs->dict["scalar"]);
+  } else {
+    attrs->parsed = 1.0;
+  }
+})
+.set_attr<FCompute>("FCompute<cpu>",
+                    BinaryScalarOp::Backward<cpu, mshadow_op::smooth_l1_gradient>);
 
 } // namespace op
 } // namespace mxnet
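
The backward registration receives the same guarded parser, so the default also applies to the gradient pass. A quick check with the autograd API (a sketch, assuming the Gluon-era mxnet.autograd interface; the expected output follows from the documented gradient, sign(x) in the linear region):

    import mxnet as mx
    from mxnet import autograd

    x = mx.nd.array([1, 2, 3, 4])
    x.attach_grad()
    with autograd.record():
        y = mx.nd.smooth_l1(x)  # default scalar, exercises _backward_smooth_l1 too
    y.backward()
    print(x.grad.asnumpy())  # expected [1. 1. 1. 1.] since |x| >= 1/sigma^2 here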
4 changes: 4 additions & 0 deletions tests/python/unittest/test_operator.py
@@ -5956,6 +5956,10 @@ def test_unary_math_operators():
                            lambda x: np_smooth_l1(x, 1.),
                            lambda x: np_smooth_l1_grad(x, 1.),
                            -2.0, 2.0],
+        'smooth_l1_sig_default': [lambda x: mx.sym.smooth_l1(x),
+                                  lambda x: np_smooth_l1(x, 1.),
+                                  lambda x: np_smooth_l1_grad(x, 1.),
+                                  -2.0, 2.0],
         'smooth_l1_sig2': [lambda x: mx.sym.smooth_l1(x, scalar=2.),
                            lambda x: np_smooth_l1(x, 2.),
                            lambda x: np_smooth_l1_grad(x, 2.),
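
For reference, np_smooth_l1 and np_smooth_l1_grad are NumPy helpers defined elsewhere in test_operator.py and not shown in this diff. A hedged sketch of the documented forward formula they presumably mirror, f(x) = 0.5*(sigma*x)^2 for |x| < 1/sigma^2 and |x| - 0.5/sigma^2 otherwise (np_smooth_l1_sketch is a hypothetical name for illustration):

    import numpy as np

    def np_smooth_l1_sketch(x, sigma):
        # Quadratic near zero, linear in the tails, switching at |x| = 1/sigma^2.
        x = np.asarray(x, dtype=np.float64)
        quadratic = 0.5 * (sigma * x) ** 2
        linear = np.abs(x) - 0.5 / sigma ** 2
        return np.where(np.abs(x) < 1.0 / sigma ** 2, quadratic, linear)

    print(np_smooth_l1_sketch([1, 2, 3, 4], 1.0))  # [0.5 1.5 2.5 3.5]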
