From 9f35c9978e37db36aa9be35c1d9f00cc687eeb24 Mon Sep 17 00:00:00 2001
From: kshitij12345
Date: Thu, 11 Jul 2019 19:46:58 +0530
Subject: [PATCH 1/9] support arcsin, arccos for higher order grad

---
 src/operator/tensor/elemwise_unary_op_trig.cc | 61 ++++++++++++++++++-
 1 file changed, 59 insertions(+), 2 deletions(-)

diff --git a/src/operator/tensor/elemwise_unary_op_trig.cc b/src/operator/tensor/elemwise_unary_op_trig.cc
index 13410e9422a5..409f99cba799 100644
--- a/src/operator/tensor/elemwise_unary_op_trig.cc
+++ b/src/operator/tensor/elemwise_unary_op_trig.cc
@@ -161,7 +161,35 @@ The storage type of ``arcsin`` output depends upon the input storage type:
 .set_attr<nnvm::FGradient>("FGradient", ElemwiseGradUseIn{ "_backward_arcsin" });
 
 MXNET_OPERATOR_REGISTER_BINARY_WITH_SPARSE_CPU_DR(_backward_arcsin,
-                                                  unary_bwd<mshadow_op::arcsin_grad>);
+                                                  unary_bwd<mshadow_op::arcsin_grad>)
+.set_attr<nnvm::FGradient>("FGradient",
+    [](const nnvm::NodePtr& n, const std::vector<nnvm::NodeEntry>& ograds) {
+      // ograds[0]: head_grad_grads (dL/dy_grad)
+      // inputs[0]: dL/dy
+      // inputs[1]: x (ElemwiseGradUseIn)
+      // f(x) = arcsin(x)
+      // n: f'(x) = 1/(1-x^2)^1/2
+      // f''(x) = f'(x) * x/(1-x^2)
+      auto dydx = n->inputs[0];
+      auto x = n->inputs[1];
+      auto dydx_mul_grad_x = nnvm::NodeEntry{n};
+      auto grad_x = MakeNode("elemwise_div", n->attrs.name + "_grad_x",
+                             {dydx_mul_grad_x, dydx}, nullptr, &n);
+      auto grad_x_square = MakeNode("square", n->attrs.name + "_grad_x_square",
+                                    {nnvm::NodeEntry{grad_x}}, nullptr, &n);
+      auto grad_x_square_mul_x = MakeNode("elemwise_mul", n->attrs.name + "_grad_x_square_mul_x",
+                                          {nnvm::NodeEntry{grad_x_square}, x}, nullptr, &n);
+      auto grad_grad_x = MakeNode("elemwise_mul", n->attrs.name + "_grad_grad_x",
+                                  {dydx_mul_grad_x, nnvm::NodeEntry{grad_x_square_mul_x}},
+                                  nullptr, &n);
+
+      std::vector<nnvm::NodeEntry> ret;
+      ret.emplace_back(MakeNode("elemwise_mul", n->attrs.name + "_backward_grad_grad",
+                                {ograds[0], nnvm::NodeEntry{grad_x}}, nullptr, &n));
+      ret.emplace_back(MakeNode("elemwise_mul", n->attrs.name + "_backward_grad_grad_in",
+                                {ograds[0], nnvm::NodeEntry{grad_grad_x}}, nullptr, &n));
+      return ret;
+    });
 
 // arccos
 MXNET_OPERATOR_REGISTER_UNARY_WITH_SPARSE_DR(arccos, cpu, mshadow_op::arccos)
@@ -180,7 +208,36 @@ The storage type of ``arccos`` output is always dense
 .set_attr<nnvm::FGradient>("FGradient", ElemwiseGradUseIn{ "_backward_arccos" });
 
 MXNET_OPERATOR_REGISTER_BINARY_WITH_SPARSE_CPU_DR(_backward_arccos,
-                                                  unary_bwd<mshadow_op::arccos_grad>);
+                                                  unary_bwd<mshadow_op::arccos_grad>)
+.set_attr<nnvm::FGradient>("FGradient",
+    [](const nnvm::NodePtr& n, const std::vector<nnvm::NodeEntry>& ograds) {
+      // ograds[0]: head_grad_grads (dL/dy_grad)
+      // inputs[0]: dL/dy
+      // inputs[1]: x (ElemwiseGradUseIn)
+      // f(x) = arccos(x)
+      // n: f'(x) = -1/(1-x^2)^1/2
+      // f''(x) = f'(x) * x/(1-x^2)
+      auto dydx = n->inputs[0];
+      auto x = n->inputs[1];
+      auto dydx_mul_grad_x = nnvm::NodeEntry{n};
+      auto grad_x = MakeNode("elemwise_div", n->attrs.name + "_grad_x",
+                             {dydx_mul_grad_x, dydx}, nullptr, &n);
+      auto grad_x_square = MakeNode("square", n->attrs.name + "_grad_x_square",
+                                    {nnvm::NodeEntry{grad_x}}, nullptr, &n);
+      auto grad_x_square_mul_x = MakeNode("elemwise_mul", n->attrs.name + "_grad_x_square_mul_x",
+                                          {nnvm::NodeEntry{grad_x_square}, x}, nullptr, &n);
+      auto grad_grad_x = MakeNode("elemwise_mul", n->attrs.name + "_grad_grad_x",
+                                  {dydx_mul_grad_x, nnvm::NodeEntry{grad_x_square_mul_x}},
+                                  nullptr, &n);
+
+      std::vector<nnvm::NodeEntry> ret;
+      ret.emplace_back(MakeNode("elemwise_mul", n->attrs.name + "_backward_grad_grad",
+                                {ograds[0], nnvm::NodeEntry{grad_x}}, nullptr, &n));
+      ret.emplace_back(MakeNode("elemwise_mul", n->attrs.name + "_backward_grad_grad_in",
+                                {ograds[0], nnvm::NodeEntry{grad_grad_x}}, nullptr, &n));
+      return ret;
+    });
+
 
 // arctan
 MXNET_OPERATOR_REGISTER_UNARY_WITH_RSP_CSR(arctan, cpu, mshadow_op::arctan)

From df3551b50d9200d30f55abf39264b0c1dc82ac4c Mon Sep 17 00:00:00 2001
From: kshitij12345
Date: Thu, 11 Jul 2019 19:51:36 +0530
Subject: [PATCH 2/9] add relevant tests

---
 .../python/unittest/test_higher_order_grad.py | 38 +++++++++++++++++++
 1 file changed, 38 insertions(+)

diff --git a/tests/python/unittest/test_higher_order_grad.py b/tests/python/unittest/test_higher_order_grad.py
index 0f07d014d435..041aadfcbebf 100644
--- a/tests/python/unittest/test_higher_order_grad.py
+++ b/tests/python/unittest/test_higher_order_grad.py
@@ -50,6 +50,44 @@ def grad_grad_op(x):
     check_second_order_unary(array, cos, grad_grad_op)
 
 
+@with_seed()
+def test_arcsin():
+    def arcsin(x):
+        return nd.arcsin(x)
+
+    def grad_grad_op(x):
+        return x / nd.sqrt((1-x**2)**3)
+
+    for dim in range(1, 5):
+        shape = rand_shape_nd(dim)
+        array = random_arrays(shape)
+        # Hack: Decrease std_dev to make
+        # sure all elements
+        # are in range -1 to 1
+        # i.e. Domain of arcsin
+        array *= 0.2
+        check_second_order_unary(array, arcsin, grad_grad_op)
+
+
+@with_seed()
+def test_arccos():
+    def arccos(x):
+        return nd.arccos(x)
+
+    def grad_grad_op(x):
+        return -x / nd.sqrt((1-x**2)**3)
+
+    for dim in range(1, 5):
+        shape = rand_shape_nd(dim)
+        array = random_arrays(shape)
+        # Hack: Decrease std_dev to make
+        # sure all elements
+        # are in range -1 to 1
+        # i.e. Domain of arccos
+        array *= 0.2
+        check_second_order_unary(array, arccos, grad_grad_op)
+
+
 @with_seed()
 def test_relu():
     def relu(x):

From 7daaf765de045fc7839e8639d77cf08c10f3fe9c Mon Sep 17 00:00:00 2001
From: kshitij12345
Date: Thu, 11 Jul 2019 20:07:02 +0530
Subject: [PATCH 3/9] add small note for computation

---
 src/operator/tensor/elemwise_unary_op_trig.cc | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/src/operator/tensor/elemwise_unary_op_trig.cc b/src/operator/tensor/elemwise_unary_op_trig.cc
index 409f99cba799..9ad6baa01537 100644
--- a/src/operator/tensor/elemwise_unary_op_trig.cc
+++ b/src/operator/tensor/elemwise_unary_op_trig.cc
@@ -170,6 +170,7 @@ MXNET_OPERATOR_REGISTER_BINARY_WITH_SPARSE_CPU_DR(_backward_arcsin,
       // f(x) = arcsin(x)
       // n: f'(x) = 1/(1-x^2)^1/2
       // f''(x) = f'(x) * x/(1-x^2)
+      // Note: x/(1-x^2) = x * f'(x)^2
       auto dydx = n->inputs[0];
       auto x = n->inputs[1];
       auto dydx_mul_grad_x = nnvm::NodeEntry{n};
@@ -217,6 +218,7 @@ MXNET_OPERATOR_REGISTER_BINARY_WITH_SPARSE_CPU_DR(_backward_arccos,
       // f(x) = arccos(x)
       // n: f'(x) = -1/(1-x^2)^1/2
       // f''(x) = f'(x) * x/(1-x^2)
+      // Note: x/(1-x^2) = x * f'(x)^2
       auto dydx = n->inputs[0];
       auto x = n->inputs[1];
       auto dydx_mul_grad_x = nnvm::NodeEntry{n};

From 362632b85ba318aa20633cbc1c90a79114bf9240 Mon Sep 17 00:00:00 2001
From: kshitij12345
Date: Fri, 26 Jul 2019 20:36:43 +0530
Subject: [PATCH 4/9] update comments

---
 src/operator/tensor/elemwise_unary_op_trig.cc | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/src/operator/tensor/elemwise_unary_op_trig.cc b/src/operator/tensor/elemwise_unary_op_trig.cc
index 9ad6baa01537..f2ad5c325de0 100644
--- a/src/operator/tensor/elemwise_unary_op_trig.cc
+++ b/src/operator/tensor/elemwise_unary_op_trig.cc
@@ -164,7 +164,7 @@ MXNET_OPERATOR_REGISTER_BINARY_WITH_SPARSE_CPU_DR(_backward_arcsin,
                                                   unary_bwd<mshadow_op::arcsin_grad>)
 .set_attr<nnvm::FGradient>("FGradient",
     [](const nnvm::NodePtr& n, const std::vector<nnvm::NodeEntry>& ograds) {
-      // ograds[0]: head_grad_grads (dL/dy_grad)
+      // ograds[0]: head_grad_grads (dL/dxgrad)
       // inputs[0]: dL/dy
       // inputs[1]: x (ElemwiseGradUseIn)
       // f(x) = arcsin(x)
@@ -212,7 +212,7 @@ MXNET_OPERATOR_REGISTER_BINARY_WITH_SPARSE_CPU_DR(_backward_arccos,
                                                   unary_bwd<mshadow_op::arccos_grad>)
 .set_attr<nnvm::FGradient>("FGradient",
     [](const nnvm::NodePtr& n, const std::vector<nnvm::NodeEntry>& ograds) {
-      // ograds[0]: head_grad_grads (dL/dy_grad)
+      // ograds[0]: head_grad_grads (dL/dxgrad)
       // inputs[0]: dL/dy
       // inputs[1]: x (ElemwiseGradUseIn)
       // f(x) = arccos(x)

From 4d7758437f7b12d7cfe4a4cdf72efb64530e71a9 Mon Sep 17 00:00:00 2001
From: kshitij12345
Date: Thu, 12 Sep 2019 22:52:37 +0530
Subject: [PATCH 5/9] use NodeOpGen

---
 src/operator/tensor/elemwise_unary_op_trig.cc | 42 +++++++------------
 1 file changed, 16 insertions(+), 26 deletions(-)

diff --git a/src/operator/tensor/elemwise_unary_op_trig.cc b/src/operator/tensor/elemwise_unary_op_trig.cc
index 1b0b3846d1db..3da26fa271c1 100644
--- a/src/operator/tensor/elemwise_unary_op_trig.cc
+++ b/src/operator/tensor/elemwise_unary_op_trig.cc
@@ -201,21 +201,16 @@ MXNET_OPERATOR_REGISTER_BINARY_WITH_SPARSE_CPU_DR(_backward_arcsin,
       auto dydx = n->inputs[0];
       auto x = n->inputs[1];
       auto dydx_mul_grad_x = nnvm::NodeEntry{n};
-      auto grad_x = MakeNode("elemwise_div", n->attrs.name + "_grad_x",
-                             {dydx_mul_grad_x, dydx}, nullptr, &n);
-      auto grad_x_square = MakeNode("square", n->attrs.name + "_grad_x_square",
-                                    {nnvm::NodeEntry{grad_x}}, nullptr, &n);
-      auto grad_x_square_mul_x = MakeNode("elemwise_mul", n->attrs.name + "_grad_x_square_mul_x",
-                                          {nnvm::NodeEntry{grad_x_square}, x}, nullptr, &n);
-      auto grad_grad_x = MakeNode("elemwise_mul", n->attrs.name + "_grad_grad_x",
-                                  {dydx_mul_grad_x, nnvm::NodeEntry{grad_x_square_mul_x}},
-                                  nullptr, &n);
+      auto op = mxnet::util::NodeOpGen{n};
+
+      auto grad_x = op.div(dydx_mul_grad_x, dydx);
+      auto grad_x_square = op.square(grad_x);
+      auto grad_x_square_mul_x = op.mul(grad_x_square, x);
+      auto grad_grad_x = op.mul(dydx_mul_grad_x, grad_x_square_mul_x);
 
       std::vector<nnvm::NodeEntry> ret;
-      ret.emplace_back(MakeNode("elemwise_mul", n->attrs.name + "_backward_grad_grad",
-                                {ograds[0], nnvm::NodeEntry{grad_x}}, nullptr, &n));
-      ret.emplace_back(MakeNode("elemwise_mul", n->attrs.name + "_backward_grad_grad_in",
-                                {ograds[0], nnvm::NodeEntry{grad_grad_x}}, nullptr, &n));
+      ret.emplace_back(op.mul(ograds[0], grad_x));
+      ret.emplace_back(op.mul(ograds[0], grad_grad_x));
       return ret;
     });
 
@@ -249,21 +244,16 @@ MXNET_OPERATOR_REGISTER_BINARY_WITH_SPARSE_CPU_DR(_backward_arccos,
       auto dydx = n->inputs[0];
       auto x = n->inputs[1];
       auto dydx_mul_grad_x = nnvm::NodeEntry{n};
-      auto grad_x = MakeNode("elemwise_div", n->attrs.name + "_grad_x",
-                             {dydx_mul_grad_x, dydx}, nullptr, &n);
-      auto grad_x_square = MakeNode("square", n->attrs.name + "_grad_x_square",
-                                    {nnvm::NodeEntry{grad_x}}, nullptr, &n);
-      auto grad_x_square_mul_x = MakeNode("elemwise_mul", n->attrs.name + "_grad_x_square_mul_x",
-                                          {nnvm::NodeEntry{grad_x_square}, x}, nullptr, &n);
-      auto grad_grad_x = MakeNode("elemwise_mul", n->attrs.name + "_grad_grad_x",
-                                  {dydx_mul_grad_x, nnvm::NodeEntry{grad_x_square_mul_x}},
-                                  nullptr, &n);
+      auto op = mxnet::util::NodeOpGen{n};
+
+      auto grad_x = op.div(dydx_mul_grad_x, dydx);
+      auto grad_x_square = op.square(grad_x);
+      auto grad_x_square_mul_x = op.mul(grad_x_square, x);
+      auto grad_grad_x = op.mul(dydx_mul_grad_x, grad_x_square_mul_x);
 
       std::vector<nnvm::NodeEntry> ret;
-      ret.emplace_back(MakeNode("elemwise_mul", n->attrs.name + "_backward_grad_grad",
-                                {ograds[0], nnvm::NodeEntry{grad_x}}, nullptr, &n));
-      ret.emplace_back(MakeNode("elemwise_mul", n->attrs.name + "_backward_grad_grad_in",
-                                {ograds[0], nnvm::NodeEntry{grad_grad_x}}, nullptr, &n));
+      ret.emplace_back(op.mul(ograds[0], grad_x));
+      ret.emplace_back(op.mul(ograds[0], grad_grad_x));
       return ret;
     });
 

From 5e2c3e6220371db7ee99dd192789e95f0b9422f5 Mon Sep 17 00:00:00 2001
From: kshitij12345
Date: Sat, 14 Sep 2019 19:16:18 +0530
Subject: [PATCH 6/9] retrigger CI

From 743cc7067a35499ee38ce3da55ff7e7791b26403 Mon Sep 17 00:00:00 2001
From: kshitij12345
Date: Wed, 9 Oct 2019 21:22:45 +0530
Subject: [PATCH 7/9] address comment

* rename grad_x -> x_grad
---
 src/operator/tensor/elemwise_unary_op_trig.cc | 24 +++++++++----------
 1 file changed, 12 insertions(+), 12 deletions(-)

diff --git a/src/operator/tensor/elemwise_unary_op_trig.cc b/src/operator/tensor/elemwise_unary_op_trig.cc
index 3da26fa271c1..2b888224eee5 100644
--- a/src/operator/tensor/elemwise_unary_op_trig.cc
+++ b/src/operator/tensor/elemwise_unary_op_trig.cc
@@ -203,14 +203,14 @@ MXNET_OPERATOR_REGISTER_BINARY_WITH_SPARSE_CPU_DR(_backward_arcsin,
       auto dydx_mul_grad_x = nnvm::NodeEntry{n};
       auto op = mxnet::util::NodeOpGen{n};
 
-      auto grad_x = op.div(dydx_mul_grad_x, dydx);
-      auto grad_x_square = op.square(grad_x);
-      auto grad_x_square_mul_x = op.mul(grad_x_square, x);
-      auto grad_grad_x = op.mul(dydx_mul_grad_x, grad_x_square_mul_x);
+      auto x_grad = op.div(dydx_mul_grad_x, dydx);
+      auto x_grad_square = op.square(x_grad);
+      auto x_grad_square_mul_x = op.mul(x_grad_square, x);
+      auto x_grad_grad = op.mul(dydx_mul_grad_x, x_grad_square_mul_x);
 
       std::vector<nnvm::NodeEntry> ret;
-      ret.emplace_back(op.mul(ograds[0], grad_x));
-      ret.emplace_back(op.mul(ograds[0], grad_grad_x));
+      ret.emplace_back(op.mul(ograds[0], x_grad));
+      ret.emplace_back(op.mul(ograds[0], x_grad_grad));
       return ret;
     });
 
@@ -246,14 +246,14 @@ MXNET_OPERATOR_REGISTER_BINARY_WITH_SPARSE_CPU_DR(_backward_arccos,
       auto dydx_mul_grad_x = nnvm::NodeEntry{n};
       auto op = mxnet::util::NodeOpGen{n};
 
-      auto grad_x = op.div(dydx_mul_grad_x, dydx);
-      auto grad_x_square = op.square(grad_x);
-      auto grad_x_square_mul_x = op.mul(grad_x_square, x);
-      auto grad_grad_x = op.mul(dydx_mul_grad_x, grad_x_square_mul_x);
+      auto x_grad = op.div(dydx_mul_grad_x, dydx);
+      auto x_grad_square = op.square(x_grad);
+      auto x_grad_square_mul_x = op.mul(x_grad_square, x);
+      auto x_grad_grad = op.mul(dydx_mul_grad_x, x_grad_square_mul_x);
 
       std::vector<nnvm::NodeEntry> ret;
-      ret.emplace_back(op.mul(ograds[0], grad_x));
-      ret.emplace_back(op.mul(ograds[0], grad_grad_x));
+      ret.emplace_back(op.mul(ograds[0], x_grad));
+      ret.emplace_back(op.mul(ograds[0], x_grad_grad));
       return ret;
     });
 

From 211b3d4dbc9480df433f06806c3b58a6c8fbfa1d Mon Sep 17 00:00:00 2001
From: kshitij12345
Date: Sun, 27 Oct 2019 15:14:23 +0530
Subject: [PATCH 8/9] retrigger CI

From 8b71be5b01e8bb1d25358480f7dedcf32613b55e Mon Sep 17 00:00:00 2001
From: kshitij12345
Date: Fri, 1 Nov 2019 18:42:26 +0530
Subject: [PATCH 9/9] retrigger CI
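
For reference (this note is not part of any commit above): the in-code comments compress the calculus, so here is the identity both FGradient lambdas rely on, written out. For f(x) = arcsin(x),

    f'(x)  = (1 - x^2)^{-1/2}
    f''(x) = x \, (1 - x^2)^{-3/2}
           = f'(x) \cdot \frac{x}{1 - x^2}
           = f'(x) \cdot x \, f'(x)^2

For f(x) = arccos(x), f'(x) = -(1 - x^2)^{-1/2} and the same identity f''(x) = f'(x) * x/(1 - x^2) holds, because squaring f'(x) discards the sign. That is why both lambdas build the identical graph: divide the node's output (dL/dy * f'(x)) by dL/dy to recover f'(x), square it, multiply by x to obtain x/(1 - x^2), and multiply by the node's output again to obtain dL/dy * f''(x).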
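
Also for reference, a minimal standalone sketch of how the new second-order gradient can be exercised from Python. It mirrors what check_second_order_unary does in tests/python/unittest/test_higher_order_grad.py and assumes an MXNet build that includes these patches; the input values are illustrative only.

    from mxnet import nd, autograd

    x = nd.array([0.1, 0.2, 0.3])
    x.attach_grad()
    with autograd.record():
        y = nd.arcsin(x)
        # Keep the first-order gradient in the graph so it can itself
        # be differentiated (create_graph=True).
        x_grad = autograd.grad(heads=y, variables=x,
                               create_graph=True, retain_graph=True)[0]
    x_grad.backward()
    # x.grad now holds f''(x) = x / (1 - x^2)^(3/2) elementwise,
    # matching grad_grad_op in test_arcsin.
    expected = x / nd.sqrt((1 - x ** 2) ** 3)
    print(x.grad, expected)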