
Commit a245021

Fix

D-Roberts committed Feb 14, 2020
1 parent cc236ae
Showing 2 changed files with 14 additions and 15 deletions.
18 changes: 9 additions & 9 deletions src/operator/numpy/random/np_weibull_op.h
@@ -61,9 +61,9 @@ struct NumpyWeibullParam : public dmlc::Parameter<NumpyWeibullParam> {

template <typename DType>
struct scalar_weibull_kernel {
-  MSHADOW_XINLINE static void Map(index_t i, float a, float *threshold,
+  MSHADOW_XINLINE static void Map(index_t i, float a, float *noise,
                                   DType *out) {
-    out[i] = powf(-log(threshold[i]), DType(1.0/a));
+    out[i] = powf(-log(noise[i]), DType(1.0/a));
  }
};
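The kernel above draws Weibull samples by inverse-transform sampling: with u ~ Uniform(0, 1), x = (-ln u)^(1/a) follows a Weibull distribution with shape a and unit scale. A minimal NumPy sketch of that forward step (function and variable names here are illustrative, not part of the operator):

import numpy as np

def weibull_sample(a, uniform_noise):
    # Inverse-transform sampling: x = (-ln u) ** (1 / a), u ~ Uniform(0, 1).
    return np.power(-np.log(uniform_noise), 1.0 / a)

u = np.random.uniform(0.05, 0.95, size=(3, 2))
x = weibull_sample(2.0, u)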

@@ -84,11 +84,13 @@ struct weibull_kernel {
  MSHADOW_XINLINE static void Map(index_t i,
                                  const Shape<ndim> &stride,
                                  const Shape<ndim> &oshape,
-                                 IType *aparams, float* threshold, OType *out) {
+                                 IType *aparams, float *noise, OType *out) {
    Shape<ndim> coord = unravel(i, oshape);
    auto idx = static_cast<index_t>(dot(coord, stride));
-    threshold[i] = -log(threshold[i]);
-    out[i] = powf(threshold[i], IType(1.0/aparams[idx]));
+    noise[i] = -log(noise[i]);
+    out[i] = powf(noise[i], IType(1.0/aparams[idx]));
+    // get grad
+    noise[i] = -log(noise[i]) * out[i] * (1.0/(aparams[idx] * aparams[idx]));
  }
};
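The two added lines cache the reparameterization gradient of the sample with respect to the shape parameter in the noise buffer: writing t = -ln u, the sample is x = t^(1/a) and dx/da = -x * ln(t) / a^2, which matches `-log(noise[i]) * out[i] * (1.0/(aparams[idx] * aparams[idx]))` once `noise[i]` has been overwritten with t. A NumPy sketch of the same formula, checked against a finite difference (the helper name is illustrative):

import numpy as np

def weibull_sample_and_grad(a, u):
    # t = -ln u; sample x = t ** (1/a); dx/da = -x * ln(t) / a**2
    t = -np.log(u)
    x = np.power(t, 1.0 / a)
    dx_da = -np.log(t) * x / (a * a)
    return x, dx_da

a, eps = 2.0, 1e-6
u = np.random.uniform(0.05, 0.95, size=(4, 3))
x, dx_da = weibull_sample_and_grad(a, u)
x_plus, _ = weibull_sample_and_grad(a + eps, u)
# The analytic gradient should match the finite-difference estimate.
assert np.allclose(dx_da, (x_plus - x) / eps, rtol=1e-3, atol=1e-5)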

@@ -167,10 +169,8 @@ inline void ScalarWeibullReparamBackwardImpl(const OpContext& ctx,
  ReduceWorkspaceSize<ndim, DType>(s, igrad.shape_, req[0], ograd.shape_);
  Tensor<xpu, 1, char> workspace =
      ctx.requested[0].get_space_typed<xpu, 1, char>(Shape1(workspace_size), s);
-  // igrad=sum(ograd*samples*log(noise)*reciprocal_grad_a)
-  // temp = sum(ograd*samples*noise)
-  Reduce<red::sum, ndim, DType, op::mshadow_op::mul, op::mshadow_op::mul>(
-      s, igrad, req[0], workspace, ograd, noise, samples);
+  Reduce<red::sum, ndim, DType, op::mshadow_op::mul, op::mshadow_op::left>(
+      s, igrad, req[0], workspace, ograd, noise, noise);
}
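With d(sample)/d(a) already stored in `noise`, the backward pass reduces to a chain-rule sum: the gradient of the loss with respect to the broadcast shape parameter is the sum over output elements of ograd * noise. `mshadow_op::left` returns its first argument, so the `Reduce` call accumulates exactly that product. A short NumPy sketch of the reduction, assuming a scalar shape parameter broadcast over the whole output:

import numpy as np

def weibull_backward(ograd, dsample_da):
    # Chain rule for a scalar shape parameter broadcast over the output:
    # d(loss)/d(a) = sum_i ograd_i * d(sample_i)/d(a)
    return np.sum(ograd * dsample_da)

ograd = np.ones((3, 2))              # upstream gradient, e.g. from a sum loss
dsample_da = np.random.randn(3, 2)   # stands in for the noise buffer
a_grad = weibull_backward(ograd, dsample_da)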

template<typename xpu>
11 changes: 5 additions & 6 deletions tests/python/unittest/test_numpy_op.py
@@ -3941,8 +3941,8 @@ def hybrid_forward(self, F, a):

    output_shapes = [
        (3, 2),
-        # (4, 3, 2, 2),
-        # (3, 4, 5)
+        (4, 3, 2, 2),
+        (3, 4, 5)
    ]
    for hybridize in [False, True]:
        for out_shape in output_shapes:
@@ -3954,12 +3954,11 @@ def hybrid_forward(self, F, a):
            with mx.autograd.record():
                mx_out = test_w_grad(a)
            mx_out.backward()

            # gradient formula calculus (a=1)
-            # formula_grad = - np.sum(mx_out * np.log(mx_out))
+            formula_grad = - mx_out * np.log(mx_out)
            assert a.grad.shape == out_shape
-            # assert_almost_equal(a.grad.asnumpy().sum(), formula_grad.asnumpy().sum(), rtol=1e-3, atol=1e-5)
-            # assert_almost_equal(a.grad.asnumpy().sum(), (mx_out*mx_out).asnumpy().sum(), rtol=1e-3, atol=1e-5)
+            assert_almost_equal(a.grad.asnumpy().sum(), formula_grad.asnumpy().sum(), rtol=1e-3, atol=1e-5)
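The re-enabled assertion relies on the a = 1 special case: there the sample is x = -ln u, so dx/da = -x * ln(-ln u) = -x * ln(x), which is the `formula_grad = - mx_out * np.log(mx_out)` line, and the summed gradients are compared. A standalone NumPy sketch of the same identity (plain NumPy rather than mxnet.numpy, for illustration only):

import numpy as np

u = np.random.uniform(0.05, 0.95, size=(3, 2))
x = -np.log(u)                      # Weibull sample with a = 1
grad_a = -np.log(-np.log(u)) * x    # dx/da at a = 1, same as the kernel formula
formula_grad = -x * np.log(x)       # the test's closed form; algebraically identical
assert np.allclose(grad_a.sum(), formula_grad.sum(), rtol=1e-3, atol=1e-5)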


@with_seed()
