
Commit

fix acc_type_switch macro with extra tests (apache#14773)
haojin2 authored and haohuw committed Jun 23, 2019
1 parent 2a5dae8 commit 32f4f61
Showing 4 changed files with 52 additions and 13 deletions.
4 changes: 4 additions & 0 deletions src/operator/mxnet_op.h
@@ -335,24 +335,28 @@ inline int get_num_threads<cpu>(const int N) {
{ \
typedef uint8_t DType; \
typedef uint32_t AType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kInt8: \
{ \
typedef int8_t DType; \
typedef int32_t AType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kInt32: \
{ \
typedef int32_t DType; \
typedef int64_t AType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kInt64: \
{ \
typedef int64_t DType; \
typedef int64_t AType; \
{__VA_ARGS__} \
} \
break; \
default: \
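For reference, the hunk above belongs to the MXNET_ACC_TYPE_SWITCH macro: every integer case scopes its own DType/AType typedefs and expands the caller's body as {__VA_ARGS__}, so each storage type gets a wider accumulation type. A minimal, stand-alone sketch of the same pattern follows (DEMO_ACC_TYPE_SWITCH and the flag values are illustrative, not MXNet's actual names):

#include <cstdint>
#include <iostream>

// Simplified sketch of an accumulation-type switch. Each case opens its own
// scope, defines the storage type (DType) and a wider accumulator (AType),
// and then expands the caller's body; the braces around __VA_ARGS__ keep the
// typedefs local to the selected case.
#define DEMO_ACC_TYPE_SWITCH(type_flag, DType, AType, ...)  \
  switch (type_flag) {                                      \
    case 0:                        /* uint8 storage */      \
    {                                                       \
      typedef uint8_t DType;                                \
      typedef uint32_t AType;                               \
      {__VA_ARGS__}                                         \
    }                                                       \
    break;                                                  \
    case 1:                        /* int32 storage */      \
    {                                                       \
      typedef int32_t DType;                                \
      typedef int64_t AType;                                \
      {__VA_ARGS__}                                         \
    }                                                       \
    break;                                                  \
    default:                                                \
      break;                                                \
  }

int main() {
  int flag = 0;  // pretend the tensor stores uint8 data
  DEMO_ACC_TYPE_SWITCH(flag, DType, AType, {
    DType vals[4] = {200, 200, 200, 200};  // the sum would overflow uint8
    AType acc = 0;
    for (int i = 0; i < 4; ++i) acc += vals[i];
    std::cout << acc << std::endl;  // prints 800
  });
  return 0;
}

Because the body is forwarded through __VA_ARGS__, any commas inside the braced block are absorbed back into the variadic argument on expansion, which is what lets callers write arbitrary code between the braces.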
13 changes: 13 additions & 0 deletions src/operator/tensor/broadcast_reduce-inl.cuh
@@ -617,6 +617,8 @@ void Reduce(Stream<gpu> *s, const TBlob& small, const OpReqType req,
ReduceImplConfig<ndim> config =
ConfigureReduceImpl<ndim, DType>(small.shape_, big.shape_, NULL, NULL);
if (safe_acc) {
// TODO(haojin2): Use real-only type switch for Windows temporarily due to CI issues.
#ifndef _WIN32
MXNET_ACC_TYPE_SWITCH(mshadow::DataType<DType>::kFlag, DataType, AType, {
typedef typename std::conditional<safe_acc, AType, DataType>::type AccType;
MSHADOW_TYPE_SWITCH(small.type_flag_, OType, {
@@ -626,6 +628,17 @@ void Reduce(Stream<gpu> *s, const TBlob& small, const OpReqType req,
stream, small, req, big, workspace, config);
});
});
#else
MXNET_REAL_ACC_TYPE_SWITCH(mshadow::DataType<DType>::kFlag, DataType, AType, {
typedef typename std::conditional<safe_acc, AType, DataType>::type AccType;
MSHADOW_TYPE_SWITCH(small.type_flag_, OType, {
typedef typename std::conditional<safe_acc, OType, DataType>::type OutType;
config = ConfigureReduceImpl<ndim, AccType>(small.shape_, big.shape_, NULL, NULL);
ReduceImpl<Reducer, ndim, AccType, DataType, OutType, OP>(
stream, small, req, big, workspace, config);
});
});
#endif
} else {
ReduceImpl<Reducer, ndim, DType, DType, DType, OP>(stream, small, req, big, workspace, config);
}
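Both branches above select the accumulation type with std::conditional at compile time: with safe_acc the kernel accumulates in the wider AType supplied by the type switch, otherwise in the storage type itself; the #ifndef _WIN32 split only changes which switch macro supplies AType. A small stand-alone sketch of that selection (sum_reduce is an illustrative name, not MXNet's API):

#include <cstdint>
#include <iostream>
#include <type_traits>
#include <vector>

// Mirrors the line
//   typedef typename std::conditional<safe_acc, AType, DataType>::type AccType;
// from the diff: the accumulator is AType only when safe_acc is true.
template <bool safe_acc, typename DType, typename AType>
typename std::conditional<safe_acc, AType, DType>::type
sum_reduce(const std::vector<DType>& data) {
  typedef typename std::conditional<safe_acc, AType, DType>::type AccType;
  AccType acc = 0;
  for (DType v : data) acc += static_cast<AccType>(v);
  return acc;
}

int main() {
  std::vector<int8_t> data(100, 2);  // the true sum, 200, does not fit in int8
  std::cout << static_cast<int>(sum_reduce<false, int8_t, int32_t>(data)) << "\n";  // overflows (typically -56)
  std::cout << sum_reduce<true, int8_t, int32_t>(data) << "\n";                     // 200, accumulated in int32
  return 0;
}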
13 changes: 13 additions & 0 deletions src/operator/tensor/broadcast_reduce-inl.h
@@ -239,6 +239,8 @@ void Reduce(Stream<cpu>* s, const TBlob& small, const OpReqType req,
N, M, req == kAddTo, big.dptr<DType>(), small.dptr<DType>(),
big.shape_.get<ndim>(), small.shape_.get<ndim>(), rshape, rstride);
} else {
// TODO(haojin2): Use real-only type switch for Windows temporarily due to CI issues.
#ifndef _WIN32
MXNET_ACC_TYPE_SWITCH(mshadow::DataType<DType>::kFlag, DataType, AType, {
typedef typename std::conditional<safe_acc, AType, DataType>::type AccType;
MSHADOW_TYPE_SWITCH(small.type_flag_, OType, {
@@ -248,6 +250,17 @@ void Reduce(Stream<cpu>* s, const TBlob& small, const OpReqType req,
big.shape_.get<ndim>(), small.shape_.get<ndim>(), rshape, rstride);
});
});
#else
MXNET_REAL_ACC_TYPE_SWITCH(mshadow::DataType<DType>::kFlag, DataType, AType, {
typedef typename std::conditional<safe_acc, AType, DataType>::type AccType;
MSHADOW_TYPE_SWITCH(small.type_flag_, OType, {
typedef typename std::conditional<safe_acc, OType, DataType>::type OutType;
seq_reduce_compute<Reducer, ndim, AccType, DataType, OutType, OP>(
N, M, req == kAddTo, big.dptr<DataType>(), small.dptr<OutType>(),
big.shape_.get<ndim>(), small.shape_.get<ndim>(), rshape, rstride);
});
});
#endif
}
}

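On the CPU side the same type selection funnels into seq_reduce_compute, which takes separate input, accumulation, and output types. A rough, self-contained sketch of that three-type split (seq_sum is a made-up name; the real kernel also handles shapes, strides, and req == kAddTo):

#include <cstdint>
#include <iostream>
#include <vector>

// Illustrative sequential reduce: N output elements, each the sum of M inputs,
// accumulated in AccType and cast to the output type only once at the end.
template <typename AccType, typename DType, typename OType>
void seq_sum(int N, int M, const DType* big, OType* small) {
  for (int i = 0; i < N; ++i) {
    AccType acc = 0;
    for (int j = 0; j < M; ++j) {
      acc += static_cast<AccType>(big[i * M + j]);
    }
    small[i] = static_cast<OType>(acc);
  }
}

int main() {
  std::vector<uint8_t> big(2 * 300, 1);  // each row of 300 ones sums past uint8's max
  std::vector<int32_t> small(2);
  seq_sum<uint32_t>(2, 300, big.data(), small.data());
  std::cout << small[0] << " " << small[1] << std::endl;  // 300 300
  return 0;
}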
35 changes: 22 additions & 13 deletions tests/python/unittest/test_operator.py
@@ -3419,12 +3419,19 @@ def l2norm(input_data, axis=0, keepdims=True):
in_data_dim = random_sample([4,5,6], 1)[0]
in_shape = rand_shape_nd(in_data_dim, dim=5)
epsilon = 1e-3
acc_type = {np.float16: np.float32, np.float32: np.float32, np.float64: np.float64}
acc_type = {np.float16: np.float32, np.float32: np.float32, np.float64: np.float64,
np.int32: np.int32, np.int64: np.int64}
is_windows = sys.platform.startswith('win')
for order in [1, 2]:
for dtype in [np.float16, np.float32, np.float64]:
for dtype in [np.float16, np.float32, np.float64, np.int32, np.int64]:
for i in range(in_data_dim):
for out_dtype in ['float32', 'float64']:
for out_dtype in ['float32', 'float64', 'int32', 'int64']:
if (dtype == np.int32 or dtype == np.int64) and ('int' not in out_dtype or is_windows):
continue
if dtype != np.int32 and dtype != np.int64 and 'int' in out_dtype:
continue
backward_dtype = np.float32 if out_dtype == 'float32' else np.float64
skip_backward = 'int' in out_dtype
print(order, dtype, i, out_dtype, in_shape)
in_data = np.random.uniform(-1, 1, in_shape).astype(acc_type[dtype])
in_data[abs(in_data) < epsilon] = 2 * epsilon
@@ -3433,13 +3440,14 @@ def l2norm(input_data, axis=0, keepdims=True):
npy_out_backward = np.sign(in_data) if order is 1 else in_data/npy_out
check_symbolic_forward(norm_sym, [in_data.astype(dtype)], [npy_out.astype(out_dtype)],
rtol=1e-3, atol=1e-5, ctx=ctx)
check_symbolic_backward(norm_sym, [in_data.astype(dtype)],
[np.ones(npy_out.shape).astype(out_dtype)],
[npy_out_backward], rtol=1e-3, atol=1e-5, ctx=ctx,
dtype=backward_dtype)
if not skip_backward:
check_symbolic_backward(norm_sym, [in_data.astype(dtype)],
[np.ones(npy_out.shape).astype(out_dtype)],
[npy_out_backward], rtol=1e-3, atol=1e-5, ctx=ctx,
dtype=backward_dtype)
# Disable numeric gradient https://github.com/apache/incubator-mxnet/issues/11509
# check gradient
if dtype is not np.float16:
if dtype is not np.float16 and not skip_backward:
check_numeric_gradient(norm_sym, [in_data], numeric_eps=epsilon,
rtol=1e-1, atol=1e-3, dtype=backward_dtype)
if i < in_data_dim-1:
@@ -3449,12 +3457,13 @@ def l2norm(input_data, axis=0, keepdims=True):
check_symbolic_forward(norm_sym, [in_data], [npy_out.astype(dtype)],
rtol=1e-3 if dtype is np.float16 else 1e-3,
atol=1e-5 if dtype is np.float16 else 1e-5, ctx=ctx)
check_symbolic_backward(norm_sym, [in_data],
[np.ones(npy_out.shape).astype(out_dtype)],
[npy_out_backward.astype(out_dtype)],
rtol=1e-3, atol=1e-5, ctx=ctx, dtype=backward_dtype)
if not skip_backward:
check_symbolic_backward(norm_sym, [in_data],
[np.ones(npy_out.shape).astype(out_dtype)],
[npy_out_backward.astype(out_dtype)],
rtol=1e-3, atol=1e-5, ctx=ctx, dtype=backward_dtype)
# check gradient
if dtype is not np.float16:
if dtype is not np.float16 and not skip_backward:
check_numeric_gradient(norm_sym, [in_data], numeric_eps=epsilon,
rtol=1e-1, atol=1e-3, dtype=backward_dtype)

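These new cases exercise the integer branches of the fixed macro: per mxnet_op.h above, int32 storage accumulates in int64, so a norm's intermediate sum of squares cannot overflow the input type; the test checks only the forward pass for integer outputs (skip_backward) and skips the integer cases on Windows, where the real-only switch is used. A rough, stand-alone illustration of the arithmetic being protected (l2norm_int32 is an illustrative helper, not part of the test):

#include <cmath>
#include <cstdint>
#include <iostream>
#include <vector>

// L2 norm of int32 data with the squares accumulated in int64, in the spirit
// of the kInt32 -> int64_t case of the accumulation-type switch.
int64_t l2norm_int32(const std::vector<int32_t>& data) {
  int64_t acc = 0;
  for (int32_t v : data) acc += static_cast<int64_t>(v) * v;  // each square can exceed INT32_MAX
  return static_cast<int64_t>(std::llround(std::sqrt(static_cast<double>(acc))));
}

int main() {
  std::vector<int32_t> data(4, 100000);          // each square is 1e10
  std::cout << l2norm_int32(data) << std::endl;  // prints 200000
  return 0;
}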
