Merge pull request #29 from apache/master
fix min max on zero-sized ndarray (apache#14745)
Hao Li authored Apr 23, 2019
2 parents ed641f9 + da7fff7 commit 72d3f50
Showing 3 changed files with 44 additions and 2 deletions.
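
As context for the diff below, a minimal sketch of the behavior this commit enforces: calling min or max on a zero-sized NDArray should raise MXNetError rather than return an undefined value. The decorator usage and error type mirror the new unit test added in this commit; the rest of the snippet is illustrative only.

import mxnet as mx
from mxnet.base import MXNetError


@mx.use_np_compat  # zero-size shapes such as (5, 0) need numpy-compatibility mode
def check_zero_size_reduce():
    a = mx.nd.zeros(shape=(5, 0))  # contains no elements
    for reduce_op in (a.min, a.max):
        try:
            reduce_op()  # shape inference now rejects zero-sized reduction inputs
        except MXNetError as err:
            print("raised as expected:", type(err).__name__)


check_zero_size_reduce()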
27 changes: 27 additions & 0 deletions src/operator/tensor/broadcast_reduce_op.h
@@ -312,6 +312,23 @@ inline bool ReduceAxesShape(const nnvm::NodeAttrs& attrs,
  return true;
}

inline bool ReduceMinMaxAxesShape(const nnvm::NodeAttrs& attrs,
                                  mxnet::ShapeVector *in_attrs,
                                  mxnet::ShapeVector *out_attrs) {
  CHECK_EQ(in_attrs->size(), 1U);
  CHECK_EQ(out_attrs->size(), 1U);
  if (!shape_is_known((*in_attrs)[0])) return false;
  CHECK_GT((*in_attrs)[0].Size(), 0U)
    << "Reduction input's size should > 0 "
    << (*in_attrs)[0];
  const ReduceAxesParam& param = nnvm::get<ReduceAxesParam>(attrs.parsed);
  SHAPE_ASSIGN_CHECK(*out_attrs, 0,
                     ReduceAxesShapeImpl((*in_attrs)[0], param.axis,
                                         param.keepdims, param.exclude));
  return true;
}


inline bool NormType(const nnvm::NodeAttrs& attrs,
                     std::vector<int> *in_attrs,
                     std::vector<int> *out_attrs) {
@@ -1488,6 +1505,16 @@ void PickOpBackward(const nnvm::NodeAttrs& attrs,
  .add_argument("data", "NDArray-or-Symbol", "The input")              \
  .add_arguments(ReduceAxesParam::__FIELDS__())

#define MXNET_OPERATOR_REGISTER_MINMAX_REDUCE(name)                    \
  NNVM_REGISTER_OP(name)                                               \
  .set_num_inputs(1)                                                   \
  .set_num_outputs(1)                                                  \
  .set_attr_parser(AxesParamParser<ReduceAxesParam>)                   \
  .set_attr<mxnet::FInferShape>("FInferShape", ReduceMinMaxAxesShape)  \
  .set_attr<nnvm::FInferType>("FInferType", ElemwiseType<1, 1>)        \
  .add_argument("data", "NDArray-or-Symbol", "The input")              \
  .add_arguments(ReduceAxesParam::__FIELDS__())

#define MXNET_OPERATOR_REGISTER_REDUCE_BACKWARD(name)                  \
  NNVM_REGISTER_OP(name)                                               \
  .set_num_outputs(1)                                                  \
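A note on the hunks above: ReduceMinMaxAxesShape mirrors ReduceAxesShape but additionally requires the input to contain at least one element, since min and max have no identity element for an empty reduction (unlike sum or prod). Below is a rough Python analogue of that guard, using a hypothetical helper name for illustration only.

def reduce_minmax_axes_shape(in_shape):
    # Hypothetical analogue of the C++ ReduceMinMaxAxesShape guard above.
    if any(dim < 0 for dim in in_shape):   # shape not fully known yet
        return None                        # defer inference (the C++ code returns false)
    size = 1
    for dim in in_shape:
        size *= dim
    if size == 0:                          # mirrors CHECK_GT((*in_attrs)[0].Size(), 0U)
        raise ValueError("Reduction input's size should > 0 " + str(in_shape))
    # the output shape is then computed exactly as in ReduceAxesShape (omitted here)
    return in_shape


reduce_minmax_axes_shape((5, 3))   # fine
# reduce_minmax_axes_shape((5, 0)) would raise, matching the CHECK_GT failure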
4 changes: 2 additions & 2 deletions src/operator/tensor/broadcast_reduce_op_value.cc
@@ -186,7 +186,7 @@ MXNET_OPERATOR_REGISTER_REDUCE_BACKWARD(_backward_nanprod)
.set_num_inputs(3)
.set_attr<FCompute>("FCompute<cpu>", ReduceAxesBackwardUseInOut<cpu, mshadow_op::nanprod_grad>);

-MXNET_OPERATOR_REGISTER_REDUCE(max)
+MXNET_OPERATOR_REGISTER_MINMAX_REDUCE(max)
.add_alias("max_axis")
.describe(get_reduce_axes_description("max", __LINE__))
.set_attr<FCompute>("FCompute<cpu>", ReduceAxesCompute<cpu, mshadow::red::maximum>)
@@ -200,7 +200,7 @@ MXNET_OPERATOR_REGISTER_REDUCE_BACKWARD(_backward_max)
.set_num_inputs(3)
.set_attr<FCompute>("FCompute<cpu>", ReduceAxesBackwardUseInOut<cpu, mshadow_op::eq>);

-MXNET_OPERATOR_REGISTER_REDUCE(min)
+MXNET_OPERATOR_REGISTER_MINMAX_REDUCE(min)
.add_alias("min_axis")
.describe(get_reduce_axes_description("min", __LINE__))
.set_attr<FCompute>("FCompute<cpu>", ReduceAxesCompute<cpu, mshadow::red::minimum>)
15 changes: 15 additions & 0 deletions tests/python/unittest/test_operator.py
@@ -6993,6 +6993,21 @@ def test_float16_min_max():
    assert np.finfo('float16').max == mx.nd.max(a).asscalar()


@with_seed()
@mx.use_np_compat
def test_zero_size_min_max():
    def min():
        a = mx.nd.zeros(shape=(5, 0))
        a.min()

    def max():
        a = mx.nd.zeros(shape=(5, 0))
        a.max()

    assert_raises(MXNetError, min)
    assert_raises(MXNetError, max)


@with_seed()
def test_squeeze_op():
    def check_squeeze_op(shape, axis=None):
