
[Numpy] Numpy compatible slicing (#15798)
* Modify ndarray slice to have numpy-compatible behaviour

* Minor syntax fix

* Fix slice inconsistency

* Allow empty outputs after slicing ndarrays

* Fix
Mike authored and haojin2 committed Aug 10, 2019
1 parent 0eb213d commit 44a7fca
Showing 4 changed files with 91 additions and 43 deletions.
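Before the diff itself, a quick plain-NumPy illustration (runs without MXNet) of the target semantics named in the commit message: begin == end and out-of-range bounds now produce empty outputs instead of raising, matching numpy indexing.

import numpy as np

a = np.arange(12).reshape(3, 4)
print(a[1:1].shape)    # (0, 4) -- begin == end is a legal empty slice
print(a[5:9].shape)    # (0, 4) -- begin past the axis length clips to empty
print(a[::-2])         # negative step walks axis 0 in reverse: rows 2 and 0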
67 changes: 33 additions & 34 deletions src/operator/tensor/matrix_op-inl.h
@@ -685,13 +685,13 @@ inline void GetIndexRange(const mxnet::TShape& dshape,
<< "Static array size=" << ndim
<< " is not equal to data shape ndim=" << dshape.ndim();

- if (param_step.ndim() != 0) {
+ if (param_step.ndim() > 0) {
CHECK_EQ(param_step.ndim(), param_begin.ndim())
<< "step and begin must have the same length";
}

for (int i = 0; i < param_begin.ndim(); ++i) {
- index_t s = param_step.ndim() != 0U && param_step[i].has_value() ? param_step[i].value() : 1;
+ index_t s = param_step.ndim() > 0 && param_step[i].has_value() ? param_step[i].value() : 1;
CHECK_NE(s, 0) << "slice op step[" << i << "] cannot be 0";

index_t b = 0, e = 0;
@@ -700,58 +700,54 @@ inline void GetIndexRange(const mxnet::TShape& dshape,
b = param_begin[i].has_value() ? param_begin[i].value() : (s < 0 ? len - 1 : 0);
e = param_end[i].has_value() ? param_end[i].value() : (s < 0 ? -1 : len);

- // checking upper and lower bounds for begin
if (b < 0) {
b += len;
- CHECK_GE(b, 0) << "slicing with begin[" << i << "]=" << b - len
-                << " exceeds limit of input dimension[" << i << "]=" << len;
}
- CHECK_LT(b, len) << "slicing with begin[" << i << "]=" << b
-                  << " exceeds limit of input dimension[" << i << "]=" << len;

- // checking upper and lower bounds for end
if (e < 0 && param_end[i].has_value()) {
- if (!(s < 0 && e == -1)) {
-   // Keep end=-1 as one-beyond-limits index for negative stride
-   e += len;
- }
- CHECK_GE(e, 0) << "slicing with end[" << i << "]=" << e - len
-                << " exceeds limit of input dimension[" << i << "]=" << len;
+ e += len;
}
- CHECK_LE(e, len) << "slicing with end[" << i << "]=" << e
-                  << " exceeds limit of input dimension[" << i << "]=" << len;

- // checking begin==end case which is not supported
- CHECK_NE(b, e) << "slicing with begin[" << i << "]=end[" << i << "]="
-                << e << " results in an empty tensor and is not supported";
+ // move the begin and end to correct position for calculating dim size
+ b = (b < 0 && s > 0) ? 0 : b;
+ b = (b > len - 1 && s < 0) ? len - 1 : b;
+ // if the start value lead to empty tensor under step s, use -1 for indication
+ b = (b < 0 || b > len - 1) ? -1 : b;
+ e = e > -1 ? e : -1;
+ e = e > len ? len : e;
} else if (len == 0) {
b = 0;
e = 0;
}

(*begin)[i] = b;
(*end)[i] = e;
(*step)[i] = s;
}

- for (index_t i = param_begin.ndim(); i < dshape.ndim(); ++i) {
+ for (int i = param_begin.ndim(); i < dshape.ndim(); ++i) {
(*begin)[i] = 0;
(*end)[i] = dshape[i];
(*step)[i] = 1;
}
}
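To make the rewritten bound handling concrete, here is a minimal Python sketch of the per-axis normalization above (a hypothetical normalize_axis helper, not MXNet source): defaults depend on the sign of the step, negative indices wrap once, and out-of-range values are clamped, with begin = -1 flagging an empty axis rather than an error.

def normalize_axis(begin, end, step, length):
    # Mirrors GetIndexRange for one axis of known, non-zero length.
    s = 1 if step is None else step
    assert s != 0, "slice step cannot be 0"
    b = begin if begin is not None else (length - 1 if s < 0 else 0)
    e = end if end is not None else (-1 if s < 0 else length)
    if b < 0:
        b += length                      # wrap negative begin once
    if e < 0 and end is not None:
        e += length                      # wrap negative end once
    # clamp begin to a reachable start; -1 marks an empty result
    if b < 0 and s > 0:
        b = 0
    if b > length - 1 and s < 0:
        b = length - 1
    if b < 0 or b > length - 1:
        b = -1
    e = max(e, -1)                       # clamp end into [-1, length]
    e = min(e, length)
    return b, e, s

print(normalize_axis(10, 0, -2, 8))       # (7, 0, -2)
print(normalize_axis(None, None, -1, 5))  # (4, -1, -1)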

- inline void SetSliceOpOutputDimSize(const index_t i, const int b,
+ inline void SetSliceOpOutputDimSize(const mxnet::TShape& dshape,
+                                     const index_t i, const int b,
const int e, const int s,
mxnet::TShape* oshape) {
- if (e != b) {
+ if (!mxnet::dim_size_is_known(dshape, i)) {
+   (*oshape)[i] = -1;
+   return;
+ }
+ if (e != b && b >= 0) {
if (s > 0) {
- CHECK_LT(b, e) << "slicing with begin[" << i << "]=" << b << ", end[" << i << "]="
-                << e << ", and step[" << i << "]=" << s << " is invalid";
- (*oshape)[i] = (e - b - 1) / s + 1;
+ (*oshape)[i] = e > b ? (e - b - 1) / s + 1 : 0;
} else {
- CHECK_LT(e, b) << "slicing with begin[" << i << "]=" << b << ", end[" << i << "]="
-                << e << ", and step[" << i << "]=" << s << " is invalid";
- (*oshape)[i] = (b - e - 1) / (-s) + 1;
+ (*oshape)[i] = e < b ? (b - e - 1) / (-s) + 1 : 0;
}
- } else {
-   (*oshape)[i] = 0;
- }
+ } // else leave oshape[i] as 0 for partial infer
}
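The revised dim-size rule in sketch form (a hypothetical slice_dim_size helper mirroring the C++ above): with the CHECKs gone, a begin/end pair that cannot make progress under the step yields a zero-length axis instead of an error.

def slice_dim_size(b, e, s):
    if b < 0 or e == b:                  # b == -1 flags an empty axis
        return 0
    if s > 0:
        return (e - b - 1) // s + 1 if e > b else 0
    return (b - e - 1) // (-s) + 1 if e < b else 0

print(slice_dim_size(1, 4, 1))   # 3 elements: indices 1, 2, 3
print(slice_dim_size(7, 0, -2))  # 4 elements: indices 7, 5, 3, 1
print(slice_dim_size(2, 2, 1))   # 0 -- begin == end is now a legal empty slice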

inline bool SliceOpShape(const nnvm::NodeAttrs& attrs,
Expand All @@ -761,6 +757,7 @@ inline bool SliceOpShape(const nnvm::NodeAttrs& attrs,
CHECK_EQ(out_attrs->size(), 1U);
const mxnet::TShape& dshape = (*in_attrs)[0];
if (!mxnet::ndim_is_known(dshape)) return false;
+ CHECK_GT(dshape.ndim(), 0) << "slice only works for ndim > 0";
const SliceParam& param = nnvm::get<SliceParam>(attrs.parsed);
mxnet::TShape oshape = dshape;

@@ -769,12 +766,12 @@ inline bool SliceOpShape(const nnvm::NodeAttrs& attrs,
GetIndexRange(dshape, param.begin, param.end, param.step, &begin, &end, &step);
for (int i = 0; i < param.begin.ndim(); ++i) {
const int b = begin[i], e = end[i], s = step[i];
- SetSliceOpOutputDimSize(i, b, e, s, &oshape);
+ SetSliceOpOutputDimSize(dshape, i, b, e, s, &oshape);
}
})

SHAPE_ASSIGN_CHECK(*out_attrs, 0, oshape);
- return shape_is_known(oshape);
+ return shape_is_known(dshape) && shape_is_known(oshape);
}

template<int ndim, int req, typename xpu>
@@ -852,6 +849,7 @@ void SliceOpForward(const nnvm::NodeAttrs& attrs,
Stream<xpu>* s = ctx.get_stream<xpu>();
const TBlob& data = inputs[0];
const TBlob& out = outputs[0];
+ if (out.Size() == 0) return;
const SliceParam& param = nnvm::get<SliceParam>(attrs.parsed);
MXNET_NDIM_SWITCH(data.ndim(), ndim, {
common::StaticArray<index_t, ndim> begin, end, step;
@@ -951,6 +949,7 @@ void SliceOpBackward(const nnvm::NodeAttrs& attrs,
} else if (req[0] == kWriteInplace) {
LOG(FATAL) << "_slice_backward does not support kWriteInplace";
}
+ if (ograd.Size() == 0) return;
MXNET_NDIM_SWITCH(ograd.ndim(), ndim, {
common::StaticArray<index_t, ndim> begin, end, step;
GetIndexRange(igrad.shape_, param.begin, param.end, param.step, &begin, &end, &step);
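The two new early returns let a zero-size result flow through forward and backward without launching kernels. A usage sketch, assuming a numpy-enabled MXNet build (the empty slice selects nothing, so the gradient should come back all zeros):

from mxnet import np, npx, autograd
npx.set_np()

a = np.arange(8.0)
a.attach_grad()
with autograd.record():
    y = npx.slice(a, begin=(3,), end=(3,))  # empty output, shape (0,)
y.backward()
print(y.shape)   # (0,)
print(a.grad)    # [0. 0. 0. 0. 0. 0. 0. 0.]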
@@ -982,7 +981,7 @@ inline bool SliceAssignOpShape(const nnvm::NodeAttrs& attrs,
GetIndexRange(dshape, param.begin, param.end, param.step, &begin, &end, &step);
for (int i = 0; i < param.begin.ndim(); ++i) {
const int b = begin[i], e = end[i], s = step[i];
- SetSliceOpOutputDimSize(i, b, e, s, &vshape);
+ SetSliceOpOutputDimSize(dshape, i, b, e, s, &vshape);
}
})
SHAPE_ASSIGN_CHECK(*in_attrs, 1, vshape);
@@ -1121,7 +1120,7 @@ void SliceAssignScalarOpForward(const nnvm::NodeAttrs& attrs,
GetIndexRange(data.shape_, param.begin, param.end, param.step, &begin, &end, &step);
for (index_t i = 0; i < param.begin.ndim(); ++i) {
const int b = begin[i], e = end[i], s = step[i];
- SetSliceOpOutputDimSize(i, b, e, s, &vshape);
+ SetSliceOpOutputDimSize(data.shape_, i, b, e, s, &vshape);
}
MSHADOW_TYPE_SWITCH(out.type_flag_, DType, {
mxnet_op::Kernel<slice_assign_scalar<ndim>, xpu>::Launch(s, vshape.FlatTo2D()[0],
1 change: 1 addition & 0 deletions src/operator/tensor/matrix_op.cc
Expand Up @@ -506,6 +506,7 @@ Example::
[5., 7.],
[1., 3.]]
)code" ADD_FILELINE)
+ .add_alias("_npx_slice")
.set_attr_parser(ParamParser<SliceParam>)
.set_attr<mxnet::FInferShape>("FInferShape", SliceOpShape)
.set_attr<nnvm::FInferType>("FInferType", ElemwiseType<1, 1>)
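The "_npx_slice" alias registered above is what exposes the operator on the numpy-extension frontend used by the new test below. A small sketch, again assuming a numpy-enabled build:

from mxnet import np, npx
npx.set_np()

a = np.arange(24).reshape(4, 6)
# rows 0 and 2, every third column -- same result as a[0:4:2, 0:6:3]
print(npx.slice(a, begin=(0, 0), end=(4, 6), step=(2, 3)))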
57 changes: 57 additions & 0 deletions tests/python/unittest/test_numpy_op.py
@@ -92,6 +92,63 @@ def is_int(dtype):
assert_almost_equal(mx_out.asnumpy(), np_out, rtol=1e-3, atol=1e-5)


@with_seed()
@use_np
def test_npx_slice():
class TestSlice(HybridBlock):
def __init__(self, begin, end, step):
super(TestSlice, self).__init__()
self._begin = begin
self._end = end
self._step = step

def hybrid_forward(self, F, a):
return F.npx.slice(a, begin=self._begin, end=self._end, step=self._step)

shape = (8, 16, 9, 9)
np_array = _np.arange(_np.prod(shape), dtype='int32').reshape(shape)
configs = [
([], [], None),
([], [], []),
([1], [4], None),
([1], [10], [3]),
([10], [0], [-2]),
([None], [None], [None]),
([None], [None], [-1]),
([10], [None], [-1]),
([1, 0, 3], [-2, 10, -4], [None, 2, 3]),
([-2, -3, -5, -6], [1, 3, 4, 5], None),
([-2, -3, -5, -6], [1, 3, 4, 5], [-1, -2, -3, -4]),
([2, -3, -5, -6], [2, 3, 4, 5], None),
([2, -3, -5, 5], [3, 3, 4, 5], None),
]

for hybridize in [True, False]:
for config in configs:
start, end, step = config[0], config[1], config[2]
test_slice = TestSlice(begin=start, end=end, step=step)
if hybridize:
test_slice.hybridize()

a = np.array(np_array, dtype=np_array.dtype)
a.attach_grad()
basic_index = tuple([
slice(start[i], end[i], step[i]) if step is not None else slice(start[i], end[i])
for i in range(len(start))
])
expected_ret = np_array[basic_index]
with mx.autograd.record():
y = test_slice(a)

assert same(y.asnumpy(), expected_ret)

# test backward
mx.autograd.backward(y)
expected_grad = _np.zeros(shape)
expected_grad[basic_index] = 1
assert same(a.grad.asnumpy(), expected_grad)


if __name__ == '__main__':
import nose
nose.runmodule()
9 changes: 0 additions & 9 deletions tests/python/unittest/test_operator.py
@@ -7529,15 +7529,6 @@ def test_slice_forward_backward(a, index):
for index in index_list:
test_slice_forward_backward(arr, index)

- def test_begin_equals_end(shape, begin, end, step):
-     in_arr = mx.nd.arange(np.prod(shape)).reshape(shape=shape)
-     out_arr = mx.nd.slice(in_arr, begin=begin, end=end, step=step)
-
- assertRaises(MXNetError, test_begin_equals_end, (4,), (2,), (2,), (1,))
- assertRaises(MXNetError, test_begin_equals_end, (1, 5), (None, 3), (None, 3), (-1, 1))
- assertRaises(MXNetError, test_begin_equals_end, (3, 4, 5), (1, 3, 1), (3, 3, 1), (1, -3, 2))
- assertRaises(MXNetError, test_begin_equals_end, (2, 4), (None, 2), (None, 2), (1, -1))

# check numeric gradient
in_data = np.arange(36).reshape(2, 2, 3, 3)
data = mx.sym.Variable('data')
