diff --git a/src/operator/tensor/matrix_op-inl.h b/src/operator/tensor/matrix_op-inl.h
index 217bf10398ad..46bb20a880cb 100644
--- a/src/operator/tensor/matrix_op-inl.h
+++ b/src/operator/tensor/matrix_op-inl.h
@@ -1094,7 +1094,7 @@ inline bool SliceAssignOpShape(const nnvm::NodeAttrs& attrs,
     common::StaticArray<index_t, ndim> begin, end, step;
     GetIndexRange(dshape, param.begin, param.end, param.step, &begin, &end, &step);
     for (int i = 0; i < param.begin.ndim(); ++i) {
-      const int b = begin[i], e = end[i], s = step[i];
+      const index_t b = begin[i], e = end[i], s = step[i];
       SetSliceOpOutputDimSize(dshape, i, b, e, s, &vshape);
     }
   })
@@ -1137,7 +1137,7 @@ void SliceAssignOpForward(const nnvm::NodeAttrs& attrs,
   }
   MSHADOW_TYPE_SWITCH(out.type_flag_, DType, {
     MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
-      int num_threads = val.shape_.FlatTo2D()[0];
+      index_t num_threads = val.shape_.FlatTo2D()[0];
      if (std::is_same<xpu, cpu>::value) {
        num_threads *= val.shape_.get<ndim>()[ndim - 1];
      }
@@ -1241,7 +1241,7 @@ void SliceAssignScalarOpForward(const nnvm::NodeAttrs& attrs,
     return;  // slice_assign of zero-sized subspaced needs no operation.
   }
   for (index_t i = 0; i < param.begin.ndim(); ++i) {
-    const int b = begin[i], e = end[i], s = step[i];
+    const index_t b = begin[i], e = end[i], s = step[i];
     SetSliceOpOutputDimSize(data.shape_, i, b, e, s, &vshape);
   }
   MSHADOW_TYPE_SWITCH_WITH_BOOL(out.type_flag_, DType, {
diff --git a/tests/nightly/test_large_array.py b/tests/nightly/test_large_array.py
index d93d1835a1ec..b7a27c3f4302 100644
--- a/tests/nightly/test_large_array.py
+++ b/tests/nightly/test_large_array.py
@@ -2020,6 +2020,17 @@ def test_sparse_dot():
     assert out.shape == (2, 2)
 
 
+def test_slice_assign():
+    # test _slice_assign
+    A = np.zeros((2**31, 2))
+    A[-1] = np.ones((1))
+    assert A[-1, 0] == 1 and A[-1, 1] == 1
+    # test _slice_assign_scalar
+    B = np.zeros((2**31, 2))
+    B[-1] = 2
+    assert B[-1, 0] == 2 and B[-1, 1] == 2
+
+
 if __name__ == '__main__':
     import nose
     nose.runmodule()