This repository has been archived by the owner on Nov 17, 2023. It is now read-only.

[numpy] Fix unit tests after using numpy-compatible shape definition #14521

5 changes: 3 additions & 2 deletions src/c_api/c_api_common.h
@@ -91,8 +91,9 @@ struct MXAPIThreadLocalEntry {
     data->resize(shapes.size());
     size_t size = 0;
     for (const auto& s : shapes) {
-      if (s.ndim() > 0);
-      size += s.ndim();
+      if (s.ndim() > 0) {
+        size += s.ndim();
+      }
     }
     buffer->resize(size);
     int *ptr = buffer->data();
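The removed code is a classic stray-semicolon bug: the `;` after `if (s.ndim() > 0)` terminates the statement, so `size += s.ndim()` ran unconditionally. That was harmless while unknown shapes had `ndim() == 0`, but under the numpy-compatible convention an unknown ndim is -1, which would have shrunk the computed buffer size. A minimal standalone sketch of the fixed accumulation; `Shape` and `FlattenedSize` are stand-in names, not mxnet types:

#include <cstddef>
#include <cstdint>
#include <vector>

// Stand-in for mxnet::TShape: ndim() == -1 means the shape is unknown.
struct Shape {
  std::vector<int64_t> dims;
  bool known = true;
  int ndim() const { return known ? static_cast<int>(dims.size()) : -1; }
};

// Total number of dims to flatten into one buffer, skipping unknown shapes.
// With the old stray-semicolon bug, an unknown shape would have added -1 here.
size_t FlattenedSize(const std::vector<Shape>& shapes) {
  size_t size = 0;
  for (const auto& s : shapes) {
    if (s.ndim() > 0) {
      size += s.ndim();
    }
  }
  return size;
}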
5 changes: 5 additions & 0 deletions src/common/utils.h
@@ -746,6 +746,11 @@ inline void ParallelCopy(DType* dst, const DType* src, index_t size) {
  * 4. -1 dim size means the dimension's size is unknown.
  * so that operator's infer shape function can work in backend.
  * \param shape to be converted.
+ * Note: It is possible that the shape to be converted is already
+ * numpy compatible. For example, when a subgraph operator's infer
+ * shape function is called from the infer shape pass of the whole
+ * graph, its input/output shapes have been converted to numpy
+ * compatible shapes.
  */
 inline void ConvertToNumpyShape(mxnet::TShape* shape) {
   if (shape->ndim() == 0) {  // legacy shape ndim = 0 means unknown
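The added note is effectively an idempotency guarantee: converting an already-converted shape must be a no-op. A sketch of the conversion on a plain vector in place of mxnet::TShape (`ConvertToNumpyShapeSketch` is a hypothetical name, not the library function):

#include <cstdint>
#include <vector>

// Legacy convention: ndim == 0 means the whole shape is unknown, and a
// dim size of 0 means that dimension is unknown. The numpy-compatible
// convention uses -1 for both. Running this twice changes nothing,
// which is the property the new Note in the doc comment calls out.
void ConvertToNumpyShapeSketch(std::vector<int64_t>* shape, int* ndim) {
  if (*ndim == 0) {        // legacy "whole shape unknown"
    *ndim = -1;            // numpy-compatible sentinel
    shape->clear();
  } else {                 // ndim known: translate per-dim sentinels
    for (auto& d : *shape) {
      if (d == 0) d = -1;  // legacy unknown dim -> numpy-compatible -1
    }
  }
}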
4 changes: 2 additions & 2 deletions src/operator/leaky_relu-inl.h
@@ -338,10 +338,10 @@ class LeakyReLUProp : public OperatorProperty {
       CHECK_EQ(in_shape->size(), 1U) << "Input:[data]";
     }
     const mxnet::TShape &dshape = in_shape->at(leakyrelu::kData);
-    if (dshape.ndim() == 0) return false;
+    if (!mxnet::ndim_is_known(dshape)) return false;
     if (param_.act_type == leakyrelu::kPReLU) {
       const mxnet::TShape &gshape = in_shape->at(leakyrelu::kGamma);
-      if (gshape.ndim() == 0) {
+      if (!mxnet::ndim_is_known(gshape)) {
         in_shape->at(leakyrelu::kGamma) = mxnet::TShape(Shape1(dshape[1]));
       }
       if (dshape == gshape) {
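This hunk shows the pattern repeated throughout the PR: `ndim() == 0` no longer means "unknown", since a numpy-style scalar legitimately has zero dimensions, so the checks move to explicit helpers. Presumably the helpers reduce to comparisons against the -1 sentinel, roughly as sketched below (the real definitions live in mxnet's tuple header, including an overload of dim_size_is_known taking a shape and an index, as used in deconvolution-inl.h):

#include <cstdint>

// Sketch only: -1 is the "unknown" sentinel for both ndim and dim size.
inline bool ndim_is_known(const int ndim) { return ndim != -1; }
inline bool dim_size_is_known(const int64_t dim_size) { return dim_size != -1; }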
12 changes: 7 additions & 5 deletions src/operator/nn/deconvolution-inl.h
@@ -134,11 +134,13 @@ struct DeconvolutionParam : public dmlc::Parameter<DeconvolutionParam> {
       for (size_t i = 0; i < ndim; i++) {
         // input.ndim() can be larger than ndim, in case that the complete input
         // shape was passed and not only the ndim last ones
-        o_pad[i] = stride[i] * (input[(input_ndim - ndim) + i] - 1) + DilatedKernelSize(i);
-        CHECK_GE(o_pad[i], target_shape[i]) << "too big target shape";
-        o_pad[i] -= target_shape[i];
-        o_adj[i] = o_pad[i] % 2;
-        o_pad[i] = (o_pad[i] + 1) / 2;
+        if (mxnet::dim_size_is_known(input, input_ndim - ndim + i)) {
+          o_pad[i] = stride[i] * (input[(input_ndim - ndim) + i] - 1) + DilatedKernelSize(i);
+          CHECK_GE(o_pad[i], target_shape[i]) << "too big target shape";
+          o_pad[i] -= target_shape[i];
+          o_adj[i] = o_pad[i] % 2;
+          o_pad[i] = (o_pad[i] + 1) / 2;
+        }
       }
     } else {
       for (size_t i = 0; i < ndim; i++) {
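The guarded block derives padding from a user-supplied target shape, and the arithmetic is worth checking once by hand. A self-contained example with hypothetical numbers (stride 2, known input dim 5, dilated kernel size 3, target 10); the unpadded output would be 2 * (5 - 1) + 3 = 11, leaving one unit of total padding to split between pad and output adjustment:

#include <cassert>

int main() {
  const int stride = 2, input = 5, dilated_ksize = 3, target = 10;
  int o_pad = stride * (input - 1) + dilated_ksize;  // 11: unpadded output size
  assert(o_pad >= target);                           // the "too big target shape" check
  o_pad -= target;                                   // 1: total padding needed
  const int o_adj = o_pad % 2;                       // 1: odd remainder becomes adjustment
  o_pad = (o_pad + 1) / 2;                           // 1: symmetric padding per side
  // The deconvolution output formula then recovers the target exactly:
  assert(stride * (input - 1) + dilated_ksize - 2 * o_pad + o_adj == target);
  return 0;
}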
50 changes: 37 additions & 13 deletions src/operator/nn/deconvolution.cc
@@ -54,7 +54,7 @@ static bool DeconvolutionShape(const nnvm::NodeAttrs& attrs,
   }
   out_shape->resize(1, mxnet::TShape());
   const mxnet::TShape &dshape = (*in_shape)[deconv::kData];
-  if (!shape_is_known(dshape)) return false;
+  if (!mxnet::ndim_is_known(dshape)) return false;
 
   if (param_.kernel.ndim() == 1) {
     // 1d conv
@@ -90,8 +90,12 @@ static bool DeconvolutionShape(const nnvm::NodeAttrs& attrs,
     Shape<3> oshape;
     oshape[0] = dshape_ncw[0];
     oshape[1] = param_.num_filter;
-    oshape[2] = param_.stride[0] * (dshape_ncw[2] - 1) +
-        dilated_ksize_x - 2 * o_pad[0] + o_adj[0];
+    if (mxnet::dim_size_is_known(dshape_ncw[2])) {
+      oshape[2] = param_.stride[0] * (dshape_ncw[2] - 1) +
+          dilated_ksize_x - 2 * o_pad[0] + o_adj[0];
+    } else {
+      oshape[2] = -1;
+    }
 
     if (param_.target_shape.ndim() > 0) {
       if (param_.target_shape[0] > 0) {
@@ -141,10 +145,18 @@ static bool DeconvolutionShape(const nnvm::NodeAttrs& attrs,
     Shape<4> oshape;
     oshape[0] = dshape_nchw[0];
     oshape[1] = param_.num_filter;
-    oshape[2] = param_.stride[0] * (dshape_nchw[2] - 1) +
-        dilated_ksize_y - 2 * o_pad[0] + o_adj[0];
-    oshape[3] = param_.stride[1] * (dshape_nchw[3] - 1) +
-        dilated_ksize_x - 2 * o_pad[1] + o_adj[1];
+    if (mxnet::dim_size_is_known(dshape_nchw[2])) {
+      oshape[2] = param_.stride[0] * (dshape_nchw[2] - 1) +
+          dilated_ksize_y - 2 * o_pad[0] + o_adj[0];
+    } else {
+      oshape[2] = -1;
+    }
+    if (mxnet::dim_size_is_known(dshape_nchw[3])) {
+      oshape[3] = param_.stride[1] * (dshape_nchw[3] - 1) +
+          dilated_ksize_x - 2 * o_pad[1] + o_adj[1];
+    } else {
+      oshape[3] = -1;
+    }
 
     if (param_.target_shape.ndim() > 1) {
       if (param_.target_shape[0] > 0) {
@@ -203,12 +215,24 @@ static bool DeconvolutionShape(const nnvm::NodeAttrs& attrs,
     Shape<5> oshape;
     oshape[0] = dshape_ncdhw[0];
     oshape[1] = param_.num_filter;
-    oshape[2] = param_.stride[0] * (dshape_ncdhw[2] - 1) +
-        dilated_ksize_d - 2 * o_pad[0] + o_adj[0];
-    oshape[3] = param_.stride[1] * (dshape_ncdhw[3] - 1) +
-        dilated_ksize_y - 2 * o_pad[1] + o_adj[1];
-    oshape[4] = param_.stride[2] * (dshape_ncdhw[4] - 1) +
-        dilated_ksize_x - 2 * o_pad[2] + o_adj[2];
+    if (mxnet::dim_size_is_known(dshape_ncdhw[2])) {
+      oshape[2] = param_.stride[0] * (dshape_ncdhw[2] - 1) +
+          dilated_ksize_d - 2 * o_pad[0] + o_adj[0];
+    } else {
+      oshape[2] = -1;
+    }
+    if (mxnet::dim_size_is_known(dshape_ncdhw[3])) {
+      oshape[3] = param_.stride[1] * (dshape_ncdhw[3] - 1) +
+          dilated_ksize_y - 2 * o_pad[1] + o_adj[1];
+    } else {
+      oshape[3] = -1;
+    }
+    if (mxnet::dim_size_is_known(dshape_ncdhw[4])) {
+      oshape[4] = param_.stride[2] * (dshape_ncdhw[4] - 1) +
+          dilated_ksize_x - 2 * o_pad[2] + o_adj[2];
+    } else {
+      oshape[4] = -1;
+    }
 
     if (param_.target_shape.ndim() > 2) {
       if (param_.target_shape[0] > 0) {
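All three branches (1-d, 2-d, 3-d) apply the same per-dimension rule, now with -1 propagation for unknown dims. Condensed into one hypothetical helper (a sketch of the rule, not code from the PR):

#include <cstdint>

// A known input dim yields a concrete output dim via the deconvolution
// formula; an unknown input dim (-1) stays unknown in the output, to be
// filled in by a later inference pass once more information is available.
inline int64_t DeconvOutDim(int64_t in_dim, int64_t stride,
                            int64_t dilated_ksize, int64_t pad, int64_t adj) {
  if (in_dim == -1) return -1;  // unknown stays unknown
  return stride * (in_dim - 1) + dilated_ksize - 2 * pad + adj;
}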
2 changes: 1 addition & 1 deletion src/operator/tensor/matrix_op-inl.h
@@ -594,7 +594,7 @@ void SliceCsrImpl(const SliceParam &param, const OpContext& ctx,
   mxnet::TShape begin(N, -1), end(N, -1);
   for (int i = 0; i < N; ++i) {
     int s = 0;
-    if (param.begin[i]) {
+    if (i < param.begin.ndim() && param.begin[i]) {
       s = *param.begin[i];
       if (s < 0) s += ishape[i];
     }
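The crash being fixed: `param.begin` is a tuple of optional indices that can carry fewer entries than the array has dimensions, and the old code indexed it unconditionally. A sketch of the guarded pattern, with std::optional standing in for dmlc::optional and `SliceStart` as a hypothetical name:

#include <cstdint>
#include <optional>
#include <vector>

// Omitted or empty entries in `begin` default to slicing from index 0.
int64_t SliceStart(const std::vector<std::optional<int64_t>>& begin,
                   int i, int64_t dim_size) {
  int64_t s = 0;
  if (i < static_cast<int>(begin.size()) && begin[i]) {
    s = *begin[i];
    if (s < 0) s += dim_size;  // negative indices count from the end
  }
  return s;
}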
6 changes: 5 additions & 1 deletion tests/python/unittest/test_ndarray.py
@@ -120,7 +120,11 @@ def test_ndarray_setitem():
 
     # numpy assignment for empty axis
     for trivial_shape in [(), (1,), (1, 1), (1, 1, 1)]:
-        x = mx.nd.zeros(trivial_shape)
+        if trivial_shape == tuple():
+            with mx.numpy.enable_np_comp():
+                x = mx.nd.zeros(trivial_shape)
+        else:
+            x = mx.nd.zeros(trivial_shape)
         x[:] = np.ones(trivial_shape)
         x_np = np.ones(trivial_shape, dtype=x.dtype)
         assert x.shape == trivial_shape