[Numpy] Change semantics of ndim for operators in `src/operator/contrib` (apache#14409)

* Initial commit

* Address comments
junrushao authored and reminisce committed Apr 10, 2019
1 parent 9ad77fc commit da42405
Showing 18 changed files with 41 additions and 50 deletions.
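
All 18 files follow the same pattern. Under the numpy-compatible shape semantics being introduced, a shape with ndim 0 is a legal scalar shape () rather than a placeholder for "shape not known yet", so checks such as `dshape.ndim() == 0` or `ndim() != 0U && Size() != 0U` are replaced with the `mxnet::op::shape_is_none` helper, and parameters that want an empty tuple rather than an unknown shape get explicit defaults (`mxnet::TShape(0)`, `mxnet::Tuple<int>()`). The standalone sketch below mimics the distinction with a mock shape type; it illustrates the idea only and is not MXNet's actual `TShape`/`shape_is_none` implementation.

    // Mock of the old vs. new ndim semantics -- illustration only.
    #include <cassert>
    #include <vector>

    struct MockShape {
      int ndim;                 // -1: unknown (new semantics); 0: scalar shape ()
      std::vector<long> dims;   // per-axis extents when ndim >= 0
    };

    // New-style check: only a negative ndim means "shape not known yet".
    bool shape_is_none(const MockShape& s) { return s.ndim < 0; }

    // Element count of a known shape; a 0-dim (scalar) shape holds one element.
    long size(const MockShape& s) {
      assert(!shape_is_none(s));
      long total = 1;
      for (long d : s.dims) total *= d;
      return total;
    }

    int main() {
      MockShape unknown{-1, {}};   // not inferred yet
      MockShape scalar{0, {}};     // shape (), e.g. the new rescale_grad below
      MockShape vec1{1, {1}};      // shape (1,), the old rescale_grad
      assert(shape_is_none(unknown));
      assert(!shape_is_none(scalar) && size(scalar) == 1);
      assert(!shape_is_none(vec1) && size(vec1) == 1);
      // The old `ndim() == 0` test would have treated `scalar` as unknown.
      return 0;
    }
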
5 changes: 3 additions & 2 deletions src/operator/contrib/adamw-inl.h
@@ -87,8 +87,9 @@ inline bool MPUpdateInferShape(const nnvm::NodeAttrs& attrs,
mxnet::ShapeVector *out_attrs) {
CHECK_EQ(in_attrs->size(), static_cast<size_t>(total_in)) << " in operator " << attrs.name;
CHECK_EQ(out_attrs->size(), static_cast<size_t>(n_out)) << " in operator " << attrs.name;
- // rescale_grad.shape = (1,)
- SHAPE_ASSIGN_CHECK(*in_attrs, total_in - 1, mshadow::Shape1(1));
+ // rescale_grad.shape = ()
+ SHAPE_ASSIGN_CHECK(*in_attrs, total_in - 1, mxnet::TShape());
+ // TODO(@reminisce): change "none" behavior in ElemwiseAttr
return ElemwiseAttr<mxnet::TShape, shape_is_none, shape_assign, true, shape_string, n_in, n_out>(
attrs, in_attrs, out_attrs, mxnet::TShape());
}
6 changes: 3 additions & 3 deletions src/operator/contrib/adaptive_avg_pooling-inl.h
@@ -48,9 +48,9 @@ namespace mxnet {
namespace op {

struct AdaptiveAvgPoolParam : public dmlc::Parameter<AdaptiveAvgPoolParam> {
- mxnet::TShape output_size;
+ mxnet::Tuple<int> output_size;
DMLC_DECLARE_PARAMETER(AdaptiveAvgPoolParam) {
- DMLC_DECLARE_FIELD(output_size).set_default(mxnet::TShape())
+ DMLC_DECLARE_FIELD(output_size).set_default(mxnet::Tuple<int>())
.describe("int (output size) or a tuple of int for output (height, width).");
}
};
@@ -125,7 +125,7 @@ static bool AdaptiveAvgPoolOpInferShape(const nnvm::NodeAttrs& attrs,
CHECK_EQ(out_shape->size(), 1U) << "Output:[data]";
const AdaptiveAvgPoolParam& param = nnvm::get<AdaptiveAvgPoolParam>(attrs.parsed);
mxnet::TShape dshape(in_shape->at(0));
- if (dshape.ndim() == 0) return false;
+ if (mxnet::op::shape_is_none(dshape)) return false;
if (param.output_size.ndim() == 0) {
dshape[2] = 1;
dshape[3] = 1;
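
Two things change here: `output_size` is a pooling hyperparameter rather than a tensor shape, so it becomes a `mxnet::Tuple<int>` with an empty default instead of a `TShape`, and the data-shape check switches to `shape_is_none`. An empty tuple still means "output size not given", which the infer-shape code maps to global pooling. A simplified standalone version of that mapping follows; the single-int-means-square-output case is an assumption based on the parameter's description string, and NCHW layout is assumed.

    #include <cstdio>
    #include <vector>

    // Sketch of how AdaptiveAvgPoolOpInferShape consumes output_size.
    std::vector<long> infer_output(std::vector<long> dshape,                // (N, C, H, W)
                                   const std::vector<int>& output_size) {   // may be empty
      if (output_size.empty()) {               // parameter not given: pool to 1x1
        dshape[2] = 1;
        dshape[3] = 1;
      } else if (output_size.size() == 1) {    // single int: square output (assumed)
        dshape[2] = output_size[0];
        dshape[3] = output_size[0];
      } else {                                 // (height, width)
        dshape[2] = output_size[0];
        dshape[3] = output_size[1];
      }
      return dshape;
    }

    int main() {
      const auto out = infer_output({8, 3, 32, 32}, {7});
      std::printf("%ld %ld %ld %ld\n", out[0], out[1], out[2], out[3]);  // 8 3 7 7
      return 0;
    }
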
2 changes: 1 addition & 1 deletion src/operator/contrib/bilinear_resize-inl.h
@@ -134,7 +134,7 @@ static bool BilinearSampleOpInferShape(const nnvm::NodeAttrs& attrs,
CHECK_EQ(out_shape->size(), 1U) << "Output:[data]";
const BilinearSampleParam& param = nnvm::get<BilinearSampleParam>(attrs.parsed);
mxnet::TShape dshape(in_shape->at(0));
- if (dshape.ndim() == 0) return false;
+ if (mxnet::op::shape_is_none(dshape)) return false;
if (param.scale_height.has_value()) {
dshape[2] = static_cast<int>(param.scale_height.value() * in_shape->at(0)[2]);
} else {
2 changes: 1 addition & 1 deletion src/operator/contrib/boolean_mask.cc
@@ -121,7 +121,7 @@ inline void BooleanMaskForward<cpu>(const nnvm::NodeAttrs& attrs,
const NDArray &out = outputs[0];
CHECK_EQ(axis, 0) << "Not supported yet";
CHECK_EQ(data.shape()[axis], idx.shape()[0]);
- CHECK_EQ(idx.shape().ndim(), 1U);
+ CHECK_EQ(idx.shape().ndim(), 1U);  // idx is required to be 1-d.
// count the number of 1s in `idx`, so that we could know the output dimension
size_t idx_size = idx.shape()[0];
std::vector<int32_t> prefix_sum(idx_size, 0);
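
The comment above describes the counting step; a standalone sketch of it (not the actual `BooleanMaskForward` kernel) shows how a running prefix sum over the 0/1 mask yields both the number of kept rows and each kept row's slot in the compacted output:

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // Returns the input row indices selected by a 0/1 mask, using a prefix sum
    // so each kept row already knows its position in the output.
    std::vector<int64_t> masked_row_ids(const std::vector<int32_t>& idx) {
      std::vector<int32_t> prefix_sum(idx.size(), 0);
      int32_t valid_num = 0;
      for (std::size_t i = 0; i < idx.size(); ++i) {
        prefix_sum[i] = valid_num;   // output slot for row i, if row i is kept
        valid_num += (idx[i] != 0);  // count the 1s in the mask
      }
      std::vector<int64_t> rows(valid_num);
      for (std::size_t i = 0; i < idx.size(); ++i) {
        if (idx[i] != 0) rows[prefix_sum[i]] = static_cast<int64_t>(i);
      }
      return rows;  // e.g. {1, 0, 1, 1} -> {0, 2, 3}
    }
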
4 changes: 3 additions & 1 deletion src/operator/contrib/bounding_box-inl.h
@@ -94,7 +94,9 @@ inline bool BoxNMSShape(const nnvm::NodeAttrs& attrs,
const BoxNMSParam& param = nnvm::get<BoxNMSParam>(attrs.parsed);
CHECK_EQ(in_attrs->size(), 1U);
CHECK_EQ(out_attrs->size(), 2U);
- if (in_attrs->at(0).ndim() == 0U && out_attrs->at(0).ndim() == 0U) {
+ // TODO(@junrushao1994): verify with Joshua Z. Zhang about this operator
+ if (mxnet::op::shape_is_none(in_attrs->at(0))
+     && mxnet::op::shape_is_none(out_attrs->at(0))) {
return false;
}

2 changes: 1 addition & 1 deletion src/operator/contrib/count_sketch-inl.h
@@ -151,7 +151,7 @@ class CountSketchProp : public OperatorProperty {
CHECK_EQ(in_shape->size(), 3) <<"Input:[data, h, s]";
const mxnet::TShape &dshape = (*in_shape)[CountSketch::kData];
// require data to be known
- if (dshape.ndim() == 0) return false;
+ if (mxnet::op::shape_is_none(dshape)) return false;

out_shape->clear();
if (dshape.ndim() == 4) {
14 changes: 7 additions & 7 deletions src/operator/contrib/deformable_convolution-inl.h
@@ -69,11 +69,11 @@ struct DeformableConvolutionParam : public dmlc::Parameter<DeformableConvolutionParam> {
dmlc::optional<int> layout;
DMLC_DECLARE_PARAMETER(DeformableConvolutionParam) {
DMLC_DECLARE_FIELD(kernel).describe("Convolution kernel size: (h, w) or (d, h, w)");
- DMLC_DECLARE_FIELD(stride).set_default(mxnet::TShape())
+ DMLC_DECLARE_FIELD(stride).set_default(mxnet::TShape(0))
.describe("Convolution stride: (h, w) or (d, h, w). Defaults to 1 for each dimension.");
- DMLC_DECLARE_FIELD(dilate).set_default(mxnet::TShape())
+ DMLC_DECLARE_FIELD(dilate).set_default(mxnet::TShape(0))
.describe("Convolution dilate: (h, w) or (d, h, w). Defaults to 1 for each dimension.");
- DMLC_DECLARE_FIELD(pad).set_default(mxnet::TShape())
+ DMLC_DECLARE_FIELD(pad).set_default(mxnet::TShape(0))
.describe("Zero pad for convolution: (h, w) or (d, h, w). Defaults to no padding.");
DMLC_DECLARE_FIELD(num_filter).set_range(1, 100000)
.describe("Convolution filter(channel) number");
@@ -347,9 +347,9 @@ class DeformableConvolutionProp : public OperatorProperty {
param_.Init(kwargs);
if (param_.kernel.ndim() == 2) {
param_.layout = param_.layout ? param_.layout.value() : mshadow::kNCHW;
- if (param_.stride.ndim() == 0) param_.stride = Shape2(1, 1);
- if (param_.dilate.ndim() == 0) param_.dilate = Shape2(1, 1);
- if (param_.pad.ndim() == 0) param_.pad = Shape2(0, 0);
+ if (mxnet::op::shape_is_none(param_.stride)) param_.stride = Shape2(1, 1);
+ if (mxnet::op::shape_is_none(param_.dilate)) param_.dilate = Shape2(1, 1);
+ if (mxnet::op::shape_is_none(param_.pad)) param_.pad = Shape2(0, 0);
} else {
LOG(FATAL) << "not implemented";
}
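
The Init logic is unchanged apart from the predicate: hyperparameters left at their default are filled with per-dimension values once the kernel rank is known. A simplified mirror of that branch, using an empty vector as the "not given" sentinel purely for illustration:

    #include <vector>

    struct ConvHyperParams {
      std::vector<long> kernel, stride, dilate, pad;  // empty == not given (mock sentinel)
    };

    // Fill 2-D defaults, mirroring the kernel.ndim() == 2 branch above.
    void fill_2d_defaults(ConvHyperParams* p) {
      if (p->kernel.size() == 2) {
        if (p->stride.empty()) p->stride = {1, 1};
        if (p->dilate.empty()) p->dilate = {1, 1};
        if (p->pad.empty())    p->pad    = {0, 0};
      }
    }
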
@@ -371,7 +371,7 @@ class DeformableConvolutionProp : public OperatorProperty {
out_shape->resize(1, mxnet::TShape());
const mxnet::TShape &dshp = (*in_shape)[conv::kData];
const mxnet::TShape &oshp = (*in_shape)[conv::kOffset];
- if (dshp.ndim() == 0) return false;
+ if (mxnet::op::shape_is_none(dshp)) return false;
if (param_.kernel.ndim() == 2) {
// 2d conv
CHECK_EQ(dshp.ndim(), 4U) \
32 changes: 9 additions & 23 deletions src/operator/contrib/dgl_graph.cc
@@ -265,28 +265,22 @@ static bool CSRNeighborUniformSampleShape(const nnvm::NodeAttrs& attrs,
out_shape[0] = params.max_num_vertices + 1;
for (size_t i = 0; i < num_subgraphs; i++) {
SHAPE_ASSIGN_CHECK(*out_attrs, i, out_shape);
- success = success &&
-           out_attrs->at(i).ndim() != 0U &&
-           out_attrs->at(i).Size() != 0U;
+ success = success && !mxnet::op::shape_is_none(out_attrs->at(i));
}
// sub_csr
mxnet::TShape out_csr_shape(2);
out_csr_shape[0] = params.max_num_vertices;
out_csr_shape[1] = in_attrs->at(0)[1];
for (size_t i = 0; i < num_subgraphs; i++) {
SHAPE_ASSIGN_CHECK(*out_attrs, i + num_subgraphs, out_csr_shape);
- success = success &&
-           out_attrs->at(i + num_subgraphs).ndim() != 0U &&
-           out_attrs->at(i + num_subgraphs).Size() != 0U;
+ success = success && !mxnet::op::shape_is_none(out_attrs->at(i + num_subgraphs));
}
// sub_layer
mxnet::TShape out_layer_shape(1);
out_layer_shape[0] = params.max_num_vertices;
for (size_t i = 0; i < num_subgraphs; i++) {
SHAPE_ASSIGN_CHECK(*out_attrs, i + 2*num_subgraphs, out_layer_shape);
- success = success &&
-           out_attrs->at(i + 2*num_subgraphs).ndim() != 0U &&
-           out_attrs->at(i + 2*num_subgraphs).Size() != 0U;
+ success = success && !mxnet::op::shape_is_none(out_attrs->at(i + 2 * num_subgraphs));
}

return success;
@@ -323,37 +317,29 @@ static bool CSRNeighborNonUniformSampleShape(const nnvm::NodeAttrs& attrs,
out_shape[0] = params.max_num_vertices + 1;
for (size_t i = 0; i < num_subgraphs; i++) {
SHAPE_ASSIGN_CHECK(*out_attrs, i, out_shape);
- success = success &&
-           out_attrs->at(i).ndim() != 0U &&
-           out_attrs->at(i).Size() != 0U;
+ success = success && !mxnet::op::shape_is_none(out_attrs->at(i));
}
// sub_csr
mxnet::TShape out_csr_shape(2);
out_csr_shape[0] = params.max_num_vertices;
out_csr_shape[1] = in_attrs->at(0)[1];
for (size_t i = 0; i < num_subgraphs; i++) {
SHAPE_ASSIGN_CHECK(*out_attrs, i + num_subgraphs, out_csr_shape);
- success = success &&
-           out_attrs->at(i + num_subgraphs).ndim() != 0U &&
-           out_attrs->at(i + num_subgraphs).Size() != 0U;
+ success = success && !mxnet::op::shape_is_none(out_attrs->at(i + num_subgraphs));
}
// sub_probability
mxnet::TShape out_prob_shape(1);
out_prob_shape[0] = params.max_num_vertices;
for (size_t i = 0; i < num_subgraphs; i++) {
SHAPE_ASSIGN_CHECK(*out_attrs, i + 2*num_subgraphs, out_prob_shape);
- success = success &&
-           out_attrs->at(i + 2*num_subgraphs).ndim() != 0U &&
-           out_attrs->at(i + 2*num_subgraphs).Size() != 0U;
+ success = success && !mxnet::op::shape_is_none(out_attrs->at(i + 2 * num_subgraphs));
}
// sub_layer
mxnet::TShape out_layer_shape(1);
out_layer_shape[0] = params.max_num_vertices;
for (size_t i = 0; i < num_subgraphs; i++) {
SHAPE_ASSIGN_CHECK(*out_attrs, i + 3*num_subgraphs, out_prob_shape);
- success = success &&
-           out_attrs->at(i + 3*num_subgraphs).ndim() != 0U &&
-           out_attrs->at(i + 3*num_subgraphs).Size() != 0U;
+ success = success && !mxnet::op::shape_is_none(out_attrs->at(i + 3 * num_subgraphs));
}

return success;
@@ -1199,7 +1185,7 @@ inline bool EdgeIDShape(const nnvm::NodeAttrs& attrs,
SHAPE_ASSIGN_CHECK(*out_attrs, 0, in_attrs->at(1));
SHAPE_ASSIGN_CHECK(*in_attrs, 1, out_attrs->at(0));
SHAPE_ASSIGN_CHECK(*in_attrs, 2, out_attrs->at(0));
- return out_attrs->at(0).ndim() != 0U && out_attrs->at(0).Size() != 0U;
+ return !mxnet::op::shape_is_none(out_attrs->at(0));
}

inline bool EdgeIDType(const nnvm::NodeAttrs& attrs,
@@ -1357,7 +1343,7 @@ inline bool DGLAdjacencyShape(const nnvm::NodeAttrs& attrs,

SHAPE_ASSIGN_CHECK(*out_attrs, 0, in_attrs->at(0));
SHAPE_ASSIGN_CHECK(*in_attrs, 0, out_attrs->at(0));
- return out_attrs->at(0).ndim() != 0U && out_attrs->at(0).Size() != 0U;
+ return !mxnet::op::shape_is_none(out_attrs->at(0));
}

inline bool DGLAdjacencyType(const nnvm::NodeAttrs& attrs,
2 changes: 1 addition & 1 deletion src/operator/contrib/fft-inl.h
@@ -241,7 +241,7 @@ class FFTProp : public OperatorProperty {
CHECK_EQ(in_shape->size(), 1) <<"Input:[data]";
const mxnet::TShape &dshape = (*in_shape)[fft::kData];
// require data to be known
- if (dshape.ndim() == 0) return false;
+ if (mxnet::op::shape_is_none(dshape)) return false;

out_shape->clear();
if (dshape.ndim() == 4) {
2 changes: 1 addition & 1 deletion src/operator/contrib/ifft-inl.h
@@ -231,7 +231,7 @@ class IFFTProp : public OperatorProperty {
CHECK_EQ(in_shape->size(), 1) <<"Input:[data]";
const mxnet::TShape &dshape = (*in_shape)[ifft::kData];
// require data to be known
- if (dshape.ndim() == 0) return false;
+ if (mxnet::op::shape_is_none(dshape)) return false;

out_shape->clear();
if (dshape.ndim() == 4) {
3 changes: 1 addition & 2 deletions src/operator/contrib/index_copy-inl.h
@@ -76,8 +76,7 @@ inline bool IndexCopyShape(const nnvm::NodeAttrs& attrs,
CHECK_EQ(in_attrs->at(1)[0], in_attrs->at(2)[0]);
SHAPE_ASSIGN_CHECK(*out_attrs, 0, in_attrs->at(0));
SHAPE_ASSIGN_CHECK(*in_attrs, 0, out_attrs->at(0));
- return out_attrs->at(0).ndim() != 0U &&
-        out_attrs->at(0).Size() != 0U;
+ return !mxnet::op::shape_is_none(out_attrs->at(0));
}

} // namespace op
2 changes: 1 addition & 1 deletion src/operator/contrib/multi_proposal-inl.h
@@ -108,7 +108,7 @@ class MultiProposalProp : public OperatorProperty {
using namespace mshadow;
CHECK_EQ(in_shape->size(), 3) << "Input:[cls_prob, bbox_pred, im_info]";
const mxnet::TShape &dshape = in_shape->at(proposal::kClsProb);
- if (dshape.ndim() == 0) return false;
+ if (mxnet::op::shape_is_none(dshape)) return false;
Shape<4> bbox_pred_shape;
bbox_pred_shape = Shape4(dshape[0], dshape[1] * 2, dshape[2], dshape[3]);
SHAPE_ASSIGN_CHECK(*in_shape, proposal::kBBoxPred,
3 changes: 2 additions & 1 deletion src/operator/contrib/nnvm_to_onnx.cc
@@ -417,7 +417,8 @@ std::unordered_map<std::string, mxnet::TShape> GetPlaceholderShapes(
for (uint32_t i = 0; i < shape_inputs.size(); ++i) {
std::string name = ig[ig.input_nodes()[i]].source->attrs.name;
mxnet::TShape shp = shape_inputs[i];
- if (shp.ndim() > 0) {
+ if (!mxnet::op::shape_is_none(shp)) {
+   // TODO(@reminisce): confirm
placeholder_shapes.emplace(name, shp);
}
}
2 changes: 1 addition & 1 deletion src/operator/contrib/optimizer_op.cc
@@ -45,7 +45,7 @@ inline bool GroupAdagradShape(const nnvm::NodeAttrs &attrs,
SHAPE_ASSIGN_CHECK(*in_attrs, 0, out_attrs->at(0));
SHAPE_ASSIGN_CHECK(*in_attrs, 1, out_attrs->at(0));

- return out_attrs->at(0).ndim() != 0U && out_attrs->at(0).Size() != 0U &&
+ return !mxnet::op::shape_is_none(out_attrs->at(0)) &&
(in_attrs->at(0)[0] == in_attrs->at(1)[0]) &&
(in_attrs->at(0)[0] == in_attrs->at(2)[0]);
}
2 changes: 1 addition & 1 deletion src/operator/contrib/proposal-inl.h
@@ -106,7 +106,7 @@ class ProposalProp : public OperatorProperty {
using namespace mshadow;
CHECK_EQ(in_shape->size(), 3) << "Input:[cls_prob, bbox_pred, im_info]";
const mxnet::TShape &dshape = in_shape->at(proposal::kClsProb);
- if (dshape.ndim() == 0) return false;
+ if (mxnet::op::shape_is_none(dshape)) return false;
Shape<4> bbox_pred_shape;
bbox_pred_shape = Shape4(dshape[0], dshape[1] * 2, dshape[2], dshape[3]);
SHAPE_ASSIGN_CHECK(*in_shape, proposal::kBBoxPred,
2 changes: 1 addition & 1 deletion src/operator/contrib/quadratic_op-inl.h
@@ -60,7 +60,7 @@ inline bool QuadraticOpShape(const nnvm::NodeAttrs& attrs,

SHAPE_ASSIGN_CHECK(*out_attrs, 0, in_attrs->at(0));
SHAPE_ASSIGN_CHECK(*in_attrs, 0, out_attrs->at(0));
- return out_attrs->at(0).ndim() != 0U && out_attrs->at(0).Size() != 0U;
+ return !mxnet::op::shape_is_none(out_attrs->at(0));
}
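
This is the generic elementwise shape-inference pattern: propagate whichever side is known to the other, then succeed only once the output shape is known. A mock of the pattern follows; MXNet's `SHAPE_ASSIGN_CHECK`/`shape_assign` do the real work (and fail loudly on a mismatch), and here `std::nullopt` plays the role of an unknown shape.

    #include <optional>
    #include <vector>

    using Shape = std::optional<std::vector<long>>;  // std::nullopt == unknown

    bool shape_is_none(const Shape& s) { return !s.has_value(); }

    // Assign src into dst if dst is unknown; otherwise require the two to agree.
    bool shape_assign(Shape* dst, const Shape& src) {
      if (shape_is_none(src)) return true;
      if (shape_is_none(*dst)) { *dst = src; return true; }
      return *dst == src;
    }

    // Mirror of QuadraticOpShape: output takes the input's shape and vice versa.
    bool quadratic_shape(std::vector<Shape>* in_attrs, std::vector<Shape>* out_attrs) {
      if (!shape_assign(&out_attrs->at(0), in_attrs->at(0))) return false;
      if (!shape_assign(&in_attrs->at(0), out_attrs->at(0))) return false;
      return !shape_is_none(out_attrs->at(0));
    }
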

inline bool QuadraticOpType(const nnvm::NodeAttrs& attrs,
2 changes: 1 addition & 1 deletion src/operator/contrib/sync_batch_norm-inl.h
@@ -482,7 +482,7 @@ class SyncBatchNormProp : public OperatorProperty {
using namespace mshadow;
CHECK_EQ(in_shape->size(), 3U) << "Input:[data, gamma, beta]";
const mxnet::TShape &dshape = in_shape->at(0);
- if (dshape.ndim() == 0) return false;
+ if (mxnet::op::shape_is_none(dshape)) return false;
in_shape->at(1) = mxnet::TShape(Shape1(dshape[1]));
in_shape->at(2) = mxnet::TShape(Shape1(dshape[1]));
out_shape->clear();
4 changes: 3 additions & 1 deletion src/operator/contrib/transformer-inl.h
@@ -41,7 +41,9 @@ static void DivSqrtDimForward_(const nnvm::NodeAttrs& attrs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs) {
mshadow::Stream<xpu> *s = ctx.get_stream<xpu>();
- double sqrt_dim = std::sqrt(static_cast<double>(inputs[0].shape_[inputs[0].ndim() - 1]));
+ CHECK_GE(inputs[0].ndim(), 1);
+ int last_idx = inputs[0].ndim() - 1;
+ double sqrt_dim = std::sqrt(static_cast<double>(inputs[0].shape_[last_idx]));
MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
mxnet_op::Kernel<mxnet_op::op_with_req<mshadow_op::div, Req>, xpu>::Launch(
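
The new `CHECK_GE` matters precisely because a 0-dim tensor is now legal: without at least one axis there is no "last dimension" to index. A scalar-code sketch of the operator's math, dividing every element by the square root of the last-axis length (a simplified stand-in for the kernel launch, not the actual `DivSqrtDimForward_`):

    #include <cassert>
    #include <cmath>
    #include <vector>

    // Divide each element by sqrt(size of the last axis), as in attention-style
    // scaling by 1/sqrt(d); guards against 0-dim inputs first.
    void div_sqrt_last_dim(const std::vector<long>& shape, std::vector<float>* data) {
      assert(shape.size() >= 1);  // a 0-dim input has no last axis
      const double sqrt_dim = std::sqrt(static_cast<double>(shape.back()));
      for (float& x : *data) x = static_cast<float>(x / sqrt_dim);
    }
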
