Commit 96ca827

yijunc committed Jun 9, 2020
1 parent 028d01d commit 96ca827
Showing 7 changed files with 7 additions and 168 deletions.
4 changes: 4 additions & 0 deletions .gitignore
@@ -121,6 +121,10 @@ cmake_install.cmake
# Mac OS X
.DS_Store

# Windows
windows_package.7z
windows_package

#Notebook Automated Test
!tests/nightly/test_tutorial_config.txt
!tests/nightly/TestNotebook
6 changes: 0 additions & 6 deletions src/operator/mshadow_op.h
@@ -209,7 +206,6 @@ MXNET_BINARY_MATH_OP_NC(left, a);

MXNET_BINARY_MATH_OP_NC(right, b);

#ifndef _WIN32
struct mixed_plus {
template<typename DType,
typename std::enable_if<std::is_integral<DType>::value, int>::type = 0>
@@ -347,8 +346,6 @@ struct mixed_rpower {
return static_cast<double>(math::pow(b, a));
}
};
#endif


#pragma GCC diagnostic push
#if __GNUC__ >= 7
@@ -584,7 +581,6 @@ MXNET_BINARY_MATH_OP(rpower, math::pow(b, a));
MXNET_BINARY_MATH_OP(rpower_grad, math::id(a) * math::log(b));

MXNET_BINARY_MATH_OP(arctan2, math::atan2(a, b));

MXNET_BINARY_MATH_OP(arctan2_grad, math::id(b) / (math::id(a * a + b * b)));

MXNET_BINARY_MATH_OP(arctan2_rgrad, -math::id(a) / (math::id(a * a + b * b)));
@@ -819,7 +815,6 @@ struct mod : public mxnet_op::tunable {
}
};

#ifndef _WIN32
struct mixed_mod {
template<typename DType,
typename std::enable_if<std::is_integral<DType>::value, int>::type = 0>
@@ -865,7 +860,6 @@ struct mixed_rmod {
return mod::Map(b, static_cast<double>(a));
}
};
#endif

struct fmod : public mxnet_op::tunable {
template<typename DType>
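The `mixed_plus`, `mixed_mod`, and `mixed_rmod` functors in this file use `std::enable_if` (SFINAE) to select an overload by the dtype of the integer-valued operand and promote it to the floating-point type of the other operand; the commit drops the `#ifndef _WIN32` guards so the same functors now build on every platform. Below is a minimal, self-contained sketch of that promotion pattern — `mixed_plus_sketch` and its overload set are illustrative stand-ins, not the mshadow_op code:

```cpp
// Minimal sketch (not the MXNet source): a mixed-precision "plus" functor in the
// spirit of mshadow_op::mixed_plus. The integer-typed operand is promoted to the
// floating-point type of the other operand before the arithmetic happens.
#include <cstdio>
#include <type_traits>

struct mixed_plus_sketch {
  // Integral lhs, float rhs -> float result.
  template <typename DType,
            typename std::enable_if<std::is_integral<DType>::value, int>::type = 0>
  static float Map(DType a, float b) {
    return static_cast<float>(a) + b;
  }

  // Integral or floating-point lhs, double rhs -> double result.
  template <typename DType,
            typename std::enable_if<std::is_integral<DType>::value ||
                                        std::is_floating_point<DType>::value,
                                    int>::type = 0>
  static double Map(DType a, double b) {
    return static_cast<double>(a) + b;
  }
};

int main() {
  std::printf("%f\n", mixed_plus_sketch::Map(3, 1.5f));  // int + float  -> 4.5 (float)
  std::printf("%f\n", mixed_plus_sketch::Map(3, 2.5));   // int + double -> 5.5 (double)
  return 0;
}
```

The design point is that the widening cast happens inside the functor, so the element-wise kernel itself never has to branch on dtype combinations.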
2 changes: 0 additions & 2 deletions src/operator/mxnet_op.h
@@ -867,7 +867,6 @@ struct op_with_req {
KERNEL_ASSIGN(out[i], req, OP::Map(lhs[i], rhs[i]));
}

#ifndef _WIN32
/*! \brief inputs are two tensors with a half_t output tensor */
template<typename DType,
typename std::enable_if<std::is_integral<DType>::value, int>::type = 0>
@@ -921,7 +920,6 @@ struct op_with_req {
MSHADOW_XINLINE static void Map(index_t i, double *out, const DType *lhs, const double value) {
KERNEL_ASSIGN(out[i], req, OP::Map(lhs[i], value));
}
#endif

/*! \brief inputs are two tensors with a float output tensor */
template<typename DType,
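The guards removed here sat around `op_with_req::Map` overloads whose output buffer has a wider floating type than the inputs (the `half_t`/`float`/`double` output cases). A hedged sketch of that shape of kernel, with a plain assignment standing in for `KERNEL_ASSIGN` and simplified names that are not the MXNet implementation:

```cpp
// Hedged sketch of the idea behind op_with_req's mixed-type Map overloads: apply
// a binary functor element-wise to narrow-typed inputs and write into a wider
// float output buffer.
#include <cstdint>
#include <cstdio>
#include <vector>

template <typename OP>
struct op_with_req_sketch {
  // Two integral input tensors, one float output tensor.
  template <typename DType>
  static void Map(int64_t i, float* out, const DType* lhs, const DType* rhs) {
    out[i] = OP::Map(lhs[i], rhs[i]);  // real code: KERNEL_ASSIGN(out[i], req, OP::Map(...))
  }
};

struct plus_sketch {
  template <typename DType>
  static float Map(DType a, DType b) {
    return static_cast<float>(a) + static_cast<float>(b);
  }
};

int main() {
  std::vector<int32_t> lhs{1, 2, 3}, rhs{10, 20, 30};
  std::vector<float> out(3);
  for (int64_t i = 0; i < 3; ++i) {
    op_with_req_sketch<plus_sketch>::Map(i, out.data(), lhs.data(), rhs.data());
  }
  std::printf("%.1f %.1f %.1f\n", out[0], out[1], out[2]);  // 11.0 22.0 33.0
  return 0;
}
```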
53 changes: 0 additions & 53 deletions src/operator/numpy/np_elemwise_broadcast_op.cc
@@ -60,7 +60,6 @@ bool NumpyBinaryMixedPrecisionType(const nnvm::NodeAttrs& attrs,
return true;
}

#ifndef _WIN32
#define MXNET_OPERATOR_REGISTER_NP_BINARY_MIXED_PRECISION(name) \
NNVM_REGISTER_OP(name) \
.set_num_inputs(2) \
@@ -81,40 +80,12 @@ bool NumpyBinaryMixedPrecisionType(const nnvm::NodeAttrs& attrs,
}) \
.add_argument("lhs", "NDArray-or-Symbol", "First input to the function") \
.add_argument("rhs", "NDArray-or-Symbol", "Second input to the function")
#else
#define MXNET_OPERATOR_REGISTER_NP_BINARY_MIXED_PRECISION(name) \
NNVM_REGISTER_OP(name) \
.set_num_inputs(2) \
.set_num_outputs(1) \
.set_attr<nnvm::FListInputNames>("FListInputNames", \
[](const NodeAttrs& attrs) { \
return std::vector<std::string>{"lhs", "rhs"}; \
}) \
.set_attr<mxnet::FInferShape>("FInferShape", BinaryBroadcastShape) \
.set_attr<nnvm::FInferType>("FInferType", NumpyBinaryMixedPrecisionType) \
.set_attr<nnvm::FInplaceOption>("FInplaceOption", \
[](const NodeAttrs& attrs){ \
return std::vector<std::pair<int, int> >{{0, 0}, {1, 0}}; \
}) \
.set_attr<FResourceRequest>("FResourceRequest", \
[](const NodeAttrs& attrs) { \
return std::vector<ResourceRequest>{ResourceRequest::kTempSpace}; \
}) \
.add_argument("lhs", "NDArray-or-Symbol", "First input to the function") \
.add_argument("rhs", "NDArray-or-Symbol", "Second input to the function")
#endif

MXNET_OPERATOR_REGISTER_NP_BINARY_MIXED_PRECISION(_npi_add)
#ifndef _WIN32
.set_attr<FCompute>(
"FCompute<cpu>",
NumpyBinaryBroadcastComputeWithBool<cpu, op::mshadow_op::plus, op::mshadow_op::mixed_plus,
op::mshadow_op::mixed_plus>)
#else
.set_attr<FCompute>(
"FCompute<cpu>",
NumpyBinaryBroadcastComputeWithBool<cpu, op::mshadow_op::plus>)
#endif
.set_attr<nnvm::FGradient>("FGradient", ElemwiseGradUseIn{"_backward_npi_broadcast_add"});

NNVM_REGISTER_OP(_backward_npi_broadcast_add)
@@ -133,16 +104,10 @@ NNVM_REGISTER_OP(_backward_npi_broadcast_add)
mshadow_op::posone>);

MXNET_OPERATOR_REGISTER_NP_BINARY_MIXED_PRECISION(_npi_subtract)
#ifndef _WIN32
.set_attr<FCompute>(
"FCompute<cpu>",
NumpyBinaryBroadcastCompute<cpu, op::mshadow_op::minus, op::mshadow_op::mixed_minus,
op::mshadow_op::mixed_rminus>)
#else
.set_attr<FCompute>(
"FCompute<cpu>",
NumpyBinaryBroadcastCompute<cpu, op::mshadow_op::minus>)
#endif
.set_attr<nnvm::FGradient>("FGradient", ElemwiseGradUseIn{"_backward_npi_broadcast_sub"});

NNVM_REGISTER_OP(_backward_npi_broadcast_sub)
@@ -161,16 +126,10 @@ NNVM_REGISTER_OP(_backward_npi_broadcast_sub)
mshadow_op::negone>);

MXNET_OPERATOR_REGISTER_NP_BINARY_MIXED_PRECISION(_npi_multiply)
#ifndef _WIN32
.set_attr<FCompute>(
"FCompute<cpu>",
NumpyBinaryBroadcastComputeWithBool<cpu, op::mshadow_op::mul, op::mshadow_op::mixed_mul,
op::mshadow_op::mixed_mul>)
#else
.set_attr<FCompute>(
"FCompute<cpu>",
NumpyBinaryBroadcastComputeWithBool<cpu, op::mshadow_op::mul>)
#endif
.set_attr<nnvm::FGradient>("FGradient", ElemwiseGradUseIn{"_backward_npi_broadcast_mul"});

NNVM_REGISTER_OP(_backward_npi_broadcast_mul)
@@ -189,16 +148,10 @@ NNVM_REGISTER_OP(_backward_npi_broadcast_mul)
mshadow_op::left>);

MXNET_OPERATOR_REGISTER_NP_BINARY_MIXED_PRECISION(_npi_mod)
#ifndef _WIN32
.set_attr<FCompute>(
"FCompute<cpu>",
NumpyBinaryBroadcastCompute<cpu, op::mshadow_op::mod, op::mshadow_op::mixed_mod,
op::mshadow_op::mixed_rmod>)
#else
.set_attr<FCompute>(
"FCompute<cpu>",
NumpyBinaryBroadcastCompute<cpu, op::mshadow_op::mod>)
#endif
.set_attr<nnvm::FGradient>("FGradient", ElemwiseGradUseIn{"_backward_npi_broadcast_mod"});

NNVM_REGISTER_OP(_backward_npi_broadcast_mod)
@@ -217,16 +170,10 @@ NNVM_REGISTER_OP(_backward_npi_broadcast_mod)
mshadow_op::mod_rgrad>);

MXNET_OPERATOR_REGISTER_NP_BINARY_MIXED_PRECISION(_npi_power)
#ifndef _WIN32
.set_attr<FCompute>(
"FCompute<cpu>",
NumpyBinaryBroadcastComputeWithBool<cpu, op::mshadow_op::power, op::mshadow_op::mixed_power,
op::mshadow_op::mixed_rpower>)
#else
.set_attr<FCompute>(
"FCompute<cpu>",
NumpyBinaryBroadcastComputeWithBool<cpu, op::mshadow_op::power>)
#endif
.set_attr<nnvm::FGradient>("FGradient", ElemwiseGradUseIn{"_backward_npi_broadcast_power"});

NNVM_REGISTER_OP(_backward_npi_broadcast_power)
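With the Windows-only macro variant gone, `MXNET_OPERATOR_REGISTER_NP_BINARY_MIXED_PRECISION` has a single definition, and each operator is registered once with the three functors (`OP` plus the `LOP`/`ROP` mixed functors) instead of falling back to the single-functor kernel and a `kTempSpace` request on Windows. The sketch below shows only the general builder-style registration pattern the macro wraps; `REGISTER_BINARY_OP`, `Op`, and `Registry()` are invented for illustration and are not the nnvm API:

```cpp
// Self-contained sketch of a builder-style op registry: one entry per operator,
// with chained setters and a compute function.
#include <functional>
#include <iostream>
#include <map>
#include <string>
#include <utility>

struct Op {
  int num_inputs = 0;
  int num_outputs = 0;
  std::function<double(double, double)> fcompute;
  Op& set_num_inputs(int n) { num_inputs = n; return *this; }
  Op& set_num_outputs(int n) { num_outputs = n; return *this; }
  Op& set_fcompute(std::function<double(double, double)> f) {
    fcompute = std::move(f);
    return *this;
  }
};

std::map<std::string, Op>& Registry() {
  static std::map<std::string, Op> registry;
  return registry;
}

// After the change there is a single macro definition for every platform.
#define REGISTER_BINARY_OP(name) \
  Registry()[#name]              \
      .set_num_inputs(2)         \
      .set_num_outputs(1)

int main() {
  REGISTER_BINARY_OP(_npi_add).set_fcompute([](double a, double b) { return a + b; });
  REGISTER_BINARY_OP(_npi_multiply).set_fcompute([](double a, double b) { return a * b; });
  std::cout << Registry()["_npi_add"].fcompute(2, 3) << "\n";       // 5
  std::cout << Registry()["_npi_multiply"].fcompute(2, 3) << "\n";  // 6
  return 0;
}
```

In the real code the chained setters attach shape and type inference, in-place options, and the gradient node in the same fluent style.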
30 changes: 0 additions & 30 deletions src/operator/numpy/np_elemwise_broadcast_op.cu
@@ -29,80 +29,50 @@ namespace mxnet {
namespace op {

NNVM_REGISTER_OP(_npi_add)
#ifndef _WIN32
.set_attr<FCompute>(
"FCompute<gpu>",
NumpyBinaryBroadcastComputeWithBool<gpu, op::mshadow_op::plus, op::mshadow_op::mixed_plus,
op::mshadow_op::mixed_plus>);
#else
.set_attr<FCompute>(
"FCompute<gpu>",
NumpyBinaryBroadcastComputeWithBool<gpu, op::mshadow_op::plus>);
#endif

NNVM_REGISTER_OP(_backward_npi_broadcast_add)
.set_attr<FCompute>("FCompute<gpu>", NumpyBinaryBackwardUseIn<gpu, mshadow_op::posone,
mshadow_op::posone>);

NNVM_REGISTER_OP(_npi_subtract)
#ifndef _WIN32
.set_attr<FCompute>(
"FCompute<gpu>",
NumpyBinaryBroadcastCompute<gpu, op::mshadow_op::minus, op::mshadow_op::mixed_minus,
op::mshadow_op::mixed_rminus>);
#else
.set_attr<FCompute>(
"FCompute<gpu>",
NumpyBinaryBroadcastCompute<gpu, op::mshadow_op::minus>);
#endif

NNVM_REGISTER_OP(_backward_npi_broadcast_sub)
.set_attr<FCompute>("FCompute<gpu>", NumpyBinaryBackwardUseIn<gpu, mshadow_op::posone,
mshadow_op::negone>);

NNVM_REGISTER_OP(_npi_multiply)
#ifndef _WIN32
.set_attr<FCompute>(
"FCompute<gpu>",
NumpyBinaryBroadcastComputeWithBool<gpu, op::mshadow_op::mul, op::mshadow_op::mixed_mul,
op::mshadow_op::mixed_mul>);
#else
.set_attr<FCompute>(
"FCompute<gpu>",
NumpyBinaryBroadcastComputeWithBool<gpu, op::mshadow_op::mul>);
#endif

NNVM_REGISTER_OP(_backward_npi_broadcast_mul)
.set_attr<FCompute>("FCompute<gpu>", NumpyBinaryBackwardUseIn<gpu, mshadow_op::right,
mshadow_op::left>);

NNVM_REGISTER_OP(_npi_mod)
#ifndef _WIN32
.set_attr<FCompute>(
"FCompute<gpu>",
NumpyBinaryBroadcastCompute<gpu, op::mshadow_op::mod, op::mshadow_op::mixed_mod,
op::mshadow_op::mixed_rmod>);
#else
.set_attr<FCompute>(
"FCompute<gpu>",
NumpyBinaryBroadcastCompute<gpu, op::mshadow_op::mod>);
#endif

NNVM_REGISTER_OP(_backward_npi_broadcast_mod)
.set_attr<FCompute>("FCompute<gpu>", NumpyBinaryBackwardUseIn<gpu, mshadow_op::mod_grad,
mshadow_op::mod_rgrad>);

NNVM_REGISTER_OP(_npi_power)
#ifndef _WIN32
.set_attr<FCompute>(
"FCompute<gpu>",
NumpyBinaryBroadcastComputeWithBool<gpu, op::mshadow_op::power, op::mshadow_op::mixed_power,
op::mshadow_op::mixed_rpower>);
#else
.set_attr<FCompute>(
"FCompute<gpu>",
NumpyBinaryBroadcastComputeWithBool<gpu, op::mshadow_op::power>);
#endif

NNVM_REGISTER_OP(_backward_npi_broadcast_power)
.set_attr<FCompute>("FCompute<gpu>", NumpyBinaryBackwardUseIn<gpu, mshadow_op::power_grad,
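The `.cu` registrations mirror the `.cc` ones but bind `"FCompute<gpu>"`, i.e. the same `NumpyBinaryBroadcastCompute*` templates instantiated with the GPU device type. A small sketch of that device-tag pattern — the `cpu`/`gpu` structs and the printout are illustrative stand-ins, not mshadow's device types:

```cpp
// Sketch of the device-tag pattern behind the twin .cc/.cu registrations: one
// compute template, instantiated once per device type.
#include <iostream>

struct cpu { static const char* Name() { return "cpu"; } };
struct gpu { static const char* Name() { return "gpu"; } };

struct plus_sketch {
  static double Map(double a, double b) { return a + b; }
};

template <typename xpu, typename OP>
void NumpyBinaryBroadcastComputeSketch(double a, double b) {
  // The real kernels launch on the stream of the chosen device; here the tag is
  // only printed so the two instantiations are visible.
  std::cout << xpu::Name() << " result: " << OP::Map(a, b) << "\n";
}

int main() {
  NumpyBinaryBroadcastComputeSketch<cpu, plus_sketch>(2.0, 3.0);  // what the .cc file binds
  NumpyBinaryBroadcastComputeSketch<gpu, plus_sketch>(2.0, 3.0);  // what the .cu file binds
  return 0;
}
```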
76 changes: 1 addition & 75 deletions src/operator/numpy/np_elemwise_broadcast_op.h
@@ -41,7 +41,6 @@ inline void PrintErrorMessage(const std::string& op_name, const int dtype1, cons
<< " yet...";
}

#ifndef _WIN32
template<typename xpu, typename OP>
void MixedAllRealBinaryElemwiseCompute(const std::string& op_name,
const OpContext& ctx,
@@ -216,13 +215,9 @@ void MixedAllRealBinaryBroadcastCompute(const std::string& op_name,
}
});
}
#endif

#ifndef _WIN32

template<typename xpu, typename OP, typename LOP, typename ROP>
#else
template<typename xpu, typename OP>
#endif
void MixedBinaryBroadcastCompute(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<TBlob>& inputs,
@@ -237,7 +232,6 @@ void MixedBinaryBroadcastCompute(const nnvm::NodeAttrs& attrs,
const TBlob& rhs = inputs[1];
const TBlob& out = outputs[0];

#ifndef _WIN32
mxnet::TShape new_lshape, new_rshape, new_oshape;
int ndim = BinaryBroadcastShapeCompact(lhs.shape_, rhs.shape_, out.shape_,
&new_lshape, &new_rshape, &new_oshape);
@@ -303,64 +297,9 @@ void MixedBinaryBroadcastCompute(const nnvm::NodeAttrs& attrs,
PrintErrorMessage(attrs.op->name, lhs.type_flag_, rhs.type_flag_);
}
}
#else
mshadow::Stream<xpu> *s = ctx.get_stream<xpu>();
if (common::is_float(lhs.type_flag_) || common::is_float(rhs.type_flag_)) {
TBlob temp_tblob;
// one is float, the other is bool
CHECK((out.type_flag_ == lhs.type_flag_) || (out.type_flag_ == rhs.type_flag_))
<< "This case out type should be same as the float type";
if (lhs.type_flag_ == out.type_flag_) {
MSHADOW_REAL_TYPE_SWITCH(lhs.type_flag_, LType, {
Tensor<xpu, 1, LType> temp_tensor =
ctx.requested[0].get_space_typed<xpu, 1, LType>(Shape1(rhs.Size()), s);
temp_tblob = TBlob(temp_tensor);
});
CastCompute<xpu>(attrs, ctx, {rhs}, {kWriteTo}, {temp_tblob});
BinaryBroadcastCompute<xpu, OP>(
attrs, ctx, {lhs, temp_tblob.reshape(rhs.shape_)}, req, outputs);
} else {
MSHADOW_REAL_TYPE_SWITCH(rhs.type_flag_, RType, {
Tensor<xpu, 1, RType> temp_tensor =
ctx.requested[0].get_space_typed<xpu, 1, RType>(Shape1(lhs.Size()), s);
temp_tblob = TBlob(temp_tensor);
});
CastCompute<xpu>(attrs, ctx, {lhs}, {kWriteTo}, {temp_tblob});
BinaryBroadcastCompute<xpu, OP>(
attrs, ctx, {temp_tblob.reshape(lhs.shape_), rhs}, req, outputs);
}
} else if (!common::is_float(lhs.type_flag_) && !common::is_float(rhs.type_flag_)) {
TBlob temp_tblob;
if (lhs.type_flag_ == out.type_flag_) {
MXNET_INT_TYPE_SWITCH(lhs.type_flag_, LType, {
Tensor<xpu, 1, LType> temp_tensor =
ctx.requested[0].get_space_typed<xpu, 1, LType>(Shape1(rhs.Size()), s);
temp_tblob = TBlob(temp_tensor);
});
CastCompute<xpu>(attrs, ctx, {rhs}, {kWriteTo}, {temp_tblob});
BinaryBroadcastCompute<xpu, OP>(
attrs, ctx, {lhs, temp_tblob.reshape(rhs.shape_)}, req, outputs);
} else {
MXNET_INT_TYPE_SWITCH(rhs.type_flag_, RType, {
Tensor<xpu, 1, RType> temp_tensor =
ctx.requested[0].get_space_typed<xpu, 1, RType>(Shape1(lhs.Size()), s);
temp_tblob = TBlob(temp_tensor);
});
CastCompute<xpu>(attrs, ctx, {lhs}, {kWriteTo}, {temp_tblob});
BinaryBroadcastCompute<xpu, OP>(
attrs, ctx, {temp_tblob.reshape(lhs.shape_), rhs}, req, outputs);
}
} else {
PrintErrorMessage(attrs.op->name, lhs.type_flag_, rhs.type_flag_);
}
#endif
}
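For reference, the Windows branch deleted above worked around the missing mixed-type kernels by promoting the lower-precision operand: it allocated temporary workspace of the output's dtype, ran `CastCompute` into it, and then called the ordinary same-type `BinaryBroadcastCompute`. A simplified, self-contained sketch of that cast-then-compute idea, in which plain `std::vector` stands in for `TBlob` and the requested temp space (this is not the MXNet code):

```cpp
// Simplified sketch of the deleted Windows fallback: when the operands have
// different dtypes, cast the non-matching operand into a temporary buffer of the
// output's dtype, then run the ordinary same-type kernel.
#include <cstdio>
#include <vector>

template <typename T>
std::vector<double> CastToDouble(const std::vector<T>& in) {  // CastCompute stand-in
  return std::vector<double>(in.begin(), in.end());
}

std::vector<double> AddSameType(const std::vector<double>& a,
                                const std::vector<double>& b) {
  std::vector<double> out(a.size());
  for (size_t i = 0; i < a.size(); ++i) {
    out[i] = a[i] + b[i];  // BinaryBroadcastCompute stand-in
  }
  return out;
}

int main() {
  std::vector<int> lhs{1, 2, 3};           // integer operand
  std::vector<double> rhs{0.5, 0.5, 0.5};  // floating operand; also the output dtype
  // Fallback path: promote lhs first, then reuse the homogeneous kernel.
  std::vector<double> out = AddSameType(CastToDouble(lhs), rhs);
  std::printf("%.1f %.1f %.1f\n", out[0], out[1], out[2]);  // 1.5 2.5 3.5
  return 0;
}
```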

#ifndef _WIN32
template<typename xpu, typename OP, typename LOP, typename ROP>
#else
template<typename xpu, typename OP>
#endif
void NumpyBinaryBroadcastCompute(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<TBlob>& inputs,
@@ -382,18 +321,10 @@ void NumpyBinaryBroadcastCompute(const nnvm::NodeAttrs& attrs,
return;
}

#ifndef _WIN32
MixedBinaryBroadcastCompute<xpu, OP, LOP, ROP>(attrs, ctx, inputs, req, outputs);
#else
MixedBinaryBroadcastCompute<xpu, OP>(attrs, ctx, inputs, req, outputs);
#endif
}
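`NumpyBinaryBroadcastCompute` (and the `WithBool` variant below) only reaches `MixedBinaryBroadcastCompute` when the two inputs disagree on dtype; the elided portion of the function above returns early for the matching-dtype case. A hedged sketch of that dispatch order, with simplified stand-in types and names rather than the MXNet API:

```cpp
// Hedged sketch of the dispatch order in NumpyBinaryBroadcastCompute: matching
// dtypes take the plain broadcast kernel, differing dtypes take the
// mixed-precision path.
#include <iostream>

enum class DType { kInt32, kFloat32, kFloat64 };

void BinaryBroadcastComputeSketch(DType t) {
  std::cout << "same-dtype kernel, dtype " << static_cast<int>(t) << "\n";
}

void MixedBinaryBroadcastComputeSketch(DType lhs, DType rhs) {
  std::cout << "mixed-precision kernel, dtypes " << static_cast<int>(lhs)
            << " and " << static_cast<int>(rhs) << "\n";
}

void NumpyBinaryBroadcastComputeSketch(DType lhs, DType rhs) {
  if (lhs == rhs) {  // fast path: both inputs already share a dtype
    BinaryBroadcastComputeSketch(lhs);
    return;
  }
  MixedBinaryBroadcastComputeSketch(lhs, rhs);  // heterogeneous inputs
}

int main() {
  NumpyBinaryBroadcastComputeSketch(DType::kFloat32, DType::kFloat32);
  NumpyBinaryBroadcastComputeSketch(DType::kInt32, DType::kFloat64);
  return 0;
}
```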

#ifndef _WIN32
template<typename xpu, typename OP, typename LOP, typename ROP>
#else
template<typename xpu, typename OP>
#endif
void NumpyBinaryBroadcastComputeWithBool(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<TBlob>& inputs,
@@ -438,12 +369,7 @@ void NumpyBinaryBroadcastComputeWithBool(const nnvm::NodeAttrs& attrs,
}
return;
}

#ifndef _WIN32
MixedBinaryBroadcastCompute<xpu, OP, LOP, ROP>(attrs, ctx, inputs, req, outputs);
#else
MixedBinaryBroadcastCompute<xpu, OP>(attrs, ctx, inputs, req, outputs);
#endif
}

template<typename xpu, typename LOP, typename ROP>