Skip to content

Commit

Permalink
fix bug in code_gen
Browse files Browse the repository at this point in the history
  • Loading branch information
chenjiaoAngel committed Aug 23, 2018
1 parent 093c439 commit 4dbf1f1
Show file tree
Hide file tree
Showing 4 changed files with 50 additions and 48 deletions.
4 changes: 2 additions & 2 deletions framework/graph/llvm/fusion/fusion_op_register.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -85,10 +85,10 @@ REGISTER_GRAPH_FUSION_PATTERN(EltwiseRelu)
.AddConnect("eltwise_0", "relu_0")
.CreatePattern([](VGraph* graph) {});

REGISTER_GRAPH_FUSION_PATTERN(EltwisePRelu)
REGISTER_GRAPH_FUSION_PATTERN(EltwiseActivation)
.Type(IN_ORDER)
.AddOpNode("eltwise_0", "Eltwise")
.AddOpNode("prelu_0", "PReLU")
.AddOpNode("prelu_0", "Activation")
.AddConnect("eltwise_0", "prelu_0")
.CreatePattern([](VGraph* graph) {});

Expand Down
6 changes: 3 additions & 3 deletions framework/lite/op_map_cpp.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -1205,9 +1205,9 @@ std::string ParserEltwisePRelu(graph::AttrInfo& attr,
coeff_vec_code<<"}";
}
//prelu
auto prelu_channel_shared = get_attr<bool>("channel_shared", attr);
auto prelu_channel_shared = get_attr<bool>("prelu_0_channel_shared", attr);
// auto prelu_weights = get_attr<bool>("weights", attr);
auto prelu_weights = get_attr<PBlock<float, X86>>("weight_1", attr);
auto prelu_weights = get_attr<PBlock<float, X86>>("prelu_0_weight_1", attr);

writter.register_weights(node_name, prelu_weights);
LOG(INFO) << node_name << " write weights: " << prelu_weights.count();
Expand Down Expand Up @@ -1915,7 +1915,7 @@ std::unordered_map<std::string, OpParser> OPERATION_MAP({
{"DetectionOutput", {"SaberDetectionOutput", ParserDectionOutput} }, // done
{"Eltwise", {"SaberEltwise", ParserEltwise} }, //done
{"EltwiseRelu", {"SaberEltwiseAct", ParserEltwiseRelu}}, // done
{"EltwisePRelu", {"SaberEltwiseAct", ParserEltwisePRelu}}, // done
{"EltwiseActivation", {"SaberEltwiseAct", ParserEltwisePRelu}}, // done
{"Dense", {"SaberFc", ParserFc} }, // done
{"Permute", {"SaberPermute", ParserPermute} }, // done
{"Pooling", {"SaberPooling", ParserPooling} }, // done
Expand Down
58 changes: 30 additions & 28 deletions framework/operators/fusion_ops/eltwise_prelu.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -6,36 +6,36 @@ namespace ops {

#ifdef USE_CUDA
template<>
void EltwisePRelu<NV, AK_FLOAT, Precision::FP32>::operator()(
void EltwiseActivation<NV, AK_FLOAT, Precision::FP32>::operator()(
OpContext<NV>& ctx,
const std::vector<Tensor4dPtr<NV, AK_FLOAT> >& ins,
std::vector<Tensor4dPtr<NV, AK_FLOAT> >& outs) {
auto* impl = static_cast<EltwisePReluHelper<NV, AK_FLOAT, Precision::FP32>*>(this->_helper);
auto& param = static_cast<EltwisePReluHelper<NV, AK_FLOAT, Precision::FP32>*>
auto* impl = static_cast<EltwiseActivationHelper<NV, AK_FLOAT, Precision::FP32>*>(this->_helper);
auto& param = static_cast<EltwiseActivationHelper<NV, AK_FLOAT, Precision::FP32>*>
(this->_helper)->_param_eltwise_prelu;
impl->_funcs_eltwise_prelu(ins, outs, param, ctx);
}
#endif
#ifdef USE_ARM_PLACE
template<>
void EltwisePRelu<ARM, AK_FLOAT, Precision::FP32>::operator()(
void EltwiseActivation<ARM, AK_FLOAT, Precision::FP32>::operator()(
OpContext<ARM>& ctx,
const std::vector<Tensor4dPtr<ARM, AK_FLOAT> >& ins,
std::vector<Tensor4dPtr<ARM, AK_FLOAT> >& outs) {
auto* impl = static_cast<EltwisePReluHelper<ARM, AK_FLOAT, Precision::FP32>*>(this->_helper);
auto& param = static_cast<EltwisePReluHelper<ARM, AK_FLOAT, Precision::FP32>*>
auto* impl = static_cast<EltwiseActivationHelper<ARM, AK_FLOAT, Precision::FP32>*>(this->_helper);
auto& param = static_cast<EltwiseActivationHelper<ARM, AK_FLOAT, Precision::FP32>*>
(this->_helper)->_param_eltwise_prelu;
impl->_funcs_eltwise_prelu(ins, outs, param, ctx);
}
#endif
#ifdef USE_X86_PLACE
template<>
void EltwisePRelu<X86, AK_FLOAT, Precision::FP32>::operator()(
void EltwiseActivation<X86, AK_FLOAT, Precision::FP32>::operator()(
OpContext<X86>& ctx,
const std::vector<Tensor4dPtr<X86, AK_FLOAT> >& ins,
std::vector<Tensor4dPtr<X86, AK_FLOAT> >& outs) {
auto* impl = static_cast<EltwisePReluHelper<X86, AK_FLOAT, Precision::FP32>*>(this->_helper);
auto& param = static_cast<EltwisePReluHelper<X86, AK_FLOAT, Precision::FP32>*>
auto* impl = static_cast<EltwiseActivationHelper<X86, AK_FLOAT, Precision::FP32>*>(this->_helper);
auto& param = static_cast<EltwiseActivationHelper<X86, AK_FLOAT, Precision::FP32>*>
(this->_helper)->_param_eltwise_prelu;
impl->_funcs_eltwise_prelu(ins, outs, param, ctx);
}
Expand All @@ -45,19 +45,21 @@ void EltwisePRelu<X86, AK_FLOAT, Precision::FP32>::operator()(

/// set helper
template<typename Ttype, DataType Dtype, Precision Ptype>
EltwisePReluHelper<Ttype, Dtype, Ptype>::~EltwisePReluHelper() {
EltwiseActivationHelper<Ttype, Dtype, Ptype>::~EltwiseActivationHelper() {
}

template<typename Ttype, DataType Dtype, Precision Ptype>
Status EltwisePReluHelper<Ttype, Dtype, Ptype>::InitParam() {
DLOG(WARNING) << "Parsing EltwisePRelu op parameter.";
Status EltwiseActivationHelper<Ttype, Dtype, Ptype>::InitParam() {
DLOG(WARNING) << "Parsing EltwiseActivation op parameter.";
//FIND_PARAMETER(type);
auto type = GET_PARAMETER(std::string, type);
// auto alpha = GET_PARAMETER(float, relu_0_alpha);
auto coeff = GET_PARAMETER(PTuple<float>, coeff);

auto channel_shared = GET_PARAMETER(bool, channel_shared);
auto channel_shared = GET_PARAMETER(bool, prelu_0_channel_shared);
//printf("channel_shared: %d \n", channel_shared);
using pblock_type = PBlock<typename DataTypeWarpper<Dtype>::type, Ttype>;
auto weights = GET_PARAMETER(pblock_type, weight_1);
auto weights = GET_PARAMETER(pblock_type, prelu_0_weight_1);

PreluParam<Tensor4d<Ttype, Dtype>> prelu_param(channel_shared, &(weights.d_tensor()));

Expand Down Expand Up @@ -90,53 +92,53 @@ Status EltwisePReluHelper<Ttype, Dtype, Ptype>::InitParam() {
}

template<typename Ttype, DataType Dtype, Precision Ptype>
Status EltwisePReluHelper<Ttype, Dtype, Ptype>::Init(OpContext<Ttype>& ctx,
Status EltwiseActivationHelper<Ttype, Dtype, Ptype>::Init(OpContext<Ttype>& ctx,
const std::vector<Tensor4dPtr<Ttype, Dtype> >& ins,
std::vector<Tensor4dPtr<Ttype, Dtype> >& outs) {
_funcs_eltwise_prelu.init(ins, outs, _param_eltwise_prelu, SPECIFY, SABER_IMPL, ctx);
return Status::OK();
}

template<typename Ttype, DataType Dtype, Precision Ptype>
Status EltwisePReluHelper<Ttype, Dtype, Ptype>::InferShape(const
Status EltwiseActivationHelper<Ttype, Dtype, Ptype>::InferShape(const
std::vector<Tensor4dPtr<Ttype, Dtype> >& ins,
std::vector<Tensor4dPtr<Ttype, Dtype> >& outs) {
_funcs_eltwise_prelu.compute_output_shape(ins, outs, _param_eltwise_prelu);
return Status::OK();
}

#ifdef USE_CUDA
template class EltwisePReluHelper<NV, AK_FLOAT, Precision::FP32>;
template class EltwisePReluHelper<NV, AK_FLOAT, Precision::FP16>;
template class EltwisePReluHelper<NV, AK_FLOAT, Precision::INT8>;
template class EltwiseActivationHelper<NV, AK_FLOAT, Precision::FP32>;
template class EltwiseActivationHelper<NV, AK_FLOAT, Precision::FP16>;
template class EltwiseActivationHelper<NV, AK_FLOAT, Precision::INT8>;
#endif

#ifdef USE_ARM_PLACE
template class EltwisePReluHelper<ARM, AK_FLOAT, Precision::FP32>;
template class EltwisePReluHelper<ARM, AK_FLOAT, Precision::FP16>;
template class EltwisePReluHelper<ARM, AK_FLOAT, Precision::INT8>;
template class EltwiseActivationHelper<ARM, AK_FLOAT, Precision::FP32>;
template class EltwiseActivationHelper<ARM, AK_FLOAT, Precision::FP16>;
template class EltwiseActivationHelper<ARM, AK_FLOAT, Precision::INT8>;
#endif

#if defined(USE_X86_PLACE) || defined(BUILD_LITE)
template class EltwisePReluHelper<X86, AK_FLOAT, Precision::FP32>;
template class EltwiseActivationHelper<X86, AK_FLOAT, Precision::FP32>;
#endif

// register helper
#ifdef USE_CUDA
ANAKIN_REGISTER_OP_HELPER(EltwisePRelu, EltwisePReluHelper, NV, AK_FLOAT, Precision::FP32);
ANAKIN_REGISTER_OP_HELPER(EltwiseActivation, EltwiseActivationHelper, NV, AK_FLOAT, Precision::FP32);
#endif

#ifdef USE_ARM_PLACE
ANAKIN_REGISTER_OP_HELPER(EltwisePRelu, EltwisePReluHelper, ARM, AK_FLOAT, Precision::FP32);
ANAKIN_REGISTER_OP_HELPER(EltwiseActivation, EltwiseActivationHelper, ARM, AK_FLOAT, Precision::FP32);
#endif

#if defined(USE_X86_PLACE) || defined(BUILD_LITE)
ANAKIN_REGISTER_OP_HELPER(EltwisePRelu, EltwisePReluHelper, X86, AK_FLOAT, Precision::FP32);
ANAKIN_REGISTER_OP_HELPER(EltwiseActivation, EltwiseActivationHelper, X86, AK_FLOAT, Precision::FP32);
#endif

//! register op
ANAKIN_REGISTER_OP(EltwisePRelu)
.Doc("EltwisePRelu operator")
ANAKIN_REGISTER_OP(EltwiseActivation)
.Doc("EltwiseActivation operator")
#ifdef USE_CUDA
.__alias__<NV, AK_FLOAT, Precision::FP32>("eltwise_prelu")
#endif
Expand Down
30 changes: 15 additions & 15 deletions framework/operators/fusion_ops/eltwise_prelu.h
Original file line number Diff line number Diff line change
Expand Up @@ -27,46 +27,46 @@ namespace anakin {
namespace ops {

template<typename Ttype, DataType Dtype, Precision Ptype>
class EltwisePReluHelper;
class EltwiseActivationHelper;

/// pooling op
/**
* \brief EltwisePRelu implementation class
* \brief EltwiseActivation implementation class
* public inherit Operator
*/
template<typename Ttype, DataType Dtype, Precision Ptype>
class EltwisePRelu : public Operator<Ttype, Dtype, Ptype> {
class EltwiseActivation : public Operator<Ttype, Dtype, Ptype> {
public:
EltwisePRelu() {}
EltwiseActivation() {}

/// forward impl
virtual void operator() (OpContext<Ttype> &ctx,
const std::vector<Tensor4dPtr<Ttype, Dtype> >& ins,
std::vector<Tensor4dPtr<Ttype, Dtype> >& outs) {
LOG(ERROR) << "Not Impl Yet Operator eltwisePrelu<TargetType:"<<"unknown"<<","
LOG(ERROR) << "Not Impl Yet Operator EltwiseActivation<TargetType:"<<"unknown"<<","
<<type_id<typename DataTypeWarpper<Dtype>::type>().type_info()<<">";
}

friend class EltwisePReluHelper<Ttype, Dtype, Ptype>;
friend class EltwiseActivationHelper<Ttype, Dtype, Ptype>;
};

/**
* \brief EltwisePRelu helper class to implement it
* \brief EltwiseActivation helper class to implement it
* public inherit OperatorHelper
* including init resource and shape size in EltwisePRelu context
* including init resource and shape size in EltwiseActivation context
*/
template<typename Ttype, DataType Dtype, Precision Ptype>
class EltwisePReluHelper : public OperatorHelper<Ttype, Dtype, Ptype> {
class EltwiseActivationHelper : public OperatorHelper<Ttype, Dtype, Ptype> {
public:
EltwisePReluHelper()=default;
EltwiseActivationHelper()=default;

~EltwisePReluHelper();
~EltwiseActivationHelper();

Status InitParam() override;

/**
* \brief initial all the resource needed by pooling
* \param ctx stand for EltwisePRelu operation context
* \param ctx stand for EltwiseActivation operation context
* \param ins stand for input tensor vector
* \param outs stand for output tensor vector
* \return status
Expand All @@ -85,13 +85,13 @@ class EltwisePReluHelper : public OperatorHelper<Ttype, Dtype, Ptype> {
std::vector<Tensor4dPtr<Ttype, Dtype> >& outs) override;

public:
///< _param_eltwise_relu stand for EltwisePRelu parameter
///< _param_eltwise_prelu stands for the EltwiseActivation parameter
saber::EltwiseActiveParam<Tensor4d<Ttype, Dtype>> _param_eltwise_prelu;
///< _funcs_eltwise_relu stand for EltwisePRelu function
///< _funcs_eltwise_prelu stands for the EltwiseActivation function
saber::EltwiseActive<Ttype, Dtype> _funcs_eltwise_prelu;

private:
///< _dims stand for EltwisePRelu size
///< _dims stands for the EltwiseActivation size
PTuple<int> _dims;
};

Expand Down

0 comments on commit 4dbf1f1

Please sign in to comment.