From a7cacf0ad95aac68e5fffc3e5db8887cf4f075f8 Mon Sep 17 00:00:00 2001 From: "Vafin, Maxim" Date: Wed, 12 May 2021 23:09:58 +0300 Subject: [PATCH 1/2] Add clang target --- ngraph/frontend/paddlepaddle/CMakeLists.txt | 2 ++ 1 file changed, 2 insertions(+) diff --git a/ngraph/frontend/paddlepaddle/CMakeLists.txt b/ngraph/frontend/paddlepaddle/CMakeLists.txt index 6a9c1f058bce9b..2d4319ed3a6126 100644 --- a/ngraph/frontend/paddlepaddle/CMakeLists.txt +++ b/ngraph/frontend/paddlepaddle/CMakeLists.txt @@ -55,6 +55,8 @@ endif() target_link_libraries(paddlepaddle_frontend PRIVATE ${Protobuf_LIBRARIES} PUBLIC ngraph PRIVATE ngraph::builder) +add_clang_format_target(paddlepaddle_frontend_clang FOR_TARGETS paddlepaddle_frontend) + # TODO: Consider to remove the following block (inherited from onnx_import just in case). if (CMAKE_CXX_COMPILER_ID MATCHES "^(Apple)?Clang$") target_compile_options(paddlepaddle_frontend PRIVATE -Wno-undef -Wno-reserved-id-macro -Wno-switch-enum From df7ab69aa01ad399bfdb0246a41c1a766d9442ca Mon Sep 17 00:00:00 2001 From: "Vafin, Maxim" Date: Wed, 12 May 2021 23:54:14 +0300 Subject: [PATCH 2/2] Fix code-style --- .../paddlepaddle_frontend/frontend.hpp | 76 +-- .../include/paddlepaddle_frontend/model.hpp | 67 +-- .../include/paddlepaddle_frontend/place.hpp | 354 ++++++------ .../include/paddlepaddle_frontend/utility.hpp | 17 +- ngraph/frontend/paddlepaddle/src/decoder.cpp | 181 ++++--- ngraph/frontend/paddlepaddle/src/decoder.hpp | 70 +-- ngraph/frontend/paddlepaddle/src/frontend.cpp | 133 +++-- ngraph/frontend/paddlepaddle/src/model.cpp | 329 +++++++----- .../paddlepaddle/src/node_context.hpp | 304 ++++++----- .../frontend/paddlepaddle/src/op/argmax.cpp | 64 ++- .../frontend/paddlepaddle/src/op/argmax.hpp | 20 +- .../paddlepaddle/src/op/assign_value.cpp | 98 ++-- .../paddlepaddle/src/op/assign_value.hpp | 21 +- .../paddlepaddle/src/op/batch_norm.cpp | 36 +- .../paddlepaddle/src/op/batch_norm.hpp | 20 +- ngraph/frontend/paddlepaddle/src/op/cast.cpp | 29 +- ngraph/frontend/paddlepaddle/src/op/cast.hpp | 20 +- ngraph/frontend/paddlepaddle/src/op/clip.cpp | 31 +- ngraph/frontend/paddlepaddle/src/op/clip.hpp | 20 +- .../frontend/paddlepaddle/src/op/concat.cpp | 27 +- .../frontend/paddlepaddle/src/op/concat.hpp | 20 +- .../frontend/paddlepaddle/src/op/conv2d.cpp | 48 +- .../frontend/paddlepaddle/src/op/conv2d.hpp | 20 +- .../paddlepaddle/src/op/elementwise_ops.cpp | 124 +++-- .../paddlepaddle/src/op/elementwise_ops.hpp | 32 +- .../paddlepaddle/src/op/fill_constant.cpp | 53 +- .../paddlepaddle/src/op/fill_constant.hpp | 21 +- .../src/op/fill_constant_batch_size_like.cpp | 58 +- .../src/op/fill_constant_batch_size_like.hpp | 20 +- .../src/op/flatten_contiguous_range.cpp | 69 ++- .../src/op/flatten_contiguous_range.hpp | 20 +- .../frontend/paddlepaddle/src/op/interp.cpp | 294 +++++----- .../frontend/paddlepaddle/src/op/interp.hpp | 23 +- .../paddlepaddle/src/op/leakyrelu.cpp | 28 +- .../paddlepaddle/src/op/leakyrelu.hpp | 20 +- .../frontend/paddlepaddle/src/op/matmul.cpp | 40 +- .../frontend/paddlepaddle/src/op/matmul.hpp | 21 +- ngraph/frontend/paddlepaddle/src/op/mul.cpp | 85 +-- ngraph/frontend/paddlepaddle/src/op/mul.hpp | 20 +- .../paddlepaddle/src/op/multiclass_nms.cpp | 52 +- .../paddlepaddle/src/op/multiclass_nms.hpp | 20 +- ngraph/frontend/paddlepaddle/src/op/pad3d.cpp | 167 +++--- ngraph/frontend/paddlepaddle/src/op/pad3d.hpp | 20 +- .../frontend/paddlepaddle/src/op/pool2d.cpp | 87 +-- .../frontend/paddlepaddle/src/op/pool2d.hpp | 20 +- 
ngraph/frontend/paddlepaddle/src/op/relu.cpp | 23 +- ngraph/frontend/paddlepaddle/src/op/relu.hpp | 20 +- .../frontend/paddlepaddle/src/op/reshape2.cpp | 43 +- .../frontend/paddlepaddle/src/op/reshape2.hpp | 21 +- ngraph/frontend/paddlepaddle/src/op/scale.cpp | 49 +- ngraph/frontend/paddlepaddle/src/op/scale.hpp | 20 +- ngraph/frontend/paddlepaddle/src/op/slice.cpp | 76 +-- ngraph/frontend/paddlepaddle/src/op/slice.hpp | 20 +- .../frontend/paddlepaddle/src/op/softmax.cpp | 38 +- .../frontend/paddlepaddle/src/op/softmax.hpp | 21 +- ngraph/frontend/paddlepaddle/src/op/split.cpp | 57 +- ngraph/frontend/paddlepaddle/src/op/split.hpp | 21 +- .../frontend/paddlepaddle/src/op/squeeze.cpp | 39 +- .../frontend/paddlepaddle/src/op/squeeze.hpp | 20 +- .../paddlepaddle/src/op/transpose2.cpp | 37 +- .../paddlepaddle/src/op/transpose2.hpp | 20 +- .../paddlepaddle/src/op/unsqueeze.cpp | 31 +- .../paddlepaddle/src/op/unsqueeze.hpp | 20 +- .../frontend/paddlepaddle/src/op/yolo_box.cpp | 505 ++++++++++-------- .../frontend/paddlepaddle/src/op/yolo_box.hpp | 20 +- ngraph/frontend/paddlepaddle/src/op_table.cpp | 92 ++-- ngraph/frontend/paddlepaddle/src/op_table.hpp | 21 +- ngraph/frontend/paddlepaddle/src/place.cpp | 101 ++-- 68 files changed, 2660 insertions(+), 1954 deletions(-) diff --git a/ngraph/frontend/paddlepaddle/include/paddlepaddle_frontend/frontend.hpp b/ngraph/frontend/paddlepaddle/include/paddlepaddle_frontend/frontend.hpp index e190f82a31de29..55def3bed69577 100644 --- a/ngraph/frontend/paddlepaddle/include/paddlepaddle_frontend/frontend.hpp +++ b/ngraph/frontend/paddlepaddle/include/paddlepaddle_frontend/frontend.hpp @@ -20,48 +20,52 @@ #include "model.hpp" -namespace ngraph { -namespace frontend { - -class NGRAPH_API FrontEndPDPD : public FrontEnd +namespace ngraph { - static std::shared_ptr convert_model(const std::shared_ptr& model); -public: - - FrontEndPDPD () + namespace frontend { - } + class NGRAPH_API FrontEndPDPD : public FrontEnd + { + static std::shared_ptr + convert_model(const std::shared_ptr& model); + + public: + FrontEndPDPD() {} - /** - * @brief Reads model from file and deducts file names of weights - * @param path path to folder which contains __model__ file or path to .pdmodel file - * @return InputModel::Ptr - */ - virtual InputModel::Ptr loadFromFile (const std::string& path) const override; + /** + * @brief Reads model from file and deduces file names of weights + * @param path path to folder which contains __model__ file or path to .pdmodel file + * @return InputModel::Ptr + */ + virtual InputModel::Ptr loadFromFile(const std::string& path) const override; - /** - * @brief Reads model and weights from files - * @param paths vector containing path to .pdmodel and .pdiparams files - * @return InputModel::Ptr - */ - virtual InputModel::Ptr loadFromFiles (const std::vector& paths) const override; + /** + * @brief Reads model and weights from files + * @param paths vector containing path to .pdmodel and .pdiparams files + * @return InputModel::Ptr + */ + virtual InputModel::Ptr + loadFromFiles(const std::vector& paths) const override; - /** - * @brief Reads model from stream - * @param model_stream stream containing .pdmodel or __model__ files. Can only be used if model have no weights - * @return InputModel::Ptr - */ - virtual InputModel::Ptr loadFromStream (std::istream& model_stream) const override; + /** + * @brief Reads model from stream + * @param model_stream stream containing .pdmodel or __model__ files. 
Can only be used + * if the model has no weights + * @return InputModel::Ptr + */ + virtual InputModel::Ptr loadFromStream(std::istream& model_stream) const override; - /** - * @brief Reads model from stream - * @param paths vector of streams containing .pdmodel and .pdiparams files. Can't be used in case of multiple weight files - * @return InputModel::Ptr - */ - virtual InputModel::Ptr loadFromStreams (const std::vector& paths) const override; + /** + * @brief Reads model from streams + * @param paths vector of streams containing .pdmodel and .pdiparams files. Can't be + * used in case of multiple weight files + * @return InputModel::Ptr + */ + virtual InputModel::Ptr + loadFromStreams(const std::vector& paths) const override; - virtual std::shared_ptr convert (InputModel::Ptr model) const override; -}; + virtual std::shared_ptr convert(InputModel::Ptr model) const override; + }; -} // namespace frontend + } // namespace frontend } // namespace ngraph diff --git a/ngraph/frontend/paddlepaddle/include/paddlepaddle_frontend/model.hpp b/ngraph/frontend/paddlepaddle/include/paddlepaddle_frontend/model.hpp index 1cf9930d80c76b..c7b7effa09af34 100644 --- a/ngraph/frontend/paddlepaddle/include/paddlepaddle_frontend/model.hpp +++ b/ngraph/frontend/paddlepaddle/include/paddlepaddle_frontend/model.hpp @@ -18,38 +18,39 @@ #include -namespace ngraph { -namespace frontend { - -class OpPlacePDPD; -class TensorPlacePDPD; - -class NGRAPH_API InputModelPDPD : public InputModel +namespace ngraph { - friend class FrontEndPDPD; - class InputModelPDPDImpl; - std::shared_ptr _impl; - - std::vector> getOpPlaces() const; - std::map> getVarPlaces() const; - std::map> getTensorValues() const; - -public: - explicit InputModelPDPD (const std::string& path); - explicit InputModelPDPD (const std::vector& streams); - std::vector getInputs () const override; - std::vector getOutputs () const override; - Place::Ptr getPlaceByTensorName (const std::string& tensorName) const override; - void overrideAllOutputs (const std::vector& outputs) override; - void overrideAllInputs (const std::vector& inputs) override; - void extractSubgraph (const std::vector& inputs, const std::vector& outputs) override; - void setDefaultShape (Place::Ptr place, const ngraph::Shape&) override; - void setPartialShape (Place::Ptr place, const ngraph::PartialShape&) override; - ngraph::PartialShape getPartialShape (Place::Ptr place) const override; - void setElementType (Place::Ptr place, const ngraph::element::Type&) override; - void setTensorValue (Place::Ptr place, const void* value) override; - -}; - -} // namespace frontend + namespace frontend + { + class OpPlacePDPD; + class TensorPlacePDPD; + + class NGRAPH_API InputModelPDPD : public InputModel + { + friend class FrontEndPDPD; + class InputModelPDPDImpl; + std::shared_ptr _impl; + + std::vector> getOpPlaces() const; + std::map> getVarPlaces() const; + std::map> getTensorValues() const; + + public: + explicit InputModelPDPD(const std::string& path); + explicit InputModelPDPD(const std::vector& streams); + std::vector getInputs() const override; + std::vector getOutputs() const override; + Place::Ptr getPlaceByTensorName(const std::string& tensorName) const override; + void overrideAllOutputs(const std::vector& outputs) override; + void overrideAllInputs(const std::vector& inputs) override; + void extractSubgraph(const std::vector& inputs, + const std::vector& outputs) override; + void setDefaultShape(Place::Ptr place, const ngraph::Shape&) override; + void setPartialShape(Place::Ptr place, 
const ngraph::PartialShape&) override; + ngraph::PartialShape getPartialShape(Place::Ptr place) const override; + void setElementType(Place::Ptr place, const ngraph::element::Type&) override; + void setTensorValue(Place::Ptr place, const void* value) override; + }; + + } // namespace frontend } // namespace ngraph diff --git a/ngraph/frontend/paddlepaddle/include/paddlepaddle_frontend/place.hpp b/ngraph/frontend/paddlepaddle/include/paddlepaddle_frontend/place.hpp index c12b7e2fc06c33..904c8202a6916c 100644 --- a/ngraph/frontend/paddlepaddle/include/paddlepaddle_frontend/place.hpp +++ b/ngraph/frontend/paddlepaddle/include/paddlepaddle_frontend/place.hpp @@ -19,169 +19,199 @@ #include #include -namespace paddle { -namespace framework { -namespace proto { +namespace paddle +{ + namespace framework + { + namespace proto + { + class OpDesc; + class VarDesc; + } // namespace proto + } // namespace framework +} // namespace paddle + +namespace ngraph +{ + namespace frontend + { + class TensorPlacePDPD; + class OpPlacePDPD; + + class PlacePDPD : public Place + { + public: + PlacePDPD(const InputModel& input_model, const std::vector& names) + : m_input_model(input_model) + , m_names(names) + { + } + + explicit PlacePDPD(const InputModel& input_model) + : PlacePDPD(input_model, std::vector{}) + { + } + + ~PlacePDPD() override = default; + + bool isInput() const override; + + bool isOutput() const override; + + bool isEqual(Ptr another) const override { return this == another.get(); } + + std::vector getNames() const override { return m_names; } + + private: + const InputModel& m_input_model; + std::vector m_names; + }; + + class InPortPlacePDPD : public PlacePDPD + { + public: + explicit InPortPlacePDPD(const InputModel& input_model) + : PlacePDPD(input_model) + { + } + + void setOp(const std::weak_ptr& op) { m_op = op; } + + void setSourceTensor(const std::weak_ptr& source_tensor) + { + m_source_tensor = source_tensor; + } + + std::shared_ptr getSourceTensorPDPD() const; + + std::shared_ptr getOp(); + + private: + std::weak_ptr m_source_tensor; + std::weak_ptr m_op; + }; + + class OutPortPlacePDPD : public PlacePDPD + { + public: + explicit OutPortPlacePDPD(const InputModel& input_model) + : PlacePDPD(input_model) + { + } + + void setOp(const std::weak_ptr& op) { m_op = op; } + + void setTargetTensor(const std::weak_ptr& target_tensor) + { + m_target_tensor = target_tensor; + } + + std::shared_ptr getTargetTensorPDPD() const; + + private: + std::weak_ptr m_op; + std::weak_ptr m_target_tensor; + }; + + class OpPlacePDPD : public PlacePDPD + { + public: + OpPlacePDPD(const InputModel& input_model, + const std::vector& names, + const std::shared_ptr& op_desc); + + OpPlacePDPD(const InputModel& input_model, + const std::shared_ptr& op_desc); + + void addInPort(const std::shared_ptr& input, const std::string& name) + { + m_input_ports[name].push_back(input); + } + + void addOutPort(const std::shared_ptr& output, + const std::string& name) + { + m_output_ports[name].push_back(output); + } + + const std::map>>& + getOutputPorts() const + { + return m_output_ports; + } + + const std::map>>& + getInputPorts() const + { + return m_input_ports; + } -class OpDesc; -class VarDesc; + std::shared_ptr getOutputPortPDPD(const std::string& name, int idx) + { + return m_output_ports[name][idx]; + } -} // proto -} // framework -} // paddle + std::shared_ptr getInputPortPDPD(const std::string& name, int idx) + { + return m_input_ports[name][idx]; + } -namespace ngraph { -namespace frontend { - -class TensorPlacePDPD; 
-class OpPlacePDPD; - -class PlacePDPD : public Place { -public: - PlacePDPD(const InputModel& input_model, const std::vector& names) - : m_input_model(input_model), - m_names(names) { - } - - explicit PlacePDPD(const InputModel& input_model) : PlacePDPD(input_model, std::vector{}) { - } - - ~PlacePDPD() override = default; - - bool isInput() const override; - - bool isOutput() const override; - - bool isEqual(Ptr another) const override { return this == another.get(); } - - std::vector getNames() const override { return m_names; } - -private: - const InputModel& m_input_model; - std::vector m_names; -}; - -class InPortPlacePDPD : public PlacePDPD { -public: - explicit InPortPlacePDPD(const InputModel& input_model) - : PlacePDPD(input_model) { - } - - void setOp(const std::weak_ptr& op) { - m_op = op; - } - - void setSourceTensor(const std::weak_ptr& source_tensor) { - m_source_tensor = source_tensor; - } - - std::shared_ptr getSourceTensorPDPD() const; - - std::shared_ptr getOp(); -private: - std::weak_ptr m_source_tensor; - std::weak_ptr m_op; -}; - -class OutPortPlacePDPD : public PlacePDPD { -public: - explicit OutPortPlacePDPD(const InputModel& input_model) - : PlacePDPD(input_model) { - } - - void setOp(const std::weak_ptr& op) { m_op = op; } - - void setTargetTensor(const std::weak_ptr& target_tensor) { - m_target_tensor = target_tensor; - } - - std::shared_ptr getTargetTensorPDPD() const; - -private: - std::weak_ptr m_op; - std::weak_ptr m_target_tensor; -}; - -class OpPlacePDPD : public PlacePDPD { -public: - OpPlacePDPD(const InputModel& input_model, - const std::vector& names, - const std::shared_ptr& op_desc); - - OpPlacePDPD(const InputModel& input_model, - const std::shared_ptr& op_desc); - - void addInPort(const std::shared_ptr& input, const std::string& name) { - m_input_ports[name].push_back(input); - } - - void addOutPort(const std::shared_ptr& output, const std::string& name) { - m_output_ports[name].push_back(output); - } - - const std::map>>& getOutputPorts() const { - return m_output_ports; - } - - const std::map>>& getInputPorts() const { - return m_input_ports; - } - - std::shared_ptr getOutputPortPDPD(const std::string& name, int idx) { - return m_output_ports[name][idx]; - } - - std::shared_ptr getInputPortPDPD(const std::string& name, int idx) { - return m_input_ports[name][idx]; - } - - const std::shared_ptr& getDesc() const { return m_op_desc; } - -private: - std::shared_ptr m_op_desc; - std::map>> m_input_ports; - std::map>> m_output_ports; -}; - -class TensorPlacePDPD : public PlacePDPD { -public: - TensorPlacePDPD(const InputModel& input_model, - const std::vector& names, - const std::shared_ptr& var_desc); - - TensorPlacePDPD(const InputModel& input_model, - const std::shared_ptr& var_desc); - - void addProducingPort(const std::shared_ptr& out_port) { - m_producing_ports.push_back(out_port); - } - - void addConsumingPort(const std::shared_ptr& in_port) { - m_consuming_ports.push_back(in_port); - } - - std::vector getConsumingPorts () const override; - - Ptr getProducingPort () const override; - - const PartialShape& getPartialShape() const { return m_pshape; } - - const element::Type& getElementType() const { return m_type; } - - void setPartialShape(const PartialShape& pshape) { m_pshape = pshape; } - - void setElementType(const element::Type& type) { m_type = type; } - - const std::shared_ptr& getDesc() const { return m_var_desc; } - -private: - std::shared_ptr m_var_desc; - PartialShape m_pshape; - element::Type m_type; - - std::vector> 
m_producing_ports; - std::vector> m_consuming_ports; -}; - -} // namespace frontend + const std::shared_ptr& getDesc() const + { + return m_op_desc; + } + + private: + std::shared_ptr m_op_desc; + std::map>> m_input_ports; + std::map>> m_output_ports; + }; + + class TensorPlacePDPD : public PlacePDPD + { + public: + TensorPlacePDPD(const InputModel& input_model, + const std::vector& names, + const std::shared_ptr& var_desc); + + TensorPlacePDPD(const InputModel& input_model, + const std::shared_ptr& var_desc); + + void addProducingPort(const std::shared_ptr& out_port) + { + m_producing_ports.push_back(out_port); + } + + void addConsumingPort(const std::shared_ptr& in_port) + { + m_consuming_ports.push_back(in_port); + } + + std::vector getConsumingPorts() const override; + + Ptr getProducingPort() const override; + + const PartialShape& getPartialShape() const { return m_pshape; } + + const element::Type& getElementType() const { return m_type; } + + void setPartialShape(const PartialShape& pshape) { m_pshape = pshape; } + + void setElementType(const element::Type& type) { m_type = type; } + + const std::shared_ptr& getDesc() const + { + return m_var_desc; + } + + private: + std::shared_ptr m_var_desc; + PartialShape m_pshape; + element::Type m_type; + + std::vector> m_producing_ports; + std::vector> m_consuming_ports; + }; + + } // namespace frontend } // namespace ngraph diff --git a/ngraph/frontend/paddlepaddle/include/paddlepaddle_frontend/utility.hpp b/ngraph/frontend/paddlepaddle/include/paddlepaddle_frontend/utility.hpp index 5f2f1a18387a84..565f4d4cbe5051 100644 --- a/ngraph/frontend/paddlepaddle/include/paddlepaddle_frontend/utility.hpp +++ b/ngraph/frontend/paddlepaddle/include/paddlepaddle_frontend/utility.hpp @@ -18,16 +18,19 @@ #include -namespace ngraph { -namespace frontend { - -inline void PDPD_ASSERT(bool ex, const std::string& msg = "Unspecified error.") { - if (!ex) throw std::runtime_error(msg); -} +namespace ngraph +{ + namespace frontend + { + inline void PDPD_ASSERT(bool ex, const std::string& msg = "Unspecified error.") + { + if (!ex) + throw std::runtime_error(msg); + } #define PDPD_THROW(msg) throw std::runtime_error(std::string("ERROR: ") + msg) #define NOT_IMPLEMENTED(msg) throw std::runtime_error(std::string(msg) + " is not implemented") -} // namespace frontend + } // namespace frontend } // namespace ngraph diff --git a/ngraph/frontend/paddlepaddle/src/decoder.cpp b/ngraph/frontend/paddlepaddle/src/decoder.cpp index f9bc5fce328d09..1e24de23d25e55 100644 --- a/ngraph/frontend/paddlepaddle/src/decoder.cpp +++ b/ngraph/frontend/paddlepaddle/src/decoder.cpp @@ -4,56 +4,60 @@ #include #include -#include +#include #include +#include #include -#include #include -#include +#include #include "framework.pb.h" #include "decoder.hpp" - -namespace ngraph { -namespace frontend { - +using namespace ngraph; +using namespace ngraph::frontend; using namespace paddle::framework; -std::map TYPE_MAP{ - {proto::VarType_Type::VarType_Type_BOOL, ngraph::element::boolean}, - {proto::VarType_Type::VarType_Type_INT16, ngraph::element::i16}, - {proto::VarType_Type::VarType_Type_INT32, ngraph::element::i32}, - {proto::VarType_Type::VarType_Type_INT64, ngraph::element::i64}, - {proto::VarType_Type::VarType_Type_FP16, ngraph::element::f16}, - {proto::VarType_Type::VarType_Type_FP32, ngraph::element::f32}, - {proto::VarType_Type::VarType_Type_FP64, ngraph::element::f64}, - {proto::VarType_Type::VarType_Type_UINT8, ngraph::element::u8}, - {proto::VarType_Type::VarType_Type_INT8, 
ngraph::element::i8}, - {proto::VarType_Type::VarType_Type_BF16, ngraph::element::bf16} -}; - -ngraph::element::Type DecoderPDPDProto::get_dtype(const std::string& name, ngraph::element::Type def) const +std::map TYPE_MAP{ + {proto::VarType_Type::VarType_Type_BOOL, element::boolean}, + {proto::VarType_Type::VarType_Type_INT16, element::i16}, + {proto::VarType_Type::VarType_Type_INT32, element::i32}, + {proto::VarType_Type::VarType_Type_INT64, element::i64}, + {proto::VarType_Type::VarType_Type_FP16, element::f16}, + {proto::VarType_Type::VarType_Type_FP32, element::f32}, + {proto::VarType_Type::VarType_Type_FP64, element::f64}, + {proto::VarType_Type::VarType_Type_UINT8, element::u8}, + {proto::VarType_Type::VarType_Type_INT8, element::i8}, + {proto::VarType_Type::VarType_Type_BF16, element::bf16}}; + +element::Type DecoderPDPDProto::get_dtype(const std::string& name, element::Type def) const { auto dtype = (paddle::framework::proto::VarType_Type)get_int(name); return TYPE_MAP[dtype]; } -std::vector DecoderPDPDProto::get_ints(const std::string& name, const std::vector& def) const +std::vector DecoderPDPDProto::get_ints(const std::string& name, + const std::vector& def) const { std::cout << "Running get_ints" << std::endl; std::vector attrs; - for (const auto &attr : op_place->getDesc()->attrs()) { + for (const auto& attr : op_place->getDesc()->attrs()) + { if (attr.name() == name) attrs.push_back(attr); } - if (attrs.size() == 0) { + if (attrs.size() == 0) + { return def; - } else if (attrs.size() > 1) { + } + else if (attrs.size() > 1) + { // TODO: raise exception here return def; - } else { + } + else + { return std::vector(attrs[0].ints().begin(), attrs[0].ints().end()); } } @@ -61,35 +65,49 @@ std::vector DecoderPDPDProto::get_ints(const std::string& name, const s int DecoderPDPDProto::get_int(const std::string& name, int def) const { std::vector attrs; - for (const auto &attr : op_place->getDesc()->attrs()) { + for (const auto& attr : op_place->getDesc()->attrs()) + { if (attr.name() == name) attrs.push_back(attr); } - if (attrs.size() == 0) { + if (attrs.size() == 0) + { return def; - } else if (attrs.size() > 1) { + } + else if (attrs.size() > 1) + { // TODO: raise exception here return def; - } else { + } + else + { return attrs[0].i(); } } -std::vector DecoderPDPDProto::get_floats(const std::string& name, const std::vector& def) const +std::vector DecoderPDPDProto::get_floats(const std::string& name, + const std::vector& def) const { std::vector attrs; - for (const auto &attr : op_place->getDesc()->attrs()) { - if (attr.name() == name) { + for (const auto& attr : op_place->getDesc()->attrs()) + { + if (attr.name() == name) + { attrs.push_back(attr); std::cout << attr.type() << std::endl; } } - if (attrs.size() == 0) { + if (attrs.size() == 0) + { return def; - } else if (attrs.size() > 1) { + } + else if (attrs.size() > 1) + { // TODO: raise exception here return def; - } else { + } + else + { return std::vector(attrs[0].floats().begin(), attrs[0].floats().end()); } } @@ -97,16 +115,22 @@ std::vector DecoderPDPDProto::get_floats(const std::string& name, const s float DecoderPDPDProto::get_float(const std::string& name, float def) const { std::vector attrs; - for (const auto &attr : op_place->getDesc()->attrs()) { + for (const auto& attr : op_place->getDesc()->attrs()) + { if (attr.name() == name) attrs.push_back(attr); } - if (attrs.size() == 0) { + if (attrs.size() == 0) + { return def; - } else if (attrs.size() > 1) { + } + else if (attrs.size() > 1) + { // TODO: raise exception 
here return def; - } else { + } + else + { return attrs[0].f(); } } @@ -114,16 +138,22 @@ float DecoderPDPDProto::get_float(const std::string& name, float def) const std::string DecoderPDPDProto::get_str(const std::string& name, const std::string& def) const { std::vector attrs; - for (const auto &attr : op_place->getDesc()->attrs()) { + for (const auto& attr : op_place->getDesc()->attrs()) + { if (attr.name() == name) attrs.push_back(attr); } - if (attrs.size() == 0) { + if (attrs.size() == 0) + { return def; - } else if (attrs.size() > 1) { + } + else if (attrs.size() > 1) + { // TODO: raise exception here return def; - } else { + } + else + { return attrs[0].s(); } } @@ -131,35 +161,47 @@ std::string DecoderPDPDProto::get_str(const std::string& name, const std::string bool DecoderPDPDProto::get_bool(const std::string& name, bool def) const { std::vector attrs; - for (const auto &attr : op_place->getDesc()->attrs()) { + for (const auto& attr : op_place->getDesc()->attrs()) + { if (attr.name() == name) attrs.push_back(attr); } - if (attrs.size() == 0) { + if (attrs.size() == 0) + { return def; - } else if (attrs.size() > 1) { + } + else if (attrs.size() > 1) + { // TODO: raise exception here return def; - } else { + } + else + { return attrs[0].b(); } } -std::vector DecoderPDPDProto::get_longs(const std::string& name, const std::vector& def) const +std::vector DecoderPDPDProto::get_longs(const std::string& name, + const std::vector& def) const { std::cout << "Running get_longs" << std::endl; std::vector attrs; - for (const auto &attr : op_place->getDesc()->attrs()) { + for (const auto& attr : op_place->getDesc()->attrs()) + { if (attr.name() == name) attrs.push_back(attr); } - if (attrs.empty()) { + if (attrs.empty()) + { return def; - } else if (attrs.size() > 1) { + } + else if (attrs.size() > 1) + { // TODO: raise exception here return def; - } else { - + } + else + { return std::vector(attrs[0].longs().begin(), attrs[0].longs().end()); } } @@ -168,35 +210,42 @@ int64_t DecoderPDPDProto::get_long(const std::string& name, const int64_t& def) { std::cout << "Running get_long" << std::endl; std::vector attrs; - for (const auto &attr : op_place->getDesc()->attrs()) { + for (const auto& attr : op_place->getDesc()->attrs()) + { if (attr.name() == name) attrs.push_back(attr); } - if (attrs.empty()) { + if (attrs.empty()) + { return def; - } else if (attrs.size() > 1) { + } + else if (attrs.size() > 1) + { // TODO: raise exception here return def; - } else { + } + else + { return attrs[0].l(); } } -std::vector DecoderPDPDProto::get_output_names() const { +std::vector DecoderPDPDProto::get_output_names() const +{ std::vector output_names; - for (const auto& output : op_place->getDesc()->outputs()) { + for (const auto& output : op_place->getDesc()->outputs()) + { output_names.push_back(output.parameter()); } return output_names; } -std::vector DecoderPDPDProto::get_out_port_types(const std::string& port_name) const { - std::vector output_types; - for (const auto& out_port : op_place->getOutputPorts().at(port_name)) { +std::vector DecoderPDPDProto::get_out_port_types(const std::string& port_name) const +{ + std::vector output_types; + for (const auto& out_port : op_place->getOutputPorts().at(port_name)) + { output_types.push_back(out_port->getTargetTensorPDPD()->getElementType()); } return output_types; } - -} // namespace frontend -} // namespace ngraph \ No newline at end of file diff --git a/ngraph/frontend/paddlepaddle/src/decoder.hpp b/ngraph/frontend/paddlepaddle/src/decoder.hpp index 
3afb5fc542ee33..2f9b26cd2d67e0 100644 --- a/ngraph/frontend/paddlepaddle/src/decoder.hpp +++ b/ngraph/frontend/paddlepaddle/src/decoder.hpp @@ -14,17 +14,16 @@ // limitations under the License. //***************************************************************************** - #pragma once #include #include -#include +#include #include +#include #include -#include #include -#include +#include #include "framework.pb.h" @@ -34,37 +33,46 @@ #include #include -namespace ngraph { -namespace frontend { - -extern std::map TYPE_MAP; +namespace ngraph +{ + namespace frontend + { + extern std::map TYPE_MAP; -// TODO: Inherit from one of the ngraph classes -class AttributeNotFound : public std::exception -{}; + // TODO: Inherit from one of the ngraph classes + class AttributeNotFound : public std::exception + { + }; -class DecoderPDPDProto -{ - std::shared_ptr op_place; + class DecoderPDPDProto + { + std::shared_ptr op_place; -public: - explicit DecoderPDPDProto (const std::shared_ptr& op) : op_place(op) {} + public: + explicit DecoderPDPDProto(const std::shared_ptr& op) + : op_place(op) + { + } - std::vector get_ints(const std::string& name, const std::vector& def = {}) const; - int get_int(const std::string& name, int def = 0) const; - std::vector get_floats(const std::string& name, const std::vector& def = {}) const; - float get_float(const std::string& name, float def = 0.) const; - std::string get_str(const std::string& name, const std::string& def = "") const; - bool get_bool (const std::string& name, bool def = false) const; - std::vector get_longs(const std::string& name, const std::vector& def = {}) const; - int64_t get_long(const std::string& name, const int64_t& def = {}) const; + std::vector get_ints(const std::string& name, + const std::vector& def = {}) const; + int get_int(const std::string& name, int def = 0) const; + std::vector get_floats(const std::string& name, + const std::vector& def = {}) const; + float get_float(const std::string& name, float def = 0.) const; + std::string get_str(const std::string& name, const std::string& def = "") const; + bool get_bool(const std::string& name, bool def = false) const; + std::vector get_longs(const std::string& name, + const std::vector& def = {}) const; + int64_t get_long(const std::string& name, const int64_t& def = {}) const; - // TODO: Further populate get_XXX methods on demand - ngraph::element::Type get_dtype(const std::string& name, ngraph::element::Type def) const; + // TODO: Further populate get_XXX methods on demand + ngraph::element::Type get_dtype(const std::string& name, + ngraph::element::Type def) const; - std::vector get_output_names() const; - std::vector get_out_port_types(const std::string& port_name) const; -}; + std::vector get_output_names() const; + std::vector get_out_port_types(const std::string& port_name) const; + }; -} -} + } // namespace frontend +} // namespace ngraph diff --git a/ngraph/frontend/paddlepaddle/src/frontend.cpp b/ngraph/frontend/paddlepaddle/src/frontend.cpp index 78c639958572fd..adac2e9c11518d 100644 --- a/ngraph/frontend/paddlepaddle/src/frontend.cpp +++ b/ngraph/frontend/paddlepaddle/src/frontend.cpp @@ -14,16 +14,15 @@ // limitations under the License. 
//***************************************************************************** - #include -#include #include -#include +#include #include +#include +#include #include -#include #include -#include +#include #include "framework.pb.h" @@ -42,47 +41,51 @@ using namespace ngraph::opset7; -namespace ngraph { -namespace frontend { -namespace pdpd { - -NamedOutputs make_ng_node(std::map>& nodes, - const std::shared_ptr& op_place, - const std::map& CREATORS_MAP) { - const auto& op = op_place->getDesc(); - std::cout << "Making node: " << op->type() << std::endl; - - PDPD_ASSERT(CREATORS_MAP.find(op->type()) != CREATORS_MAP.end(), "No creator found"); - pdpd::NamedInputs named_inputs; - const auto& input_ports = op_place->getInputPorts(); - for (const auto& name_to_ports : input_ports) { - for (const auto& port : name_to_ports.second) { - const auto& var_desc = port->getSourceTensorPDPD()->getDesc(); - if (nodes.count(var_desc->name())) - named_inputs[name_to_ports.first].push_back(nodes.at(var_desc->name())); - else - // return empty map when not all inputs exist. It usually means that these - // nodes are not used because model inputs were overwritten - return NamedOutputs(); +using namespace ngraph; +using namespace ngraph::frontend; + +namespace pdpd +{ + NamedOutputs make_ng_node(std::map>& nodes, + const std::shared_ptr& op_place, + const std::map& CREATORS_MAP) + { + const auto& op = op_place->getDesc(); + std::cout << "Making node: " << op->type() << std::endl; + + PDPD_ASSERT(CREATORS_MAP.find(op->type()) != CREATORS_MAP.end(), "No creator found"); + pdpd::NamedInputs named_inputs; + const auto& input_ports = op_place->getInputPorts(); + for (const auto& name_to_ports : input_ports) + { + for (const auto& port : name_to_ports.second) + { + const auto& var_desc = port->getSourceTensorPDPD()->getDesc(); + if (nodes.count(var_desc->name())) + named_inputs[name_to_ports.first].push_back(nodes.at(var_desc->name())); + else + // return empty map when not all inputs exist. 
It usually means that + // these nodes are not used because model inputs were overwritten + return NamedOutputs(); + } } - } - return CREATORS_MAP.at(op->type())(NodeContext(DecoderPDPDProto(op_place), named_inputs)); -} + return CREATORS_MAP.at(op->type())(NodeContext(DecoderPDPDProto(op_place), named_inputs)); + } } // namespace pdpd -std::shared_ptr - FrontEndPDPD::convert_model(const std::shared_ptr& model) +std::shared_ptr FrontEndPDPD::convert_model(const std::shared_ptr& model) { - std::cout << "Convert Model Start" << std::endl; - + std::cout << "Convert Model Start" << std::endl; + std::map> nodes_dict(model->getTensorValues()); ParameterVector parameter_nodes; ResultVector result_nodes; - + std::map CREATORS_MAP = pdpd::get_supported_ops(); - for (const auto& _inp_place: model->getInputs()) { + for (const auto& _inp_place : model->getInputs()) + { const auto& inp_place = std::dynamic_pointer_cast(_inp_place); const auto& var = inp_place->getDesc(); const auto& shape = inp_place->getPartialShape(); @@ -94,32 +97,42 @@ std::shared_ptr } const auto& op_places = model->getOpPlaces(); - for (const auto &op_place : op_places) { + for (const auto& op_place : op_places) + { const auto& op_type = op_place->getDesc()->type(); std::cerr << "Observing " << op_type << "\n"; - if (op_type == "feed" || op_type == "fetch") { + if (op_type == "feed" || op_type == "fetch") + { // inputs and outputs are stored in the model already continue; - } else { + } + else + { const auto& named_outputs = pdpd::make_ng_node(nodes_dict, op_place, CREATORS_MAP); // set layer name by the name of first output var - if (!named_outputs.empty()) { - const auto &first_output_var = op_place->getOutputPorts().begin()->second.at( - 0)->getTargetTensorPDPD()->getDesc(); + if (!named_outputs.empty()) + { + const auto& first_output_var = op_place->getOutputPorts() + .begin() + ->second.at(0) + ->getTargetTensorPDPD() + ->getDesc(); auto node = named_outputs.begin()->second[0].get_node_shared_ptr(); node->set_friendly_name(first_output_var->name()); std::cerr << "Named with " << node->get_friendly_name() << "\n"; } const auto& out_ports = op_place->getOutputPorts(); - for (const auto& name_to_outputs : named_outputs) { + for (const auto& name_to_outputs : named_outputs) + { const auto& ports = out_ports.at(name_to_outputs.first); PDPD_ASSERT(ports.size() == name_to_outputs.second.size(), "The number of output tensors must be equal to " "the number of outputs of the ngraph node."); - for (size_t idx = 0; idx < ports.size(); ++idx) { + for (size_t idx = 0; idx < ports.size(); ++idx) + { const auto& var = ports[idx]->getTargetTensorPDPD()->getDesc(); name_to_outputs.second[idx].get_tensor().set_names({var->name()}); // if nodes_dict already has node mapped to this tensor name it usually @@ -131,7 +144,8 @@ std::shared_ptr } } - for (const auto& _outp_place: model->getOutputs()) { + for (const auto& _outp_place : model->getOutputs()) + { const auto& outp_place = std::dynamic_pointer_cast(_outp_place); auto var = outp_place->getDesc(); auto input_var_name = var->name(); @@ -140,18 +154,23 @@ std::shared_ptr result_nodes.push_back(result); } - return std::make_shared(result_nodes, parameter_nodes); + return std::make_shared(result_nodes, parameter_nodes); } -InputModel::Ptr FrontEndPDPD::loadFromFile (const std::string& path) const { +InputModel::Ptr FrontEndPDPD::loadFromFile(const std::string& path) const +{ return loadFromFiles({path}); } -InputModel::Ptr FrontEndPDPD::loadFromFiles (const std::vector& paths) const { - if 
(paths.size() == 1) { +InputModel::Ptr FrontEndPDPD::loadFromFiles(const std::vector& paths) const +{ + if (paths.size() == 1) + { // The case when folder with __model__ and weight files is provided or .pdmodel file return std::make_shared(paths[0]); - } else if (paths.size() == 2) { + } + else if (paths.size() == 2) + { // The case when .pdmodel and .pdparams files are provided std::ifstream model_stream(paths[0], std::ios::in | std::ifstream::binary); PDPD_ASSERT(model_stream && model_stream.is_open(), "Cannot open model file."); @@ -162,21 +181,21 @@ InputModel::Ptr FrontEndPDPD::loadFromFiles (const std::vector& pat PDPD_THROW("Model can be loaded either from 1 or 2 files"); } -InputModel::Ptr FrontEndPDPD::loadFromStream (std::istream& model_stream) const { +InputModel::Ptr FrontEndPDPD::loadFromStream(std::istream& model_stream) const +{ return loadFromStreams({&model_stream}); } -InputModel::Ptr FrontEndPDPD::loadFromStreams (const std::vector& streams) const { +InputModel::Ptr FrontEndPDPD::loadFromStreams(const std::vector& streams) const +{ return std::make_shared(streams); } -std::shared_ptr FrontEndPDPD::convert(InputModel::Ptr model) const { +std::shared_ptr FrontEndPDPD::convert(InputModel::Ptr model) const +{ std::cerr << "[ INFO ] PFrontEndPDPD::convert invoked\n"; - auto pdpd_model = std::dynamic_pointer_cast(model); + auto pdpd_model = std::dynamic_pointer_cast(model); auto f = convert_model(pdpd_model); std::cerr << "[ INFO ] Resulting nGraph function contains " << f->get_ops().size() << "\n"; return f; } - -} // namespace frontend -} // namespace ngraph diff --git a/ngraph/frontend/paddlepaddle/src/model.cpp b/ngraph/frontend/paddlepaddle/src/model.cpp index 01ff74bbc06092..e755cba5183c33 100644 --- a/ngraph/frontend/paddlepaddle/src/model.cpp +++ b/ngraph/frontend/paddlepaddle/src/model.cpp @@ -7,36 +7,39 @@ #include #include -#include "framework.pb.h" +#include #include "decoder.hpp" +#include "framework.pb.h" #include "node_context.hpp" -#include - -namespace ngraph { -namespace frontend { +using namespace ngraph; +using namespace ngraph::frontend; using namespace paddle::framework::proto; -class InputModelPDPD::InputModelPDPDImpl { +class InputModelPDPD::InputModelPDPDImpl +{ public: + InputModelPDPDImpl(const std::string& path, const InputModel& input_model); + InputModelPDPDImpl(const std::vector& streams, const InputModel& input_model); + std::vector getInputs() const; + std::vector getOutputs() const; + Place::Ptr getPlaceByTensorName(const std::string& tensorName) const; + void overrideAllOutputs(const std::vector& outputs); + void overrideAllInputs(const std::vector& inputs); + void extractSubgraph(const std::vector& inputs, + const std::vector& outputs); + void setDefaultShape(Place::Ptr place, const Shape&); + void setPartialShape(Place::Ptr place, const PartialShape&); + PartialShape getPartialShape(Place::Ptr place) const; + void setElementType(Place::Ptr place, const element::Type&); + void setTensorValue(Place::Ptr place, const void* value); - InputModelPDPDImpl (const std::string& path, const InputModel& input_model); - InputModelPDPDImpl (const std::vector& streams, const InputModel& input_model); - std::vector getInputs () const; - std::vector getOutputs () const; - Place::Ptr getPlaceByTensorName (const std::string& tensorName) const; - void overrideAllOutputs (const std::vector& outputs); - void overrideAllInputs (const std::vector& inputs); - void extractSubgraph (const std::vector& inputs, const std::vector& outputs); - void setDefaultShape 
(Place::Ptr place, const ngraph::Shape&); - void setPartialShape (Place::Ptr place, const ngraph::PartialShape&); - ngraph::PartialShape getPartialShape (Place::Ptr place) const; - void setElementType (Place::Ptr place, const ngraph::element::Type&); - void setTensorValue (Place::Ptr place, const void* value); - std::vector readWeight(const std::string& name, int64_t len); std::vector> getOpPlaces() const { return m_op_places; } - std::map> getVarPlaces() const { return m_var_places; } + std::map> getVarPlaces() const + { + return m_var_places; + } std::map> getTensorValues() const { return m_tensor_values; }; private: @@ -52,23 +55,31 @@ class InputModelPDPD::InputModelPDPDImpl { std::map> m_tensor_values; }; -void InputModelPDPD::InputModelPDPDImpl::loadPlaces() { +void InputModelPDPD::InputModelPDPDImpl::loadPlaces() +{ const int cnt_of_blocks = m_fw_ptr->blocks_size(); const auto& blocks = m_fw_ptr->blocks(); - for (int block_idx = 0; block_idx < cnt_of_blocks; block_idx++) { + for (int block_idx = 0; block_idx < cnt_of_blocks; block_idx++) + { const auto& block = blocks[block_idx]; - for (const auto& var : block.vars()) { - m_var_places[var.name()] = std::make_shared(m_input_model, std::make_shared(var)); + for (const auto& var : block.vars()) + { + m_var_places[var.name()] = + std::make_shared(m_input_model, std::make_shared(var)); } - for (const auto& op : block.ops()) { - auto op_place = std::make_shared(m_input_model, std::make_shared(op)); + for (const auto& op : block.ops()) + { + auto op_place = + std::make_shared(m_input_model, std::make_shared(op)); m_op_places.push_back(op_place); - for (const auto &output : op.outputs()) { - for (const auto &var_name : output.arguments()) { + for (const auto& output : op.outputs()) + { + for (const auto& var_name : output.arguments()) + { auto out_port = std::make_shared(m_input_model); // connect out_port and tensor @@ -82,8 +93,10 @@ void InputModelPDPD::InputModelPDPDImpl::loadPlaces() { } } - for (const auto &input : op.inputs()) { - for (const auto &var_name : input.arguments()) { + for (const auto& input : op.inputs()) + { + for (const auto& var_name : input.arguments()) + { auto in_port = std::make_shared(m_input_model); // connect in_port and tensor @@ -98,16 +111,21 @@ void InputModelPDPD::InputModelPDPDImpl::loadPlaces() { } // Determine outputs and inputs - if (op.type() == "feed") { + if (op.type() == "feed") + { const auto& place = op_place->getOutputPortPDPD("Out", 0); - const auto& var_place = std::dynamic_pointer_cast(place->getTargetTensorPDPD()); + const auto& var_place = + std::dynamic_pointer_cast(place->getTargetTensorPDPD()); const auto& tensor_desc = var_place->getDesc()->type().lod_tensor().tensor(); const auto& dims = tensor_desc.dims(); var_place->setElementType(TYPE_MAP[tensor_desc.data_type()]); - var_place->setPartialShape(PartialShape(std::vector(dims.begin(), dims.end()))); + var_place->setPartialShape( + PartialShape(std::vector(dims.begin(), dims.end()))); m_inputs.push_back(var_place); - } else if (op.type() == "fetch") { + } + else if (op.type() == "fetch") + { auto place = op_place->getInputPortPDPD("X", 0); m_outputs.push_back(place->getSourceTensorPDPD()); } @@ -115,29 +133,35 @@ void InputModelPDPD::InputModelPDPDImpl::loadPlaces() { } } -namespace pdpd { - -bool endsWith(const std::string &str, const std::string &suffix) { - if (str.length() >= suffix.length()) { - return (0 == str.compare(str.length() - suffix.length(), suffix.length(), suffix)); +namespace pdpd +{ + bool endsWith(const std::string& 
str, const std::string& suffix) + { + if (str.length() >= suffix.length()) + { + return (0 == str.compare(str.length() - suffix.length(), suffix.length(), suffix)); + } + return false; } - return false; -} -void read_tensor(std::istream& is, char* data, size_t len) { - std::vector header(16); - is.read(&header[0], 16); - uint32_t dims_len = 0; - is.read(reinterpret_cast(&dims_len), 4); - std::vector dims_struct(dims_len); - is.read(&dims_struct[0], dims_len); - is.read(data, len); -} + void read_tensor(std::istream& is, char* data, size_t len) + { + std::vector header(16); + is.read(&header[0], 16); + uint32_t dims_len = 0; + is.read(reinterpret_cast(&dims_len), 4); + std::vector dims_struct(dims_len); + is.read(&dims_struct[0], dims_len); + is.read(data, len); + } } // namespace pdpd -void InputModelPDPD::InputModelPDPDImpl::loadConsts(std::string folder_with_weights, std::istream* weight_stream) { - for (const auto& item: m_var_places) { +void InputModelPDPD::InputModelPDPDImpl::loadConsts(std::string folder_with_weights, + std::istream* weight_stream) +{ + for (const auto& item : m_var_places) + { const auto& var_desc = item.second->getDesc(); const auto& name = item.first; if (pdpd::endsWith(name, "feed") || pdpd::endsWith(name, "fetch")) @@ -150,15 +174,22 @@ void InputModelPDPD::InputModelPDPDImpl::loadConsts(std::string folder_with_weig Shape shape(tensor.dims().cbegin(), tensor.dims().cend()); const auto& type = TYPE_MAP[tensor.data_type()]; const auto& data_length = shape_size(shape) * type.size(); - std::vector tensor_data(data_length); - - if (weight_stream) { - pdpd::read_tensor(*weight_stream, reinterpret_cast(&tensor_data[0]), data_length); - } else if (!folder_with_weights.empty()) { - std::ifstream is(folder_with_weights + "/" + name, std::ios::in | std::ifstream::binary); + std::vector tensor_data(data_length); + + if (weight_stream) + { + pdpd::read_tensor( + *weight_stream, reinterpret_cast(&tensor_data[0]), data_length); + } + else if (!folder_with_weights.empty()) + { + std::ifstream is(folder_with_weights + "/" + name, + std::ios::in | std::ifstream::binary); PDPD_ASSERT(is && is.is_open(), "Cannot open file for constant value."); pdpd::read_tensor(is, reinterpret_cast(&tensor_data[0]), data_length); - } else { + } + else + { PDPD_THROW("Either folder with weights or stream must be provided."); } @@ -168,41 +199,55 @@ void InputModelPDPD::InputModelPDPDImpl::loadConsts(std::string folder_with_weig } } -InputModelPDPD::InputModelPDPDImpl::InputModelPDPDImpl(const std::string& path, const InputModel& input_model) - : m_fw_ptr{std::make_shared()}, - m_input_model(input_model) { - std::string ext = ".pdmodel"; +InputModelPDPD::InputModelPDPDImpl::InputModelPDPDImpl(const std::string& path, + const InputModel& input_model) + : m_fw_ptr{std::make_shared()} + , m_input_model(input_model) +{ + std::string ext = ".pdmodel"; std::string model_file(path); std::unique_ptr weights_stream; - if (model_file.length() >= ext.length() && (0 == model_file.compare(model_file.length() - ext.length(), ext.length(), ext))) + if (model_file.length() >= ext.length() && + (0 == model_file.compare(model_file.length() - ext.length(), ext.length(), ext))) { std::string weights_file(path); weights_file.replace(weights_file.size() - ext.size(), ext.size(), ".pdiparams"); - weights_stream = std::unique_ptr(new std::ifstream(weights_file, std::ios::binary)); + weights_stream = + std::unique_ptr(new std::ifstream(weights_file, std::ios::binary)); // if file isn't open it means model don't have 
constants or path is wrong - if (!weights_stream || !weights_stream->is_open()) { - std::cerr << "[WARNING:] Cannot open file containing weights: " << weights_file << std::endl; + if (!weights_stream || !weights_stream->is_open()) + { + std::cerr << "[WARNING:] Cannot open file containing weights: " << weights_file + << std::endl; } - } else { + } + else + { model_file += "/__model__"; } std::ifstream pb_stream(model_file, std::ios::binary); PDPD_ASSERT(m_fw_ptr->ParseFromIstream(&pb_stream), "Model can't be parsed"); - - std::cout << "Loading places" << std::endl; - loadPlaces(); - std::cout << "Loading consts" << std::endl; + + std::cout << "Loading places" << std::endl; + loadPlaces(); + std::cout << "Loading consts" << std::endl; loadConsts(weights_stream ? "" : path, weights_stream.get()); } -InputModelPDPD::InputModelPDPDImpl::InputModelPDPDImpl(const std::vector& streams, const InputModel& input_model) - : m_fw_ptr{std::make_shared()}, - m_input_model(input_model) { - if (streams.size() == 1) { +InputModelPDPD::InputModelPDPDImpl::InputModelPDPDImpl(const std::vector& streams, + const InputModel& input_model) + : m_fw_ptr{std::make_shared()} + , m_input_model(input_model) +{ + if (streams.size() == 1) + { std::cerr << "[WARNING:] Stream for weights not provided." << std::endl; - } else { - PDPD_ASSERT(streams.size() == 2, "Two streams are needed to load a model: model and weights streams"); + } + else + { + PDPD_ASSERT(streams.size() == 2, + "Two streams are needed to load a model: model and weights streams"); } PDPD_ASSERT(m_fw_ptr->ParseFromIstream(streams[0]), "Model can't be parsed"); @@ -211,71 +256,93 @@ InputModelPDPD::InputModelPDPDImpl::InputModelPDPDImpl(const std::vector InputModelPDPD::InputModelPDPDImpl::getInputs () const { +std::vector InputModelPDPD::InputModelPDPDImpl::getInputs() const +{ return m_inputs; } -std::vector InputModelPDPD::InputModelPDPDImpl::getOutputs () const { +std::vector InputModelPDPD::InputModelPDPDImpl::getOutputs() const +{ return m_outputs; } -Place::Ptr InputModelPDPD::InputModelPDPDImpl::getPlaceByTensorName (const std::string& tensorName) const { +Place::Ptr + InputModelPDPD::InputModelPDPDImpl::getPlaceByTensorName(const std::string& tensorName) const +{ if (m_var_places.count(tensorName)) return m_var_places.at(tensorName); return nullptr; } -namespace pdpd { - -std::shared_ptr castToTensorPlace(const Place::Ptr& place) { - if (auto var_place = std::dynamic_pointer_cast(place)) { - return var_place; - } else if (auto in_port_place = std::dynamic_pointer_cast(place)) { - return in_port_place->getSourceTensorPDPD(); - } else if (auto out_port_place = std::dynamic_pointer_cast(place)) { - return out_port_place->getTargetTensorPDPD(); +namespace pdpd +{ + std::shared_ptr castToTensorPlace(const Place::Ptr& place) + { + if (auto var_place = std::dynamic_pointer_cast(place)) + { + return var_place; + } + else if (auto in_port_place = std::dynamic_pointer_cast(place)) + { + return in_port_place->getSourceTensorPDPD(); + } + else if (auto out_port_place = std::dynamic_pointer_cast(place)) + { + return out_port_place->getTargetTensorPDPD(); + } + PDPD_THROW("Cannot cast this Place to TensorPlacePDPD."); } - PDPD_THROW("Cannot cast this Place to TensorPlacePDPD."); -} } // namespace pdpd -void InputModelPDPD::InputModelPDPDImpl::overrideAllInputs (const std::vector& inputs) { +void InputModelPDPD::InputModelPDPDImpl::overrideAllInputs(const std::vector& inputs) +{ m_inputs.clear(); - for (const auto& inp : inputs) { + for (const auto& inp : 
inputs) + { m_inputs.push_back(pdpd::castToTensorPlace(inp)); } } -void InputModelPDPD::InputModelPDPDImpl::overrideAllOutputs (const std::vector& outputs) { +void InputModelPDPD::InputModelPDPDImpl::overrideAllOutputs(const std::vector& outputs) +{ m_outputs.clear(); - for (const auto& outp : outputs) { + for (const auto& outp : outputs) + { m_outputs.push_back(pdpd::castToTensorPlace(outp)); } } -void InputModelPDPD::InputModelPDPDImpl::extractSubgraph (const std::vector& inputs, const std::vector& outputs) { +void InputModelPDPD::InputModelPDPDImpl::extractSubgraph(const std::vector& inputs, + const std::vector& outputs) +{ overrideAllInputs(inputs); overrideAllOutputs(outputs); } -void InputModelPDPD::InputModelPDPDImpl::setDefaultShape (Place::Ptr place, const ngraph::Shape& shape) { +void InputModelPDPD::InputModelPDPDImpl::setDefaultShape(Place::Ptr place, const Shape& shape) +{ NOT_IMPLEMENTED("setDefaultShape"); } -void InputModelPDPD::InputModelPDPDImpl::setPartialShape (Place::Ptr place, const ngraph::PartialShape& p_shape) { +void InputModelPDPD::InputModelPDPDImpl::setPartialShape(Place::Ptr place, + const PartialShape& p_shape) +{ pdpd::castToTensorPlace(place)->setPartialShape(p_shape); } -ngraph::PartialShape InputModelPDPD::InputModelPDPDImpl::getPartialShape (Place::Ptr place) const { +PartialShape InputModelPDPD::InputModelPDPDImpl::getPartialShape(Place::Ptr place) const +{ return pdpd::castToTensorPlace(place)->getPartialShape(); } -void InputModelPDPD::InputModelPDPDImpl::setElementType (Place::Ptr place, const ngraph::element::Type& type) { +void InputModelPDPD::InputModelPDPDImpl::setElementType(Place::Ptr place, const element::Type& type) +{ pdpd::castToTensorPlace(place)->setElementType(type); } -void InputModelPDPD::InputModelPDPDImpl::setTensorValue (Place::Ptr place, const void* value) { +void InputModelPDPD::InputModelPDPDImpl::setTensorValue(Place::Ptr place, const void* value) +{ auto tensor_place = pdpd::castToTensorPlace(place); auto p_shape = tensor_place->getPartialShape(); auto type = tensor_place->getElementType(); @@ -285,65 +352,83 @@ void InputModelPDPD::InputModelPDPDImpl::setTensorValue (Place::Ptr place, const m_tensor_values[name] = constant; } -InputModelPDPD::InputModelPDPD (const std::string& path) : _impl{std::make_shared(path, *this)} {} +InputModelPDPD::InputModelPDPD(const std::string& path) + : _impl{std::make_shared(path, *this)} +{ +} -InputModelPDPD::InputModelPDPD (const std::vector& streams) : _impl{std::make_shared(streams, *this)} {} +InputModelPDPD::InputModelPDPD(const std::vector& streams) + : _impl{std::make_shared(streams, *this)} +{ +} -std::vector> InputModelPDPD::getOpPlaces() const { +std::vector> InputModelPDPD::getOpPlaces() const +{ return _impl->getOpPlaces(); } -std::map> InputModelPDPD::getVarPlaces() const { +std::map> InputModelPDPD::getVarPlaces() const +{ return _impl->getVarPlaces(); } -std::map> InputModelPDPD::getTensorValues() const { +std::map> InputModelPDPD::getTensorValues() const +{ return _impl->getTensorValues(); } -std::vector InputModelPDPD::getInputs () const { +std::vector InputModelPDPD::getInputs() const +{ return _impl->getInputs(); } -std::vector InputModelPDPD::getOutputs () const { +std::vector InputModelPDPD::getOutputs() const +{ return _impl->getOutputs(); } -Place::Ptr InputModelPDPD::getPlaceByTensorName (const std::string& tensorName) const { +Place::Ptr InputModelPDPD::getPlaceByTensorName(const std::string& tensorName) const +{ return _impl->getPlaceByTensorName(tensorName); } -void 
InputModelPDPD::overrideAllOutputs (const std::vector& outputs) { +void InputModelPDPD::overrideAllOutputs(const std::vector& outputs) +{ return _impl->overrideAllOutputs(outputs); } -void InputModelPDPD::overrideAllInputs (const std::vector& inputs) { +void InputModelPDPD::overrideAllInputs(const std::vector& inputs) +{ return _impl->overrideAllInputs(inputs); } -void InputModelPDPD::extractSubgraph (const std::vector& inputs, const std::vector& outputs) { +void InputModelPDPD::extractSubgraph(const std::vector& inputs, + const std::vector& outputs) +{ return _impl->extractSubgraph(inputs, outputs); } -void InputModelPDPD::setDefaultShape (Place::Ptr place, const ngraph::Shape& shape) { +void InputModelPDPD::setDefaultShape(Place::Ptr place, const Shape& shape) +{ return _impl->setDefaultShape(place, shape); } -void InputModelPDPD::setPartialShape (Place::Ptr place, const ngraph::PartialShape& p_shape) { +void InputModelPDPD::setPartialShape(Place::Ptr place, const PartialShape& p_shape) +{ return _impl->setPartialShape(place, p_shape); } -ngraph::PartialShape InputModelPDPD::getPartialShape (Place::Ptr place) const { +PartialShape InputModelPDPD::getPartialShape(Place::Ptr place) const +{ return _impl->getPartialShape(place); } -void InputModelPDPD::setElementType (Place::Ptr place, const ngraph::element::Type& type) { +void InputModelPDPD::setElementType(Place::Ptr place, const element::Type& type) +{ return _impl->setElementType(place, type); } -void InputModelPDPD::setTensorValue (Place::Ptr place, const void* value) { +void InputModelPDPD::setTensorValue(Place::Ptr place, const void* value) +{ return _impl->setTensorValue(place, value); } - -} // namespace frontend -} // namespace ngraph diff --git a/ngraph/frontend/paddlepaddle/src/node_context.hpp b/ngraph/frontend/paddlepaddle/src/node_context.hpp index 54154b3fe1e562..d158a0dd189de0 100644 --- a/ngraph/frontend/paddlepaddle/src/node_context.hpp +++ b/ngraph/frontend/paddlepaddle/src/node_context.hpp @@ -15,133 +15,189 @@ //***************************************************************************** #pragma once -#include "decoder.hpp" #include #include +#include "decoder.hpp" -namespace ngraph { -namespace frontend { -namespace pdpd { - -using InPortName = std::string; -using OutPortName = std::string; -using TensorName = std::string; -using NamedOutputs = std::map; -using NamedInputs = std::map; - -/// Keep necessary data for a single node in the original FW graph to facilitate conversion process in the rules code. 
-class NodeContext +namespace ngraph { - const DecoderPDPDProto& node; - const NamedInputs& name_map; - -public: - - NodeContext (const DecoderPDPDProto& _node, NamedInputs& _name_map) : node(_node), name_map(_name_map) {} - - /// Detects if there is at least one input attached with a given name - bool has_ng_input (const std::string& name) const - { - auto found = name_map.find(name); - if(found != name_map.end()) - return !found->second.empty(); - return false; - } - - size_t get_ng_input_size (const std::string& name) const { return name_map.at(name).size(); } - - /// Returns exactly one input with a given name; throws if there is no inputs or there are more than one input - Output get_ng_input (const std::string& name) const - { - PDPD_ASSERT(name_map.at(name).size() == 1); - return name_map.at(name).at(0); - } - - /// Returns all inputs with a given name - OutputVector get_ng_inputs (const std::string& name) const { return name_map.at(name); } - - template - T get_attribute (const std::string& name, const T& def = T()) const; - - template - bool has_attribute (const std::string& name) const + namespace frontend { - // TODO: Rework this hack - try { - get_attribute(name); - return true; - } - catch(const AttributeNotFound&) { - return false; - } - } - - std::vector get_output_names() const { return node.get_output_names(); } - std::vector get_out_port_types(const std::string& port_name) const - { return node.get_out_port_types(port_name); } - ngraph::element::Type get_out_port_type(const std::string& port_name) const; - NamedOutputs default_single_output_mapping(const std::shared_ptr &ngraph_node, - const std::vector& required_pdpd_out_names) const; -}; - -template <> -inline int32_t NodeContext::get_attribute (const std::string& name, const int32_t& def) const -{ return node.get_int(name, def); } - -template <> -inline float NodeContext::get_attribute (const std::string& name, const float& def) const -{ return node.get_float(name, def); } - -template <> -inline std::string NodeContext::get_attribute (const std::string& name, const std::string& def) const -{ return node.get_str(name, def); } - -template <> -inline std::vector NodeContext::get_attribute (const std::string& name, const std::vector& def) const -{ return node.get_ints(name, def); } - -template <> -inline std::vector NodeContext::get_attribute (const std::string& name, const std::vector& def) const -{ return node.get_floats(name, def); } - -template <> -inline bool NodeContext::get_attribute (const std::string& name, const bool& def) const -{ return node.get_bool(name, def); } - -template <> -inline ngraph::element::Type NodeContext::get_attribute (const std::string& name, const ngraph::element::Type& def) const -{ return node.get_dtype(name, def); } - -inline ngraph::element::Type NodeContext::get_out_port_type(const std::string& port_name) const -{ - auto types = get_out_port_types(port_name); - PDPD_ASSERT(types.size() > 0, "Port has no tensors connected."); - PDPD_ASSERT(std::equal(types.begin() + 1, types.end(), types.begin()), - "Port has tensors with different types connected."); - return types[0]; -} - -inline NamedOutputs NodeContext::default_single_output_mapping(const std::shared_ptr& ngraph_node, - const std::vector& required_pdpd_out_names) const -{ - NamedOutputs named_outputs; - const auto& ngraph_outputs = ngraph_node->outputs(); - const auto& pdpd_op_output_names = this->get_output_names(); - PDPD_ASSERT(ngraph_outputs.size() == 1, "nGraph node must have exactly one output"); - for (const auto& pdpd_name : 
pdpd_op_output_names) { - if (std::find(required_pdpd_out_names.begin(), required_pdpd_out_names.end(), pdpd_name) != required_pdpd_out_names.end()) - named_outputs[pdpd_name] = {ngraph_outputs[0]}; - } - return named_outputs; -} -template <> -inline std::vector NodeContext::get_attribute (const std::string& name, const std::vector& def) const -{ return node.get_longs(name, def); } - -template <> -inline int64_t NodeContext::get_attribute (const std::string& name, const int64_t& def) const -{ return node.get_long(name, def); } - -} // namespace pdpd -} // namespace frontend + namespace pdpd + { + using InPortName = std::string; + using OutPortName = std::string; + using TensorName = std::string; + using NamedOutputs = std::map; + using NamedInputs = std::map; + + /// Keep necessary data for a single node in the original FW graph to facilitate + /// conversion process in the rules code. + class NodeContext + { + const DecoderPDPDProto& node; + const NamedInputs& name_map; + + public: + NodeContext(const DecoderPDPDProto& _node, NamedInputs& _name_map) + : node(_node) + , name_map(_name_map) + { + } + + /// Detects if there is at least one input attached with a given name + bool has_ng_input(const std::string& name) const + { + auto found = name_map.find(name); + if (found != name_map.end()) + return !found->second.empty(); + return false; + } + + size_t get_ng_input_size(const std::string& name) const + { + return name_map.at(name).size(); + } + + /// Returns exactly one input with a given name; throws if there is no inputs or + /// there are more than one input + Output get_ng_input(const std::string& name) const + { + PDPD_ASSERT(name_map.at(name).size() == 1); + return name_map.at(name).at(0); + } + + /// Returns all inputs with a given name + OutputVector get_ng_inputs(const std::string& name) const + { + return name_map.at(name); + } + + template + T get_attribute(const std::string& name, const T& def = T()) const; + + template + bool has_attribute(const std::string& name) const + { + // TODO: Rework this hack + try + { + get_attribute(name); + return true; + } + catch (const AttributeNotFound&) + { + return false; + } + } + + std::vector get_output_names() const + { + return node.get_output_names(); + } + std::vector + get_out_port_types(const std::string& port_name) const + { + return node.get_out_port_types(port_name); + } + ngraph::element::Type get_out_port_type(const std::string& port_name) const; + NamedOutputs default_single_output_mapping( + const std::shared_ptr& ngraph_node, + const std::vector& required_pdpd_out_names) const; + }; + + template <> + inline int32_t NodeContext::get_attribute(const std::string& name, + const int32_t& def) const + { + return node.get_int(name, def); + } + + template <> + inline float NodeContext::get_attribute(const std::string& name, const float& def) const + { + return node.get_float(name, def); + } + + template <> + inline std::string NodeContext::get_attribute(const std::string& name, + const std::string& def) const + { + return node.get_str(name, def); + } + + template <> + inline std::vector + NodeContext::get_attribute(const std::string& name, + const std::vector& def) const + { + return node.get_ints(name, def); + } + + template <> + inline std::vector + NodeContext::get_attribute(const std::string& name, + const std::vector& def) const + { + return node.get_floats(name, def); + } + + template <> + inline bool NodeContext::get_attribute(const std::string& name, const bool& def) const + { + return node.get_bool(name, def); + } + + 
template <> + inline ngraph::element::Type + NodeContext::get_attribute(const std::string& name, + const ngraph::element::Type& def) const + { + return node.get_dtype(name, def); + } + + inline ngraph::element::Type + NodeContext::get_out_port_type(const std::string& port_name) const + { + auto types = get_out_port_types(port_name); + PDPD_ASSERT(types.size() > 0, "Port has no tensors connected."); + PDPD_ASSERT(std::equal(types.begin() + 1, types.end(), types.begin()), + "Port has tensors with different types connected."); + return types[0]; + } + + inline NamedOutputs NodeContext::default_single_output_mapping( + const std::shared_ptr& ngraph_node, + const std::vector& required_pdpd_out_names) const + { + NamedOutputs named_outputs; + const auto& ngraph_outputs = ngraph_node->outputs(); + const auto& pdpd_op_output_names = this->get_output_names(); + PDPD_ASSERT(ngraph_outputs.size() == 1, "nGraph node must have exactly one output"); + for (const auto& pdpd_name : pdpd_op_output_names) + { + if (std::find(required_pdpd_out_names.begin(), + required_pdpd_out_names.end(), + pdpd_name) != required_pdpd_out_names.end()) + named_outputs[pdpd_name] = {ngraph_outputs[0]}; + } + return named_outputs; + } + template <> + inline std::vector + NodeContext::get_attribute(const std::string& name, + const std::vector& def) const + { + return node.get_longs(name, def); + } + + template <> + inline int64_t NodeContext::get_attribute(const std::string& name, + const int64_t& def) const + { + return node.get_long(name, def); + } + + } // namespace pdpd + } // namespace frontend } // namespace ngraph diff --git a/ngraph/frontend/paddlepaddle/src/op/argmax.cpp b/ngraph/frontend/paddlepaddle/src/op/argmax.cpp index 02a266d3931c9c..b62fa4ea4edfa5 100644 --- a/ngraph/frontend/paddlepaddle/src/op/argmax.cpp +++ b/ngraph/frontend/paddlepaddle/src/op/argmax.cpp @@ -2,33 +2,47 @@ // SPDX-License-Identifier: Apache-2.0 // -#include #include "argmax.hpp" +#include -namespace ngraph { -namespace frontend { -namespace pdpd { -namespace op { +using namespace ngraph; +using namespace ngraph::frontend; -NamedOutputs argmax (const NodeContext& node) { - auto data = node.get_ng_input("X"); - bool flatten = node.get_attribute("flatten"); - const element::Type& index_element_type = element::i64; - const Output k = ngraph::opset6::Constant::create(ngraph::element::i64, {}, {1}); +namespace pdpd +{ + namespace op + { + NamedOutputs argmax(const NodeContext& node) + { + auto data = node.get_ng_input("X"); + bool flatten = node.get_attribute("flatten"); + const element::Type& index_element_type = element::i64; + const Output k = opset6::Constant::create(element::i64, {}, {1}); - if(!flatten) { - auto axis = node.get_attribute("axis"); - const auto axis_to_remove = ngraph::opset6::Constant::create(element::u64, Shape{}, {axis}); - auto node_topk = std::make_shared(data, k, axis, "max", "index", index_element_type); - const auto reshaped_indices = std::make_shared(node_topk->output(1), axis_to_remove); - return node.default_single_output_mapping({std::make_shared(reshaped_indices, element::i64)}, {"Out"}); - } else { - int64_t axis = 0; - const Output reshape_flatten = ngraph::opset6::Constant::create(ngraph::element::i64, {1}, {-1}); - auto node_reshape = std::make_shared(data, reshape_flatten, true); - auto node_topk = std::make_shared(node_reshape, k, axis, "max", "index", index_element_type); - return node.default_single_output_mapping({std::make_shared(node_topk->output(1), element::i64)}, {"Out"}); - } -} + if (!flatten) + { 
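+                // Non-flatten case: TopK with k = 1 along `axis` exposes the max
+                // indices on output(1); the length-1 axis is then removed and the
+                // result converted to i64, Paddle's argmax output type.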
+ auto axis = node.get_attribute("axis"); + const auto axis_to_remove = opset6::Constant::create(element::u64, Shape{}, {axis}); + auto node_topk = std::make_shared( + data, k, axis, "max", "index", index_element_type); + const auto reshaped_indices = + std::make_shared(node_topk->output(1), axis_to_remove); + return node.default_single_output_mapping( + {std::make_shared(reshaped_indices, element::i64)}, {"Out"}); + } + else + { + int64_t axis = 0; + const Output reshape_flatten = + opset6::Constant::create(element::i64, {1}, {-1}); + auto node_reshape = std::make_shared(data, reshape_flatten, true); + auto node_topk = std::make_shared( + node_reshape, k, axis, "max", "index", index_element_type); + return node.default_single_output_mapping( + {std::make_shared(node_topk->output(1), element::i64)}, + {"Out"}); + } + } -}}}} + } // namespace op +} // namespace pdpd diff --git a/ngraph/frontend/paddlepaddle/src/op/argmax.hpp b/ngraph/frontend/paddlepaddle/src/op/argmax.hpp index 2362a52e1e1130..767e9f75770c4a 100644 --- a/ngraph/frontend/paddlepaddle/src/op/argmax.hpp +++ b/ngraph/frontend/paddlepaddle/src/op/argmax.hpp @@ -5,11 +5,17 @@ #pragma once #include "node_context.hpp" -namespace ngraph { -namespace frontend { -namespace pdpd { -namespace op { +namespace ngraph +{ + namespace frontend + { + namespace pdpd + { + namespace op + { + NamedOutputs argmax(const NodeContext& node); -NamedOutputs argmax (const NodeContext& node); - -}}}} \ No newline at end of file + } + } // namespace pdpd + } // namespace frontend +} // namespace ngraph \ No newline at end of file diff --git a/ngraph/frontend/paddlepaddle/src/op/assign_value.cpp b/ngraph/frontend/paddlepaddle/src/op/assign_value.cpp index d32a2b96860b31..9d297c635a4e43 100644 --- a/ngraph/frontend/paddlepaddle/src/op/assign_value.cpp +++ b/ngraph/frontend/paddlepaddle/src/op/assign_value.cpp @@ -2,55 +2,61 @@ // SPDX-License-Identifier: Apache-2.0 // -#include #include "assign_value.hpp" -namespace ngraph { - namespace frontend { - namespace pdpd { - namespace op { - - NamedOutputs assign_value (const NodeContext& node) { - - std::vector shape = node.get_attribute>("shape"); - auto dtype = node.get_attribute("dtype"); - std::shared_ptr const_node; +#include - switch (dtype) { - case element::i32: - { - auto values = node.get_attribute>("int32_values"); - const_node = {opset6::Constant::create(dtype, Shape{shape.begin(), shape.end()}, values)}; - break; - } - case element::f32: - { - std::vector values = node.get_attribute>("fp32_values"); - const_node = {opset6::Constant::create(dtype, Shape{shape.begin(), shape.end()}, values)}; - break; - } - case element::boolean: - { - auto values = node.get_attribute>("bool_values"); - const_node = {opset6::Constant::create(dtype, Shape{shape.begin(), shape.end()}, values)}; - break; - } - case element::i64: - { - auto values = node.get_attribute>("int64_values"); - const_node = {opset6::Constant::create(dtype, Shape{shape.begin(), shape.end()}, values)}; - break; - } - default: - { - PDPD_ASSERT(false, "assign_value only supports int32, int64, float32, bool"); - break; - } - } +using namespace ngraph; +using namespace ngraph::frontend; - return node.default_single_output_mapping({const_node}, {"Out"}); - } +namespace pdpd +{ + namespace op + { + NamedOutputs assign_value(const NodeContext& node) + { + std::vector shape = node.get_attribute>("shape"); + auto dtype = node.get_attribute("dtype"); + std::shared_ptr const_node; + switch (dtype) + { + case element::i32: + { + auto values = 
node.get_attribute>("int32_values"); + const_node = { + opset6::Constant::create(dtype, Shape{shape.begin(), shape.end()}, values)}; + break; + } + case element::f32: + { + std::vector values = node.get_attribute>("fp32_values"); + const_node = { + opset6::Constant::create(dtype, Shape{shape.begin(), shape.end()}, values)}; + break; + } + case element::boolean: + { + auto values = node.get_attribute>("bool_values"); + const_node = { + opset6::Constant::create(dtype, Shape{shape.begin(), shape.end()}, values)}; + break; } + case element::i64: + { + auto values = node.get_attribute>("int64_values"); + const_node = { + opset6::Constant::create(dtype, Shape{shape.begin(), shape.end()}, values)}; + break; + } + default: + { + PDPD_ASSERT(false, "assign_value only supports int32, int64, float32, bool"); + break; + } + } + + return node.default_single_output_mapping({const_node}, {"Out"}); } - } -} + + } // namespace op +} // namespace pdpd diff --git a/ngraph/frontend/paddlepaddle/src/op/assign_value.hpp b/ngraph/frontend/paddlepaddle/src/op/assign_value.hpp index 032aba1b838212..eebba5c917bc05 100644 --- a/ngraph/frontend/paddlepaddle/src/op/assign_value.hpp +++ b/ngraph/frontend/paddlepaddle/src/op/assign_value.hpp @@ -6,14 +6,17 @@ #include "node_context.hpp" -namespace ngraph { - namespace frontend { - namespace pdpd { - namespace op { - - NamedOutputs assign_value (const NodeContext &node); +namespace ngraph +{ + namespace frontend + { + namespace pdpd + { + namespace op + { + NamedOutputs assign_value(const NodeContext& node); } - } - } -} + } // namespace pdpd + } // namespace frontend +} // namespace ngraph diff --git a/ngraph/frontend/paddlepaddle/src/op/batch_norm.cpp b/ngraph/frontend/paddlepaddle/src/op/batch_norm.cpp index 4de03e78c9f12c..2d133d5ff5b72d 100644 --- a/ngraph/frontend/paddlepaddle/src/op/batch_norm.cpp +++ b/ngraph/frontend/paddlepaddle/src/op/batch_norm.cpp @@ -14,22 +14,28 @@ // limitations under the License. 
 //*****************************************************************************

-#include <ngraph/opsets/opset6.hpp>
 #include "batch_norm.hpp"
+#include <ngraph/opsets/opset6.hpp>

-namespace ngraph {
-namespace frontend {
-namespace pdpd {
-namespace op {
+using namespace ngraph;
+using namespace ngraph::frontend;

-NamedOutputs batch_norm (const NodeContext& node) {
-    auto data = node.get_ng_input("X");
-    auto gamma = node.get_ng_input("Scale");
-    auto beta = node.get_ng_input("Bias");
-    auto mean = node.get_ng_input("Mean");
-    auto variance = node.get_ng_input("Variance");
-    return node.default_single_output_mapping({std::make_shared<ngraph::opset6::BatchNormInference>(
-        data, gamma, beta, mean, variance, node.get_attribute<float>("epsilon"))}, {"Y"});
-}
+namespace pdpd
+{
+    namespace op
+    {
+        NamedOutputs batch_norm(const NodeContext& node)
+        {
+            auto data = node.get_ng_input("X");
+            auto gamma = node.get_ng_input("Scale");
+            auto beta = node.get_ng_input("Bias");
+            auto mean = node.get_ng_input("Mean");
+            auto variance = node.get_ng_input("Variance");
+            return node.default_single_output_mapping(
+                {std::make_shared<opset6::BatchNormInference>(
+                    data, gamma, beta, mean, variance, node.get_attribute<float>("epsilon"))},
+                {"Y"});
+        }
-}}}}
\ No newline at end of file
+    } // namespace op
+} // namespace pdpd
\ No newline at end of file
diff --git a/ngraph/frontend/paddlepaddle/src/op/batch_norm.hpp b/ngraph/frontend/paddlepaddle/src/op/batch_norm.hpp
index 2f94d12272448f..4452844e58a4b6 100644
--- a/ngraph/frontend/paddlepaddle/src/op/batch_norm.hpp
+++ b/ngraph/frontend/paddlepaddle/src/op/batch_norm.hpp
@@ -17,11 +17,17 @@
 #pragma once
 #include "node_context.hpp"

-namespace ngraph {
-namespace frontend {
-namespace pdpd {
-namespace op {
+namespace ngraph
+{
+    namespace frontend
+    {
+        namespace pdpd
+        {
+            namespace op
+            {
+                NamedOutputs batch_norm(const NodeContext& node);

-NamedOutputs batch_norm (const NodeContext& node);
-
-}}}}
\ No newline at end of file
+            }
+        } // namespace pdpd
+    } // namespace frontend
+} // namespace ngraph
\ No newline at end of file
diff --git a/ngraph/frontend/paddlepaddle/src/op/cast.cpp b/ngraph/frontend/paddlepaddle/src/op/cast.cpp
index 519505d8082b2f..f9d9ec8362e83b 100644
--- a/ngraph/frontend/paddlepaddle/src/op/cast.cpp
+++ b/ngraph/frontend/paddlepaddle/src/op/cast.cpp
@@ -14,19 +14,24 @@ // limitations under the License.
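The cast hunk that follows is the smallest converter in the set: Paddle's cast maps onto a single opset6::Convert whose target type comes straight from the out_dtype attribute. A sketch, assuming the element::Type attribute specialization declared in node_context.hpp above:

auto data = node.get_ng_input("X");
auto out_dtype = node.get_attribute<ngraph::element::Type>("out_dtype");
auto converted = std::make_shared<ngraph::opset6::Convert>(data, out_dtype);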
//***************************************************************************** -#include #include "cast.hpp" +#include + +using namespace ngraph; +using namespace ngraph::frontend; -namespace ngraph { -namespace frontend { -namespace pdpd { -namespace op { +namespace pdpd +{ + namespace op + { + NamedOutputs cast(const NodeContext& node) + { + auto data = node.get_ng_input("X"); + auto out_dtype = node.get_attribute("out_dtype"); -NamedOutputs cast (const NodeContext& node) { - auto data = node.get_ng_input("X"); - auto out_dtype = node.get_attribute("out_dtype"); - - return node.default_single_output_mapping({std::make_shared(data, out_dtype)}, {"Out"}); -} + return node.default_single_output_mapping( + {std::make_shared(data, out_dtype)}, {"Out"}); + } -}}}} \ No newline at end of file + } // namespace op +} // namespace pdpd \ No newline at end of file diff --git a/ngraph/frontend/paddlepaddle/src/op/cast.hpp b/ngraph/frontend/paddlepaddle/src/op/cast.hpp index 8cf320cec750dc..af321b7e443ad2 100644 --- a/ngraph/frontend/paddlepaddle/src/op/cast.hpp +++ b/ngraph/frontend/paddlepaddle/src/op/cast.hpp @@ -17,11 +17,17 @@ #pragma once #include "node_context.hpp" -namespace ngraph { -namespace frontend { -namespace pdpd { -namespace op { +namespace ngraph +{ + namespace frontend + { + namespace pdpd + { + namespace op + { + NamedOutputs cast(const NodeContext& node); -NamedOutputs cast (const NodeContext& node); - -}}}} \ No newline at end of file + } + } // namespace pdpd + } // namespace frontend +} // namespace ngraph \ No newline at end of file diff --git a/ngraph/frontend/paddlepaddle/src/op/clip.cpp b/ngraph/frontend/paddlepaddle/src/op/clip.cpp index a67a81de3a6389..fd3c8ae603f888 100644 --- a/ngraph/frontend/paddlepaddle/src/op/clip.cpp +++ b/ngraph/frontend/paddlepaddle/src/op/clip.cpp @@ -2,21 +2,26 @@ // SPDX-License-Identifier: Apache-2.0 // -#include #include "clip.hpp" +#include #include -namespace ngraph { -namespace frontend { -namespace pdpd { -namespace op { +using namespace ngraph; +using namespace ngraph::frontend; -NamedOutputs clip (const NodeContext& node) { - auto data = node.get_ng_input("X"); - auto min = node.get_attribute("min"); - auto max = node.get_attribute("max"); - PDPD_ASSERT(max >= min, "clip: max value must greater than min value!"); - return node.default_single_output_mapping({std::make_shared(data, min, max)}, {"Out"}); -} +namespace pdpd +{ + namespace op + { + NamedOutputs clip(const NodeContext& node) + { + auto data = node.get_ng_input("X"); + auto min = node.get_attribute("min"); + auto max = node.get_attribute("max"); + PDPD_ASSERT(max >= min, "clip: max value must greater than min value!"); + return node.default_single_output_mapping( + {std::make_shared(data, min, max)}, {"Out"}); + } -}}}} \ No newline at end of file + } // namespace op +} // namespace pdpd \ No newline at end of file diff --git a/ngraph/frontend/paddlepaddle/src/op/clip.hpp b/ngraph/frontend/paddlepaddle/src/op/clip.hpp index 366893e2ed16b9..bfb1eb4999f039 100644 --- a/ngraph/frontend/paddlepaddle/src/op/clip.hpp +++ b/ngraph/frontend/paddlepaddle/src/op/clip.hpp @@ -5,11 +5,17 @@ #pragma once #include "node_context.hpp" -namespace ngraph { -namespace frontend { -namespace pdpd { -namespace op { +namespace ngraph +{ + namespace frontend + { + namespace pdpd + { + namespace op + { + NamedOutputs clip(const NodeContext& node); -NamedOutputs clip (const NodeContext& node); - -}}}} \ No newline at end of file + } + } // namespace pdpd + } // namespace frontend +} // namespace 
ngraph \ No newline at end of file diff --git a/ngraph/frontend/paddlepaddle/src/op/concat.cpp b/ngraph/frontend/paddlepaddle/src/op/concat.cpp index d26de758c45bcc..ac94a8eacd1347 100644 --- a/ngraph/frontend/paddlepaddle/src/op/concat.cpp +++ b/ngraph/frontend/paddlepaddle/src/op/concat.cpp @@ -14,18 +14,23 @@ // limitations under the License. //***************************************************************************** -#include #include "concat.hpp" +#include -namespace ngraph { -namespace frontend { -namespace pdpd { -namespace op { +using namespace ngraph; +using namespace ngraph::frontend; -NamedOutputs concat (const NodeContext& node) { - auto data = node.get_ng_inputs("X"); - auto axis = node.get_attribute("axis"); - return node.default_single_output_mapping({std::make_shared(data, axis)}, {"Out"}); -} +namespace pdpd +{ + namespace op + { + NamedOutputs concat(const NodeContext& node) + { + auto data = node.get_ng_inputs("X"); + auto axis = node.get_attribute("axis"); + return node.default_single_output_mapping( + {std::make_shared(data, axis)}, {"Out"}); + } -}}}} \ No newline at end of file + } // namespace op +} // namespace pdpd \ No newline at end of file diff --git a/ngraph/frontend/paddlepaddle/src/op/concat.hpp b/ngraph/frontend/paddlepaddle/src/op/concat.hpp index 61f815f90214bf..d21d874a8f11b1 100644 --- a/ngraph/frontend/paddlepaddle/src/op/concat.hpp +++ b/ngraph/frontend/paddlepaddle/src/op/concat.hpp @@ -17,11 +17,17 @@ #pragma once #include "node_context.hpp" -namespace ngraph { -namespace frontend { -namespace pdpd { -namespace op { +namespace ngraph +{ + namespace frontend + { + namespace pdpd + { + namespace op + { + NamedOutputs concat(const NodeContext& node); -NamedOutputs concat (const NodeContext& node); - -}}}} \ No newline at end of file + } + } // namespace pdpd + } // namespace frontend +} // namespace ngraph \ No newline at end of file diff --git a/ngraph/frontend/paddlepaddle/src/op/conv2d.cpp b/ngraph/frontend/paddlepaddle/src/op/conv2d.cpp index 6e0d40ba2aead1..cab137b4fbfa36 100644 --- a/ngraph/frontend/paddlepaddle/src/op/conv2d.cpp +++ b/ngraph/frontend/paddlepaddle/src/op/conv2d.cpp @@ -14,28 +14,34 @@ // limitations under the License. 
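In the conv2d hunk below, Paddle's strides/paddings/dilations integer attributes are copied verbatim into the opset6::Convolution constructor, with the same paddings vector reused for both ends (full padding resolution is still marked TODO in the code). A sketch with the template parameter spelled out:

auto conv = std::make_shared<ngraph::opset6::Convolution>(
    data,
    filter,
    ngraph::Strides(strides.begin(), strides.end()),
    ngraph::CoordinateDiff(paddings.begin(), paddings.end()), // pads_begin
    ngraph::CoordinateDiff(paddings.begin(), paddings.end()), // pads_end
    ngraph::Strides(dilations.begin(), dilations.end()));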
//***************************************************************************** -#include #include "conv2d.hpp" +#include -namespace ngraph { -namespace frontend { -namespace pdpd { -namespace op { +using namespace ngraph; +using namespace ngraph::frontend; -NamedOutputs conv2d (const NodeContext& node) { - auto data = node.get_ng_input("Input"); - auto filter = node.get_ng_input("Filter"); - // TODO: resolve padding according to spec - auto strides = node.get_attribute>("strides"); - auto paddings = node.get_attribute>("paddings"); - auto dilations = node.get_attribute>("dilations"); - return node.default_single_output_mapping({std::make_shared( - data, - filter, - ngraph::Strides(strides.begin(), strides.end()), - ngraph::CoordinateDiff(paddings.begin(), paddings.end()), - ngraph::CoordinateDiff(paddings.begin(), paddings.end()), - ngraph::Strides(dilations.begin(), dilations.end()))}, {"Output"}); -} +namespace pdpd +{ + namespace op + { + NamedOutputs conv2d(const NodeContext& node) + { + auto data = node.get_ng_input("Input"); + auto filter = node.get_ng_input("Filter"); + // TODO: resolve padding according to spec + auto strides = node.get_attribute>("strides"); + auto paddings = node.get_attribute>("paddings"); + auto dilations = node.get_attribute>("dilations"); + return node.default_single_output_mapping( + {std::make_shared( + data, + filter, + Strides(strides.begin(), strides.end()), + CoordinateDiff(paddings.begin(), paddings.end()), + CoordinateDiff(paddings.begin(), paddings.end()), + Strides(dilations.begin(), dilations.end()))}, + {"Output"}); + } -}}}} \ No newline at end of file + } // namespace op +} // namespace pdpd \ No newline at end of file diff --git a/ngraph/frontend/paddlepaddle/src/op/conv2d.hpp b/ngraph/frontend/paddlepaddle/src/op/conv2d.hpp index c46a87631d0394..00325514778acb 100644 --- a/ngraph/frontend/paddlepaddle/src/op/conv2d.hpp +++ b/ngraph/frontend/paddlepaddle/src/op/conv2d.hpp @@ -17,11 +17,17 @@ #pragma once #include "node_context.hpp" -namespace ngraph { -namespace frontend { -namespace pdpd { -namespace op { +namespace ngraph +{ + namespace frontend + { + namespace pdpd + { + namespace op + { + NamedOutputs conv2d(const NodeContext& node_context); -NamedOutputs conv2d (const NodeContext& node_context); - -}}}} + } + } // namespace pdpd + } // namespace frontend +} // namespace ngraph diff --git a/ngraph/frontend/paddlepaddle/src/op/elementwise_ops.cpp b/ngraph/frontend/paddlepaddle/src/op/elementwise_ops.cpp index cc5fcfa8446f24..2bce2909e6be42 100644 --- a/ngraph/frontend/paddlepaddle/src/op/elementwise_ops.cpp +++ b/ngraph/frontend/paddlepaddle/src/op/elementwise_ops.cpp @@ -18,69 +18,87 @@ #include #include "elementwise_ops.hpp" -namespace ngraph { -namespace frontend { -namespace pdpd { -namespace op { +using namespace ngraph; +using namespace ngraph::frontend; -template -NamedOutputs elementwise_ops (const NodeContext& node) { - auto x = node.get_ng_input("X"); - auto y = node.get_ng_input("Y"); +namespace pdpd +{ + namespace op + { + template + NamedOutputs elementwise_ops(const NodeContext& node) + { + auto x = node.get_ng_input("X"); + auto y = node.get_ng_input("Y"); - auto axis = node.get_attribute("axis"); + auto axis = node.get_attribute("axis"); - PDPD_ASSERT(x.get_partial_shape().rank().is_static(), "elementwise_ops: X rank must be static!"); - PDPD_ASSERT(y.get_partial_shape().rank().is_static(), "elementwise_ops: Y rank must be static!"); - int64_t x_rank = x.get_partial_shape().rank().get_length(); - int64_t y_rank = 
y.get_partial_shape().rank().get_length(); + PDPD_ASSERT(x.get_partial_shape().rank().is_static(), + "elementwise_ops: X rank must be static!"); + PDPD_ASSERT(y.get_partial_shape().rank().is_static(), + "elementwise_ops: Y rank must be static!"); + int64_t x_rank = x.get_partial_shape().rank().get_length(); + int64_t y_rank = y.get_partial_shape().rank().get_length(); - if ((axis == -1) || (axis == x_rank - 1) || (x_rank == y_rank)) { - return node.default_single_output_mapping({std::make_shared(x, y)}, {"Out"}); - } - else { - // This broadcast can be implemented by either ngraph::Reshape or ngraph::Broadcast. - // Since PDPD implicates y_shape is a subsequence of x_shape starting from axis, - // to use ngraph::Reshape like Paddle2ONNX, which is more friendly to PnP. - auto broadcast_shape = std::vector(x_rank, 1); - PartialShape y_shape = y.get_partial_shape(); - int32_t i = 0; - for(auto it = y_shape.begin(); it != y_shape.end(); ++i,++it) - broadcast_shape[axis+i] = (*it).get_length(); + if ((axis == -1) || (axis == x_rank - 1) || (x_rank == y_rank)) + { + return node.default_single_output_mapping({std::make_shared(x, y)}, {"Out"}); + } + else + { + // This broadcast can be implemented by either ngraph::Reshape or + // ngraph::Broadcast. Since PDPD implicates y_shape is a subsequence of + // x_shape starting from axis, to use ngraph::Reshape like Paddle2ONNX, + // which is more friendly to PnP. + auto broadcast_shape = std::vector(x_rank, 1); + PartialShape y_shape = y.get_partial_shape(); + int32_t i = 0; + for (auto it = y_shape.begin(); it != y_shape.end(); ++i, ++it) + broadcast_shape[axis + i] = (*it).get_length(); - auto reshape_node = ngraph::opset6::Constant::create(ngraph::element::i64, ngraph::Shape{broadcast_shape.size()}, broadcast_shape); - auto y_node = std::make_shared(y, reshape_node, false); - return node.default_single_output_mapping({std::make_shared(x, y_node)}, {"Out"}); - } -} + auto reshape_node = opset6::Constant::create( + element::i64, Shape{broadcast_shape.size()}, broadcast_shape); + auto y_node = std::make_shared(y, reshape_node, false); + return node.default_single_output_mapping({std::make_shared(x, y_node)}, + {"Out"}); + } + } -// -NamedOutputs elementwise_add (const NodeContext& node_context) { - return elementwise_ops(node_context); -} + // + NamedOutputs elementwise_add(const NodeContext& node_context) + { + return elementwise_ops(node_context); + } -NamedOutputs elementwise_sub (const NodeContext& node_context) { - return elementwise_ops(node_context); -} + NamedOutputs elementwise_sub(const NodeContext& node_context) + { + return elementwise_ops(node_context); + } -NamedOutputs elementwise_mul (const NodeContext& node_context) { - return elementwise_ops(node_context); -} + NamedOutputs elementwise_mul(const NodeContext& node_context) + { + return elementwise_ops(node_context); + } -NamedOutputs elementwise_div (const NodeContext& node_context) { - return elementwise_ops(node_context); -} + NamedOutputs elementwise_div(const NodeContext& node_context) + { + return elementwise_ops(node_context); + } -NamedOutputs elementwise_min (const NodeContext& node_context) { - return elementwise_ops(node_context); -} + NamedOutputs elementwise_min(const NodeContext& node_context) + { + return elementwise_ops(node_context); + } -NamedOutputs elementwise_max (const NodeContext& node_context) { - return elementwise_ops(node_context); -} + NamedOutputs elementwise_max(const NodeContext& node_context) + { + return elementwise_ops(node_context); + } -NamedOutputs 
elementwise_pow (const NodeContext& node_context) { - return elementwise_ops(node_context); -} + NamedOutputs elementwise_pow(const NodeContext& node_context) + { + return elementwise_ops(node_context); + } -}}}} \ No newline at end of file + } // namespace op +} // namespace pdpd \ No newline at end of file diff --git a/ngraph/frontend/paddlepaddle/src/op/elementwise_ops.hpp b/ngraph/frontend/paddlepaddle/src/op/elementwise_ops.hpp index fc32c514c35b35..3137cf23006cfb 100644 --- a/ngraph/frontend/paddlepaddle/src/op/elementwise_ops.hpp +++ b/ngraph/frontend/paddlepaddle/src/op/elementwise_ops.hpp @@ -17,16 +17,22 @@ #pragma once #include "node_context.hpp" -namespace ngraph { -namespace frontend { -namespace pdpd { -namespace op { - -NamedOutputs elementwise_add (const NodeContext& node_context); -NamedOutputs elementwise_sub (const NodeContext& node_context); -NamedOutputs elementwise_mul (const NodeContext& node_context); -NamedOutputs elementwise_div (const NodeContext& node_context); -NamedOutputs elementwise_min (const NodeContext& node_context); -NamedOutputs elementwise_max (const NodeContext& node_context); -NamedOutputs elementwise_pow (const NodeContext& node_context); -}}}} +namespace ngraph +{ + namespace frontend + { + namespace pdpd + { + namespace op + { + NamedOutputs elementwise_add(const NodeContext& node_context); + NamedOutputs elementwise_sub(const NodeContext& node_context); + NamedOutputs elementwise_mul(const NodeContext& node_context); + NamedOutputs elementwise_div(const NodeContext& node_context); + NamedOutputs elementwise_min(const NodeContext& node_context); + NamedOutputs elementwise_max(const NodeContext& node_context); + NamedOutputs elementwise_pow(const NodeContext& node_context); + } // namespace op + } // namespace pdpd + } // namespace frontend +} // namespace ngraph diff --git a/ngraph/frontend/paddlepaddle/src/op/fill_constant.cpp b/ngraph/frontend/paddlepaddle/src/op/fill_constant.cpp index 27a8230f6ea298..c48054b043ec8d 100644 --- a/ngraph/frontend/paddlepaddle/src/op/fill_constant.cpp +++ b/ngraph/frontend/paddlepaddle/src/op/fill_constant.cpp @@ -2,32 +2,37 @@ // SPDX-License-Identifier: Apache-2.0 // -#include #include "fill_constant.hpp" +#include +using namespace ngraph; +using namespace ngraph::frontend; -namespace ngraph { - namespace frontend { - namespace pdpd { - namespace op { - - NamedOutputs fill_constant (const NodeContext& node) { - - auto shape = node.get_attribute>("shape"); - auto dtype = node.get_attribute("dtype"); - //TODO to Support Tensor/Tuple Input add more tests for other data types #55262 - Output value_node; - if(dtype == element::i32) { - int32_t value = node.get_attribute("value"); - value_node = opset6::Constant::create(dtype, {1}, {value}); - } - else if(dtype == element::f32) { - float value = node.get_attribute("value"); - value_node = opset6::Constant::create(dtype, {1}, {value}); - } +namespace pdpd +{ + namespace op + { + NamedOutputs fill_constant(const NodeContext& node) + { + auto shape = node.get_attribute>("shape"); + auto dtype = node.get_attribute("dtype"); + // TODO to Support Tensor/Tuple Input add more tests for other data types #55262 + Output value_node; + if (dtype == element::i32) + { + int32_t value = node.get_attribute("value"); + value_node = opset6::Constant::create(dtype, {1}, {value}); + } + else if (dtype == element::f32) + { + float value = node.get_attribute("value"); + value_node = opset6::Constant::create(dtype, {1}, {value}); + } - auto shape_node = 
opset6::Constant::create(element::i64, {shape.size()}, shape); - return node.default_single_output_mapping({std::make_shared(value_node, shape_node)}, {"Out"}); - } + auto shape_node = opset6::Constant::create(element::i64, {shape.size()}, shape); + return node.default_single_output_mapping( + {std::make_shared(value_node, shape_node)}, {"Out"}); + } - }}}} \ No newline at end of file + } // namespace op +} // namespace pdpd \ No newline at end of file diff --git a/ngraph/frontend/paddlepaddle/src/op/fill_constant.hpp b/ngraph/frontend/paddlepaddle/src/op/fill_constant.hpp index cae9ca4f413dc6..d260b6333d8870 100644 --- a/ngraph/frontend/paddlepaddle/src/op/fill_constant.hpp +++ b/ngraph/frontend/paddlepaddle/src/op/fill_constant.hpp @@ -6,14 +6,17 @@ #include "node_context.hpp" -namespace ngraph { - namespace frontend { - namespace pdpd { - namespace op { - - NamedOutputs fill_constant(const NodeContext &node); +namespace ngraph +{ + namespace frontend + { + namespace pdpd + { + namespace op + { + NamedOutputs fill_constant(const NodeContext& node); } - } - } -} + } // namespace pdpd + } // namespace frontend +} // namespace ngraph diff --git a/ngraph/frontend/paddlepaddle/src/op/fill_constant_batch_size_like.cpp b/ngraph/frontend/paddlepaddle/src/op/fill_constant_batch_size_like.cpp index a3748b231d1b1f..c8cbe7fac6e2f4 100644 --- a/ngraph/frontend/paddlepaddle/src/op/fill_constant_batch_size_like.cpp +++ b/ngraph/frontend/paddlepaddle/src/op/fill_constant_batch_size_like.cpp @@ -2,32 +2,42 @@ // SPDX-License-Identifier: Apache-2.0 // -#include #include "fill_constant_batch_size_like.hpp" +#include #include -namespace ngraph { -namespace frontend { -namespace pdpd { -namespace op { +using namespace ngraph; +using namespace ngraph::frontend; -NamedOutputs fill_constant_batch_size_like (const NodeContext& node) { - //TODO to Support other data types other than FP32 #55263 - auto input_dim_idx = node.get_attribute("input_dim_idx", 0); - auto output_dim_idx = node.get_attribute("output_dim_idx", 0); - auto value = node.get_attribute("value"); - auto shapes = node.get_attribute >("shape"); - auto input = node.get_ng_input("Input"); - auto partial_shape = input.get_partial_shape(); - PDPD_ASSERT(partial_shape.is_static(), "fill_constant_batch_size_like: must use static shape."); - auto static_shape = partial_shape.get_shape(); - PDPD_ASSERT(input_dim_idx < (int32_t)static_shape.size(), "fill_constant_batch_size_like: input_dim_idx should not exceed input dims."); - PDPD_ASSERT(output_dim_idx < (int32_t)shapes.size(), "fill_constant_batch_size_like: output_dim_idx should not exceed shapes dims."); - shapes[output_dim_idx] = static_shape[input_dim_idx]; - auto dtype = node.get_attribute("dtype"); - return node.default_single_output_mapping( - {std::make_shared(dtype, Shape(shapes.begin(), shapes.end()), value)}, - {"Out"}); -} +namespace pdpd +{ + namespace op + { + NamedOutputs fill_constant_batch_size_like(const NodeContext& node) + { + // TODO to Support other data types other than FP32 #55263 + auto input_dim_idx = node.get_attribute("input_dim_idx", 0); + auto output_dim_idx = node.get_attribute("output_dim_idx", 0); + auto value = node.get_attribute("value"); + auto shapes = node.get_attribute>("shape"); + auto input = node.get_ng_input("Input"); + auto partial_shape = input.get_partial_shape(); + PDPD_ASSERT(partial_shape.is_static(), + "fill_constant_batch_size_like: must use static shape."); + auto static_shape = partial_shape.get_shape(); + PDPD_ASSERT(input_dim_idx < 
(int32_t)static_shape.size(), + "fill_constant_batch_size_like: input_dim_idx should not exceed " + "input dims."); + PDPD_ASSERT(output_dim_idx < (int32_t)shapes.size(), + "fill_constant_batch_size_like: output_dim_idx should not exceed " + "shapes dims."); + shapes[output_dim_idx] = static_shape[input_dim_idx]; + auto dtype = node.get_attribute("dtype"); + return node.default_single_output_mapping( + {std::make_shared( + dtype, Shape(shapes.begin(), shapes.end()), value)}, + {"Out"}); + } -}}}} \ No newline at end of file + } // namespace op +} // namespace pdpd \ No newline at end of file diff --git a/ngraph/frontend/paddlepaddle/src/op/fill_constant_batch_size_like.hpp b/ngraph/frontend/paddlepaddle/src/op/fill_constant_batch_size_like.hpp index 89ac5baea63029..9cbd62b7a03ee4 100644 --- a/ngraph/frontend/paddlepaddle/src/op/fill_constant_batch_size_like.hpp +++ b/ngraph/frontend/paddlepaddle/src/op/fill_constant_batch_size_like.hpp @@ -5,11 +5,17 @@ #pragma once #include "node_context.hpp" -namespace ngraph { -namespace frontend { -namespace pdpd { -namespace op { +namespace ngraph +{ + namespace frontend + { + namespace pdpd + { + namespace op + { + NamedOutputs fill_constant_batch_size_like(const NodeContext& node); -NamedOutputs fill_constant_batch_size_like (const NodeContext& node); - -}}}} \ No newline at end of file + } + } // namespace pdpd + } // namespace frontend +} // namespace ngraph \ No newline at end of file diff --git a/ngraph/frontend/paddlepaddle/src/op/flatten_contiguous_range.cpp b/ngraph/frontend/paddlepaddle/src/op/flatten_contiguous_range.cpp index 18f8e5a930f43a..a66132b13a0788 100644 --- a/ngraph/frontend/paddlepaddle/src/op/flatten_contiguous_range.cpp +++ b/ngraph/frontend/paddlepaddle/src/op/flatten_contiguous_range.cpp @@ -2,39 +2,50 @@ // SPDX-License-Identifier: Apache-2.0 // -#include #include "flatten_contiguous_range.hpp" -#include #include +#include +#include -namespace ngraph { -namespace frontend { -namespace pdpd { -namespace op { +using namespace ngraph; +using namespace ngraph::frontend; -NamedOutputs flatten_contiguous_range (const NodeContext& node) { - auto x_node = node.get_ng_input("X"); - auto shape_of_x = std::make_shared(x_node); - int dims = x_node.get_partial_shape().rank().get_length(); - auto start_axis = node.get_attribute("start_axis"); - auto stop_axis = node.get_attribute("stop_axis"); +namespace pdpd +{ + namespace op + { + NamedOutputs flatten_contiguous_range(const NodeContext& node) + { + auto x_node = node.get_ng_input("X"); + auto shape_of_x = std::make_shared(x_node); + int dims = x_node.get_partial_shape().rank().get_length(); + auto start_axis = node.get_attribute("start_axis"); + auto stop_axis = node.get_attribute("stop_axis"); - auto axis1_begin = opset6::Constant::create(element::i64, {1}, {0}); - auto axis1_end = opset6::Constant::create(element::i64, {1}, {start_axis}); - auto axis1 = std::make_shared(shape_of_x, axis1_begin, axis1_end, std::vector{0}, std::vector{0}); - OutputVector axes {axis1, opset6::Constant::create(element::i64, Shape{1}, {-1.0})}; + auto axis1_begin = opset6::Constant::create(element::i64, {1}, {0}); + auto axis1_end = opset6::Constant::create(element::i64, {1}, {start_axis}); + auto axis1 = std::make_shared(shape_of_x, + axis1_begin, + axis1_end, + std::vector{0}, + std::vector{0}); + OutputVector axes{axis1, opset6::Constant::create(element::i64, Shape{1}, {-1.0})}; - if (stop_axis < dims - 1) { - auto axis2_begin = opset6::Constant::create(element::i64, {1}, {stop_axis + 1}); - auto 
axis2_end = opset6::Constant::create(element::i64, {1}, {dims}); - auto axis2_node = std::make_shared(shape_of_x, axis2_begin, axis2_end, std::vector{0}, std::vector{0}); - axes.push_back(axis2_node); - } + if (stop_axis < dims - 1) + { + auto axis2_begin = opset6::Constant::create(element::i64, {1}, {stop_axis + 1}); + auto axis2_end = opset6::Constant::create(element::i64, {1}, {dims}); + auto axis2_node = std::make_shared(shape_of_x, + axis2_begin, + axis2_end, + std::vector{0}, + std::vector{0}); + axes.push_back(axis2_node); + } - auto new_shape_node = std::make_shared(axes, 0); - return node.default_single_output_mapping({std::make_shared(x_node, new_shape_node, true)}, {"Out"}); -} -} -} -} -} + auto new_shape_node = std::make_shared(axes, 0); + return node.default_single_output_mapping( + {std::make_shared(x_node, new_shape_node, true)}, {"Out"}); + } + } // namespace op +} // namespace pdpd diff --git a/ngraph/frontend/paddlepaddle/src/op/flatten_contiguous_range.hpp b/ngraph/frontend/paddlepaddle/src/op/flatten_contiguous_range.hpp index 85d1bac3649f48..46fcb42b51f98a 100644 --- a/ngraph/frontend/paddlepaddle/src/op/flatten_contiguous_range.hpp +++ b/ngraph/frontend/paddlepaddle/src/op/flatten_contiguous_range.hpp @@ -5,11 +5,17 @@ #pragma once #include "node_context.hpp" -namespace ngraph { -namespace frontend { -namespace pdpd { -namespace op { +namespace ngraph +{ + namespace frontend + { + namespace pdpd + { + namespace op + { + NamedOutputs flatten_contiguous_range(const NodeContext& node); -NamedOutputs flatten_contiguous_range (const NodeContext& node); - -}}}} + } + } // namespace pdpd + } // namespace frontend +} // namespace ngraph diff --git a/ngraph/frontend/paddlepaddle/src/op/interp.cpp b/ngraph/frontend/paddlepaddle/src/op/interp.cpp index 5c10a418b98ae9..788040a894d6fb 100644 --- a/ngraph/frontend/paddlepaddle/src/op/interp.cpp +++ b/ngraph/frontend/paddlepaddle/src/op/interp.cpp @@ -2,145 +2,165 @@ // SPDX-License-Identifier: Apache-2.0 // -#include #include "interp.hpp" +#include + +using namespace ngraph; +using namespace ngraph::frontend; + +namespace pdpd +{ + namespace op + { + std::shared_ptr calculate_output_shape_based_on_scales( + const Output& data, const std::vector& scale, Output& scales) + { + PDPD_ASSERT(scale.size() > 0); + if (scale.size() == 1) + scales = opset6::Constant::create( + element::f32, Shape{4}, {1, 1, scale[0], scale[0]}); + else if (scale.size() == 2) + scales = opset6::Constant::create( + element::f32, Shape{4}, {1, 1, scale[0], scale[1]}); + else if (scale.size() == 3) + scales = opset6::Constant::create( + element::f32, Shape{4}, {1, scale[0], scale[1], scale[2]}); + else + scales = + opset6::Constant::create(element::f32, + Shape{scale.size()}, + std::vector(scale.begin(), scale.end())); + const auto shape_of_data = std::make_shared( + std::make_shared(data), scales.get_element_type()); + const auto multiply = std::make_shared(shape_of_data, scales); + const auto output_shape = std::make_shared(multiply, element::i64); + + return output_shape; + } + + std::shared_ptr calculate_scales_based_on_sizes(const Output& data, + const Output& sizes) + { + const float epsilon = 1.0e-5; + const auto shape_of_data = std::make_shared( + std::make_shared(data), element::f32); + const auto converted_sizes = std::make_shared(sizes, element::f32); + const auto divide = std::make_shared(converted_sizes, shape_of_data); + const auto eps_node = + std::make_shared(element::f32, Shape{}, epsilon); + const auto scales = std::make_shared(divide, 
eps_node); + + return scales; + } + + std::shared_ptr extract_out_sizes(const Output& data, + const std::vector& out_sizes) + { + const auto shape_of_x = std::make_shared(data); + auto shape_begin = opset6::Constant::create(element::i64, {1}, {0}); + auto shape_end = opset6::Constant::create(element::i64, Shape{1}, {2}); + auto nc_node = std::make_shared(shape_of_x, + shape_begin, + shape_end, + std::vector{0}, + std::vector{0}); + auto hw_node = opset6::Constant::create(element::i64, Shape{2}, out_sizes); + return std::make_shared(OutputVector{nc_node, hw_node}, 0); + } + + // TODO support different data_layout #55170 + + NamedOutputs interpolate(const NodeContext& node, + const opset6::Interpolate::InterpolateMode& mode) + { + auto x = node.get_ng_input("X"); + using InterpolateMode = opset6::Interpolate::InterpolateMode; + using CoordinateTransformMode = opset6::Interpolate::CoordinateTransformMode; + using Nearest_mode = opset6::Interpolate::NearestMode; + using InterpolateAttrs = opset6::Interpolate::InterpolateAttrs; + using ShapeCalcMode = opset6::Interpolate::ShapeCalcMode; + + InterpolateAttrs attrs; + + attrs.mode = mode; + + auto out_w = node.get_attribute("out_w"); + auto out_h = node.get_attribute("out_h"); + auto scale = node.get_attribute>("scale"); + Output scales; + Output target_spatial_shape; + + if (node.has_ng_input("OutSize")) + { + attrs.shape_calculation_mode = ShapeCalcMode::sizes; + auto hw_shape = node.get_ng_input("OutSize"); + const auto shape_of_x = std::make_shared(x); + auto shape_begin = opset6::Constant::create(element::i64, {1}, {0}); + auto shape_end = opset6::Constant::create(element::i64, Shape{1}, {2}); + auto nc_node = std::make_shared(shape_of_x, + shape_begin, + shape_end, + std::vector{0}, + std::vector{0}); + target_spatial_shape = std::make_shared( + OutputVector{nc_node, + std::make_shared(hw_shape, element::i64)}, + 0); + scales = calculate_scales_based_on_sizes(x, target_spatial_shape); + } + else if (out_w <= 0 || out_h <= 0) + { + attrs.shape_calculation_mode = ShapeCalcMode::scales; + target_spatial_shape = calculate_output_shape_based_on_scales(x, scale, scales); + } + else + { + attrs.shape_calculation_mode = ShapeCalcMode::sizes; + target_spatial_shape = extract_out_sizes(x, {out_h, out_w}); + scales = calculate_scales_based_on_sizes(x, target_spatial_shape); + } -namespace ngraph { - namespace frontend { - namespace pdpd { - namespace op { - - std::shared_ptr - calculate_output_shape_based_on_scales(const Output &data, - const std::vector &scale, - Output &scales) { - PDPD_ASSERT(scale.size() > 0); - if (scale.size() == 1) - scales = opset6::Constant::create(element::f32, Shape{4}, {1, 1, scale[0], scale[0]}); - else if (scale.size() == 2) - scales = opset6::Constant::create(element::f32, Shape{4}, {1, 1, scale[0], scale[1]}); - else if (scale.size() == 3) - scales = opset6::Constant::create(element::f32, Shape{4}, - {1, scale[0], scale[1], scale[2]}); - else - scales = opset6::Constant::create(element::f32, Shape{scale.size()}, - std::vector(scale.begin(), scale.end())); - const auto shape_of_data = std::make_shared( - std::make_shared(data), scales.get_element_type()); - const auto multiply = - std::make_shared(shape_of_data, scales); - const auto output_shape = - std::make_shared(multiply, ngraph::element::i64); - - return output_shape; - } - - std::shared_ptr - calculate_scales_based_on_sizes(const Output &data, - const Output &sizes) { - const float epsilon = 1.0e-5; - const auto shape_of_data = std::make_shared( - 
std::make_shared(data), ngraph::element::f32); - const auto converted_sizes = - std::make_shared(sizes, ngraph::element::f32); - const auto divide = - std::make_shared(converted_sizes, shape_of_data); - const auto eps_node = std::make_shared( - ngraph::element::f32, Shape{}, epsilon); - const auto scales = std::make_shared(divide, eps_node); - - return scales; - } - - std::shared_ptr - extract_out_sizes(const Output &data, const std::vector &out_sizes) { - const auto shape_of_x = std::make_shared(data); - auto shape_begin = opset6::Constant::create(element::i64, {1}, {0}); - auto shape_end = opset6::Constant::create(element::i64, Shape{1}, {2}); - auto nc_node = std::make_shared(shape_of_x, shape_begin, shape_end, - std::vector{0}, - std::vector{0}); - auto hw_node = opset6::Constant::create(element::i64, Shape{2}, out_sizes); - return std::make_shared(OutputVector{nc_node, hw_node}, 0); - } - - - //TODO support different data_layout #55170 - - NamedOutputs - interpolate(const NodeContext &node, const ngraph::opset6::Interpolate::InterpolateMode &mode) { - auto x = node.get_ng_input("X"); - using InterpolateMode = ngraph::opset6::Interpolate::InterpolateMode; - using CoordinateTransformMode = ngraph::opset6::Interpolate::CoordinateTransformMode; - using Nearest_mode = ngraph::opset6::Interpolate::NearestMode; - using InterpolateAttrs = ngraph::opset6::Interpolate::InterpolateAttrs; - using ShapeCalcMode = ngraph::opset6::Interpolate::ShapeCalcMode; - - InterpolateAttrs attrs; - - attrs.mode = mode; - - auto out_w = node.get_attribute("out_w"); - auto out_h = node.get_attribute("out_h"); - auto scale = node.get_attribute>("scale"); - Output scales; - Output target_spatial_shape; - - if (node.has_ng_input("OutSize")) { - attrs.shape_calculation_mode = ShapeCalcMode::sizes; - auto hw_shape = node.get_ng_input("OutSize"); - const auto shape_of_x = std::make_shared(x); - auto shape_begin = opset6::Constant::create(element::i64, {1}, {0}); - auto shape_end = opset6::Constant::create(element::i64, Shape{1}, {2}); - auto nc_node = std::make_shared(shape_of_x, shape_begin, shape_end, - std::vector{0}, - std::vector{0}); - target_spatial_shape = std::make_shared( - OutputVector{nc_node, std::make_shared(hw_shape, element::i64)}, 0); - scales = calculate_scales_based_on_sizes(x, target_spatial_shape); - } else if (out_w <= 0 || out_h <= 0) { - attrs.shape_calculation_mode = ShapeCalcMode::scales; - target_spatial_shape = calculate_output_shape_based_on_scales(x, scale, scales); - } else { - attrs.shape_calculation_mode = ShapeCalcMode::sizes; - target_spatial_shape = extract_out_sizes(x, {out_h, out_w}); - scales = calculate_scales_based_on_sizes(x, target_spatial_shape); - } - - bool align_corners = node.get_attribute("align_corners"); - int32_t align_mode = node.get_attribute("align_mode"); - - if (mode == InterpolateMode::nearest) { - attrs.coordinate_transformation_mode = CoordinateTransformMode::asymmetric; - } else if (!align_corners && align_mode == 1) { - attrs.coordinate_transformation_mode = CoordinateTransformMode::asymmetric; - } else if (!align_corners && align_mode == 0) { - attrs.coordinate_transformation_mode = CoordinateTransformMode::half_pixel; - } else if (align_corners) { - attrs.coordinate_transformation_mode = CoordinateTransformMode::align_corners; - } - - attrs.nearest_mode = Nearest_mode::round_prefer_floor; - attrs.antialias = false; - attrs.pads_begin = {0, 0, 0, 0}; - attrs.pads_end = {0, 0, 0, 0}; - - return node.default_single_output_mapping( - {std::make_shared(x, 
target_spatial_shape, scales, attrs)}, - {"Out"}); - } - - NamedOutputs bilinear_interp_v2(const NodeContext &node) { - auto mode = ngraph::opset6::Interpolate::InterpolateMode::linear_onnx; - return interpolate(node, mode); - } - - NamedOutputs nearest_interp_v2(const NodeContext &node) { - auto mode = ngraph::opset6::Interpolate::InterpolateMode::nearest; - return interpolate(node, mode); - } + bool align_corners = node.get_attribute("align_corners"); + int32_t align_mode = node.get_attribute("align_mode"); + if (mode == InterpolateMode::nearest) + { + attrs.coordinate_transformation_mode = CoordinateTransformMode::asymmetric; + } + else if (!align_corners && align_mode == 1) + { + attrs.coordinate_transformation_mode = CoordinateTransformMode::asymmetric; + } + else if (!align_corners && align_mode == 0) + { + attrs.coordinate_transformation_mode = CoordinateTransformMode::half_pixel; } + else if (align_corners) + { + attrs.coordinate_transformation_mode = CoordinateTransformMode::align_corners; + } + + attrs.nearest_mode = Nearest_mode::round_prefer_floor; + attrs.antialias = false; + attrs.pads_begin = {0, 0, 0, 0}; + attrs.pads_end = {0, 0, 0, 0}; + + return node.default_single_output_mapping( + {std::make_shared(x, target_spatial_shape, scales, attrs)}, + {"Out"}); } - } -} \ No newline at end of file + + NamedOutputs bilinear_interp_v2(const NodeContext& node) + { + auto mode = opset6::Interpolate::InterpolateMode::linear_onnx; + return interpolate(node, mode); + } + + NamedOutputs nearest_interp_v2(const NodeContext& node) + { + auto mode = opset6::Interpolate::InterpolateMode::nearest; + return interpolate(node, mode); + } + + } // namespace op +} // namespace pdpd \ No newline at end of file diff --git a/ngraph/frontend/paddlepaddle/src/op/interp.hpp b/ngraph/frontend/paddlepaddle/src/op/interp.hpp index c47ba9d041dde7..5738a6c2be266a 100644 --- a/ngraph/frontend/paddlepaddle/src/op/interp.hpp +++ b/ngraph/frontend/paddlepaddle/src/op/interp.hpp @@ -5,12 +5,19 @@ #pragma once #include "node_context.hpp" -namespace ngraph { -namespace frontend { -namespace pdpd { -namespace op { -//TODO support other interp such as linear trilinear, bicubic. etc #55397 -NamedOutputs nearest_interp_v2 (const NodeContext& node_context); -NamedOutputs bilinear_interp_v2 (const NodeContext& node_context); +namespace ngraph +{ + namespace frontend + { + namespace pdpd + { + namespace op + { + // TODO support other interp such as linear trilinear, bicubic. etc #55397 + NamedOutputs nearest_interp_v2(const NodeContext& node_context); + NamedOutputs bilinear_interp_v2(const NodeContext& node_context); -}}}} + } // namespace op + } // namespace pdpd + } // namespace frontend +} // namespace ngraph diff --git a/ngraph/frontend/paddlepaddle/src/op/leakyrelu.cpp b/ngraph/frontend/paddlepaddle/src/op/leakyrelu.cpp index 325a000d0e6da5..b5a3f4bed06e4a 100644 --- a/ngraph/frontend/paddlepaddle/src/op/leakyrelu.cpp +++ b/ngraph/frontend/paddlepaddle/src/op/leakyrelu.cpp @@ -14,18 +14,24 @@ // limitations under the License. 
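The coordinate-transform selection in the interpolate hunk above reduces to a small decision table, reproduced here for reference:

// mode == nearest                   -> asymmetric
// !align_corners && align_mode == 1 -> asymmetric
// !align_corners && align_mode == 0 -> half_pixel
// align_corners                     -> align_corners
// nearest_mode is always round_prefer_floor; antialias is off; pads are zero.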
 //*****************************************************************************

-#include <ngraph/opsets/opset6.hpp>
 #include "leakyrelu.hpp"
+#include <ngraph/opsets/opset6.hpp>

-namespace ngraph {
-namespace frontend {
-namespace pdpd {
-namespace op {
+using namespace ngraph;
+using namespace ngraph::frontend;

-NamedOutputs leaky_relu (const NodeContext& node) {
-    auto data = node.get_ng_input("X");
-    auto alpha = ngraph::opset6::Constant::create(ngraph::element::f32, {1}, {node.get_attribute<float>("alpha")});
-    return node.default_single_output_mapping({std::make_shared<ngraph::opset6::PRelu>(data, alpha)}, {"Out"});
-}
+namespace pdpd
+{
+    namespace op
+    {
+        NamedOutputs leaky_relu(const NodeContext& node)
+        {
+            auto data = node.get_ng_input("X");
+            auto alpha =
+                opset6::Constant::create(element::f32, {1}, {node.get_attribute<float>("alpha")});
+            return node.default_single_output_mapping(
+                {std::make_shared<opset6::PRelu>(data, alpha)}, {"Out"});
+        }
-}}}}
\ No newline at end of file
+    } // namespace op
+} // namespace pdpd
\ No newline at end of file
diff --git a/ngraph/frontend/paddlepaddle/src/op/leakyrelu.hpp b/ngraph/frontend/paddlepaddle/src/op/leakyrelu.hpp
index b165e319c596c2..f85bdc38fef5c6 100644
--- a/ngraph/frontend/paddlepaddle/src/op/leakyrelu.hpp
+++ b/ngraph/frontend/paddlepaddle/src/op/leakyrelu.hpp
@@ -17,11 +17,17 @@
 #pragma once
 #include "node_context.hpp"

-namespace ngraph {
-namespace frontend {
-namespace pdpd {
-namespace op {
+namespace ngraph
+{
+    namespace frontend
+    {
+        namespace pdpd
+        {
+            namespace op
+            {
+                NamedOutputs leaky_relu(const NodeContext& node);

-NamedOutputs leaky_relu (const NodeContext& node);
-
-}}}}
\ No newline at end of file
+            }
+        } // namespace pdpd
+    } // namespace frontend
+} // namespace ngraph
\ No newline at end of file
diff --git a/ngraph/frontend/paddlepaddle/src/op/matmul.cpp b/ngraph/frontend/paddlepaddle/src/op/matmul.cpp
index c4efcf605b718b..a97a35ea233cae 100644
--- a/ngraph/frontend/paddlepaddle/src/op/matmul.cpp
+++ b/ngraph/frontend/paddlepaddle/src/op/matmul.cpp
@@ -14,27 +14,29 @@ // limitations under the License.
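The matmul hunk below is a MatMul plus an explicit alpha scale: transpose_a/transpose_b feed opset6::MatMul directly, and the product is multiplied by a one-element f32 constant holding alpha. A sketch with the op types written out (Multiply is assumed from the scaling step):

auto mm = std::make_shared<ngraph::opset6::MatMul>(x, y, transpose_a, transpose_b);
auto alpha_node = ngraph::opset6::Constant::create(ngraph::element::f32, {1}, {alpha});
auto scaled = std::make_shared<ngraph::opset6::Multiply>(mm, alpha_node);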
//***************************************************************************** -#include #include "matmul.hpp" +#include #include -namespace ngraph { -namespace frontend { -namespace pdpd { -namespace op { +using namespace ngraph; +using namespace ngraph::frontend; - NamedOutputs matmul(const NodeContext& node) { - auto x = node.get_ng_input("X"); - auto y = node.get_ng_input("Y"); - auto alpha = node.get_attribute("alpha"); - auto transpose_a = node.get_attribute("transpose_a"); - auto transpose_b = node.get_attribute("transpose_b"); - auto mm = std::make_shared(x, y, transpose_a, transpose_b); - auto alpha_node = ngraph::opset6::Constant::create(ngraph::element::f32, {1}, {alpha}); - return node.default_single_output_mapping({std::make_shared(mm, alpha_node)}, {"Out"}); - } +namespace pdpd +{ + namespace op + { + NamedOutputs matmul(const NodeContext& node) + { + auto x = node.get_ng_input("X"); + auto y = node.get_ng_input("Y"); + auto alpha = node.get_attribute("alpha"); + auto transpose_a = node.get_attribute("transpose_a"); + auto transpose_b = node.get_attribute("transpose_b"); + auto mm = std::make_shared(x, y, transpose_a, transpose_b); + auto alpha_node = opset6::Constant::create(element::f32, {1}, {alpha}); + return node.default_single_output_mapping( + {std::make_shared(mm, alpha_node)}, {"Out"}); + } -} // namespace op -} // namespace pdpd -} // namespace frontend -} // namespace ngraph \ No newline at end of file + } // namespace op +} // namespace pdpd \ No newline at end of file diff --git a/ngraph/frontend/paddlepaddle/src/op/matmul.hpp b/ngraph/frontend/paddlepaddle/src/op/matmul.hpp index 7f441bb1dea551..ef523b24b88392 100644 --- a/ngraph/frontend/paddlepaddle/src/op/matmul.hpp +++ b/ngraph/frontend/paddlepaddle/src/op/matmul.hpp @@ -17,14 +17,17 @@ #pragma once #include "node_context.hpp" -namespace ngraph { -namespace frontend { -namespace pdpd { -namespace op { +namespace ngraph +{ + namespace frontend + { + namespace pdpd + { + namespace op + { + NamedOutputs matmul(const NodeContext& node); -NamedOutputs matmul(const NodeContext& node); - -} // namespace op -} // namespace pdpd -} // namespace frontend + } // namespace op + } // namespace pdpd + } // namespace frontend } // namespace ngraph \ No newline at end of file diff --git a/ngraph/frontend/paddlepaddle/src/op/mul.cpp b/ngraph/frontend/paddlepaddle/src/op/mul.cpp index a9ccfc244b0ea6..412f746e4e8d58 100644 --- a/ngraph/frontend/paddlepaddle/src/op/mul.cpp +++ b/ngraph/frontend/paddlepaddle/src/op/mul.cpp @@ -14,46 +14,55 @@ // limitations under the License. 
//***************************************************************************** +#include "mul.hpp" #include #include -#include "mul.hpp" - -namespace ngraph { -namespace frontend { -namespace pdpd { -namespace op { -NamedOutputs mul (const NodeContext& node) { - auto x = node.get_ng_input("X"); - auto y = node.get_ng_input("Y"); - PDPD_ASSERT(x.get_partial_shape().rank().is_static(), "matmul: X rank must be static!"); - int64_t x_rank = x.get_partial_shape().rank().get_length(); - PDPD_ASSERT(y.get_partial_shape().rank().is_static() && - y.get_partial_shape().rank().get_length() == 2, "matmul: Y rank must be static, and 2!"); - if (x_rank > 2) { - auto shape = std::make_shared(x); - int64_t x_num_col_dims = node.get_attribute("x_num_col_dims"); - auto axis = ngraph::opset6::Constant::create(ngraph::element::i64, {}, {0}); - auto split_lengths = ngraph::opset6::Constant::create(ngraph::element::i64, {2}, - {x_num_col_dims, x_rank - x_num_col_dims}); - auto split = std::make_shared(shape, axis, split_lengths); - auto f_dim_red_axis = ngraph::opset6::Constant::create(ngraph::element::i64, {}, {0}); - auto first_dim_reduce = std::make_shared(split->output(0), - f_dim_red_axis); - auto f_dim_shape = ngraph::opset6::Constant::create(ngraph::element::i64, {1}, {1}); - auto first_dim = std::make_shared(first_dim_reduce, f_dim_shape, false); - auto s_dim_red_axis = ngraph::opset6::Constant::create(ngraph::element::i64, {}, {0}); - auto second_dim_reduce = std::make_shared(split->output(1), - s_dim_red_axis); - auto s_dim_shape = ngraph::opset6::Constant::create(ngraph::element::i64, {1}, {1}); - auto second_dim = std::make_shared(second_dim_reduce, s_dim_shape, false); - auto out_shape = std::make_shared(ngraph::NodeVector{first_dim, second_dim}, - 0); - auto x_reshaped = std::make_shared(x, out_shape, false); - return node.default_single_output_mapping({std::make_shared(x_reshaped, y)}, {"Out"}); - } - return node.default_single_output_mapping({std::make_shared(x, y)}, {"Out"}); +using namespace ngraph; +using namespace ngraph::frontend; -} +namespace pdpd +{ + namespace op + { + NamedOutputs mul(const NodeContext& node) + { + auto x = node.get_ng_input("X"); + auto y = node.get_ng_input("Y"); + PDPD_ASSERT(x.get_partial_shape().rank().is_static(), "matmul: X rank must be static!"); + int64_t x_rank = x.get_partial_shape().rank().get_length(); + PDPD_ASSERT(y.get_partial_shape().rank().is_static() && + y.get_partial_shape().rank().get_length() == 2, + "matmul: Y rank must be static, and 2!"); + if (x_rank > 2) + { + auto shape = std::make_shared(x); + int64_t x_num_col_dims = node.get_attribute("x_num_col_dims"); + auto axis = opset6::Constant::create(element::i64, {}, {0}); + auto split_lengths = opset6::Constant::create( + element::i64, {2}, {x_num_col_dims, x_rank - x_num_col_dims}); + auto split = std::make_shared(shape, axis, split_lengths); + auto f_dim_red_axis = opset6::Constant::create(element::i64, {}, {0}); + auto first_dim_reduce = + std::make_shared(split->output(0), f_dim_red_axis); + auto f_dim_shape = opset6::Constant::create(element::i64, {1}, {1}); + auto first_dim = + std::make_shared(first_dim_reduce, f_dim_shape, false); + auto s_dim_red_axis = opset6::Constant::create(element::i64, {}, {0}); + auto second_dim_reduce = + std::make_shared(split->output(1), s_dim_red_axis); + auto s_dim_shape = opset6::Constant::create(element::i64, {1}, {1}); + auto second_dim = + std::make_shared(second_dim_reduce, s_dim_shape, false); + auto out_shape = + 
std::make_shared(NodeVector{first_dim, second_dim}, 0); + auto x_reshaped = std::make_shared(x, out_shape, false); + return node.default_single_output_mapping( + {std::make_shared(x_reshaped, y)}, {"Out"}); + } + return node.default_single_output_mapping({std::make_shared(x, y)}, + {"Out"}); + } -}}}} \ No newline at end of file + } // namespace op +} // namespace pdpd \ No newline at end of file diff --git a/ngraph/frontend/paddlepaddle/src/op/mul.hpp b/ngraph/frontend/paddlepaddle/src/op/mul.hpp index 61b34411070d8e..abf1bdeabc96c7 100644 --- a/ngraph/frontend/paddlepaddle/src/op/mul.hpp +++ b/ngraph/frontend/paddlepaddle/src/op/mul.hpp @@ -17,11 +17,17 @@ #pragma once #include "node_context.hpp" -namespace ngraph { -namespace frontend { -namespace pdpd { -namespace op { +namespace ngraph +{ + namespace frontend + { + namespace pdpd + { + namespace op + { + NamedOutputs mul(const NodeContext& node); -NamedOutputs mul (const NodeContext& node); - -}}}} \ No newline at end of file + } + } // namespace pdpd + } // namespace frontend +} // namespace ngraph \ No newline at end of file diff --git a/ngraph/frontend/paddlepaddle/src/op/multiclass_nms.cpp b/ngraph/frontend/paddlepaddle/src/op/multiclass_nms.cpp index afbac9efb17caf..995c02a9378d44 100644 --- a/ngraph/frontend/paddlepaddle/src/op/multiclass_nms.cpp +++ b/ngraph/frontend/paddlepaddle/src/op/multiclass_nms.cpp @@ -14,31 +14,41 @@ // limitations under the License. //***************************************************************************** -#include #include "multiclass_nms.hpp" +#include -namespace ngraph { -namespace frontend { -namespace pdpd { -namespace op { +using namespace ngraph; +using namespace ngraph::frontend; -NamedOutputs multiclass_nms (const NodeContext& node) { - auto bboxes = node.get_ng_input("BBoxes"); - auto scores = node.get_ng_input("Scores"); +namespace pdpd +{ + namespace op + { + NamedOutputs multiclass_nms(const NodeContext& node) + { + auto bboxes = node.get_ng_input("BBoxes"); + auto scores = node.get_ng_input("Scores"); - auto score_threshold = node.get_attribute("score_threshold"); - auto iou_threshold = node.get_attribute("nms_threshold"); - auto max_output_boxes_per_class = node.get_attribute("nms_top_k"); + auto score_threshold = node.get_attribute("score_threshold"); + auto iou_threshold = node.get_attribute("nms_threshold"); + auto max_output_boxes_per_class = node.get_attribute("nms_top_k"); - //TODO: dtype, scaler/vector attr, and more strick attributes check - auto node_max_output_boxes_per_class = ngraph::opset6::Constant::create(element::i32, Shape{1}, {max_output_boxes_per_class}); - auto node_iou_threshold = ngraph::opset6::Constant::create(element::f32, Shape{1}, {iou_threshold}); - auto node_score_threshold = ngraph::opset6::Constant::create(element::f32, Shape{1}, {score_threshold}); + // TODO: dtype, scaler/vector attr, and more strick attributes check + auto node_max_output_boxes_per_class = + opset6::Constant::create(element::i32, Shape{1}, {max_output_boxes_per_class}); + auto node_iou_threshold = + opset6::Constant::create(element::f32, Shape{1}, {iou_threshold}); + auto node_score_threshold = + opset6::Constant::create(element::f32, Shape{1}, {score_threshold}); - return node.default_single_output_mapping({std::make_shared(bboxes, scores, - node_max_output_boxes_per_class, - node_iou_threshold, - node_score_threshold)}, {"Out"}); -} + return node.default_single_output_mapping( + {std::make_shared(bboxes, + scores, + node_max_output_boxes_per_class, + node_iou_threshold, + 
node_score_threshold)}, + {"Out"}); + } -}}}} \ No newline at end of file + } // namespace op +} // namespace pdpd diff --git a/ngraph/frontend/paddlepaddle/src/op/multiclass_nms.hpp b/ngraph/frontend/paddlepaddle/src/op/multiclass_nms.hpp index ea17a302f89080..3f5c1c9083b4ce 100644 --- a/ngraph/frontend/paddlepaddle/src/op/multiclass_nms.hpp +++ b/ngraph/frontend/paddlepaddle/src/op/multiclass_nms.hpp @@ -17,11 +17,17 @@ #pragma once #include "node_context.hpp" -namespace ngraph { -namespace frontend { -namespace pdpd { -namespace op { +namespace ngraph +{ + namespace frontend + { + namespace pdpd + { + namespace op + { + NamedOutputs multiclass_nms(const NodeContext& node); -NamedOutputs multiclass_nms (const NodeContext& node); - -}}}} \ No newline at end of file + } + } // namespace pdpd + } // namespace frontend +} // namespace ngraph \ No newline at end of file diff --git a/ngraph/frontend/paddlepaddle/src/op/pad3d.cpp b/ngraph/frontend/paddlepaddle/src/op/pad3d.cpp index 9011b0da7d6d17..2d21d0a982ed8e 100644 --- a/ngraph/frontend/paddlepaddle/src/op/pad3d.cpp +++ b/ngraph/frontend/paddlepaddle/src/op/pad3d.cpp @@ -2,89 +2,108 @@ // SPDX-License-Identifier: Apache-2.0 // -#include #include "pad3d.hpp" +#include #include -namespace ngraph { -namespace frontend { -namespace pdpd { -namespace op { - -NamedOutputs pad3d (const NodeContext& node) { - auto data = node.get_ng_input("X"); - auto mode = node.get_attribute("mode"); - auto value = node.get_attribute("value", 0.0); - auto data_format = node.get_attribute("data_format"); - - auto paddings = std::vector(6, 0); - - //TODO: Only functional support Int padding format, further verify in #55169 - if (node.has_attribute>("paddings")) { - auto paddings_vector = node.get_attribute>("paddings"); - PDPD_ASSERT(paddings_vector.size() == 6, "paddings Params size should be 6 in pad3d!"); - paddings = paddings_vector; - } else if (node.has_attribute("paddings")) { - auto padding_int = node.get_attribute("paddings"); - for (int i = 0; i < 6; i++) - paddings[i] = padding_int; - } else { - throw ngraph::ngraph_error("Unsupported paddings attribute!"); - } +using namespace ngraph; +using namespace ngraph::frontend; - auto pads_begin = std::vector(5, 0); - auto pads_end = std::vector(5, 0); +namespace pdpd +{ + namespace op + { + NamedOutputs pad3d(const NodeContext& node) + { + auto data = node.get_ng_input("X"); + auto mode = node.get_attribute("mode"); + auto value = node.get_attribute("value", 0.0); + auto data_format = node.get_attribute("data_format"); + auto paddings = std::vector(6, 0); - Output values; - Output padding_begin; - Output padding_end; + // TODO: Only functional support Int padding format, further verify in #55169 + if (node.has_attribute>("paddings")) + { + auto paddings_vector = node.get_attribute>("paddings"); + PDPD_ASSERT(paddings_vector.size() == 6, + "paddings Params size should be 6 in pad3d!"); + paddings = paddings_vector; + } + else if (node.has_attribute("paddings")) + { + auto padding_int = node.get_attribute("paddings"); + for (int i = 0; i < 6; i++) + paddings[i] = padding_int; + } + else + { + throw ngraph_error("Unsupported paddings attribute!"); + } - ngraph::op::PadMode pad_mode; - //TODO Support Circular mode in future #55169 - if (mode == "constant") { - pad_mode = ngraph::op::PadMode::CONSTANT; - values = ngraph::opset6::Constant::create( - element::f32, ngraph::Shape{}, {value}); - } else if (mode == "reflect") { - pad_mode = ngraph::op::PadMode::REFLECT; - } else if (mode == "replicate") { - pad_mode = 
ngraph::op::PadMode::EDGE; - } else { - throw ngraph::ngraph_error("Unsupported 3d paddings mode: [" + mode + "]"); - } + auto pads_begin = std::vector(5, 0); + auto pads_end = std::vector(5, 0); - if (data_format == "NCDHW") { - pads_begin[4] = paddings[0]; //left - pads_end[4] = paddings[1]; //right - pads_begin[3] = paddings[2]; //top - pads_end[3] = paddings[3]; //down - pads_begin[2] = paddings[4]; //front - pads_end[2] = paddings[5]; //back + Output values; + Output padding_begin; + Output padding_end; - } else if (data_format == "NDHWC") { - pads_begin[3] = paddings[0]; //left - pads_end[3] = paddings[1]; //right - pads_begin[2] = paddings[2]; //top - pads_end[2] = paddings[3]; //down - pads_begin[1] = paddings[4]; //front - pads_end[1] = paddings[5]; //back + op::PadMode pad_mode; + // TODO Support Circular mode in future #55169 + if (mode == "constant") + { + pad_mode = op::PadMode::CONSTANT; + values = opset6::Constant::create(element::f32, Shape{}, {value}); + } + else if (mode == "reflect") + { + pad_mode = op::PadMode::REFLECT; + } + else if (mode == "replicate") + { + pad_mode = op::PadMode::EDGE; + } + else + { + throw ngraph_error("Unsupported 3d paddings mode: [" + mode + "]"); + } - } else { - throw ngraph::ngraph_error("Unsupported 3d paddings data_format: [" + data_format + "]"); - } + if (data_format == "NCDHW") + { + pads_begin[4] = paddings[0]; // left + pads_end[4] = paddings[1]; // right + pads_begin[3] = paddings[2]; // top + pads_end[3] = paddings[3]; // down + pads_begin[2] = paddings[4]; // front + pads_end[2] = paddings[5]; // back + } + else if (data_format == "NDHWC") + { + pads_begin[3] = paddings[0]; // left + pads_end[3] = paddings[1]; // right + pads_begin[2] = paddings[2]; // top + pads_end[2] = paddings[3]; // down + pads_begin[1] = paddings[4]; // front + pads_end[1] = paddings[5]; // back + } + else + { + throw ngraph_error("Unsupported 3d paddings data_format: [" + data_format + "]"); + } - padding_begin = ngraph::opset6::Constant::create( - element::i32, ngraph::Shape{pads_begin.size()}, pads_begin); - padding_end = ngraph::opset6::Constant::create( - element::i32, ngraph::Shape{pads_end.size()}, pads_end); + padding_begin = + opset6::Constant::create(element::i32, Shape{pads_begin.size()}, pads_begin); + padding_end = opset6::Constant::create(element::i32, Shape{pads_end.size()}, pads_end); - if (mode == "constant") - return node.default_single_output_mapping({std::make_shared(data, padding_begin, padding_end, values, pad_mode)}, {"Out"}); - else - return node.default_single_output_mapping({std::make_shared(data, padding_begin, padding_end, pad_mode)}, {"Out"}); -} -} -} -} -} + if (mode == "constant") + return node.default_single_output_mapping( + {std::make_shared( + data, padding_begin, padding_end, values, pad_mode)}, + {"Out"}); + else + return node.default_single_output_mapping( + {std::make_shared(data, padding_begin, padding_end, pad_mode)}, + {"Out"}); + } + } // namespace op +} // namespace pdpd diff --git a/ngraph/frontend/paddlepaddle/src/op/pad3d.hpp b/ngraph/frontend/paddlepaddle/src/op/pad3d.hpp index cc1da6edbeea44..da15f027cd894b 100644 --- a/ngraph/frontend/paddlepaddle/src/op/pad3d.hpp +++ b/ngraph/frontend/paddlepaddle/src/op/pad3d.hpp @@ -5,11 +5,17 @@ #pragma once #include "node_context.hpp" -namespace ngraph { -namespace frontend { -namespace pdpd { -namespace op { +namespace ngraph +{ + namespace frontend + { + namespace pdpd + { + namespace op + { + NamedOutputs pad3d(const NodeContext& node); -NamedOutputs pad3d (const 
NodeContext& node); - -}}}} + } + } // namespace pdpd + } // namespace frontend +} // namespace ngraph diff --git a/ngraph/frontend/paddlepaddle/src/op/pool2d.cpp b/ngraph/frontend/paddlepaddle/src/op/pool2d.cpp index 81a670d62a33f9..0fbc94100e5fec 100644 --- a/ngraph/frontend/paddlepaddle/src/op/pool2d.cpp +++ b/ngraph/frontend/paddlepaddle/src/op/pool2d.cpp @@ -14,46 +14,57 @@ // limitations under the License. //***************************************************************************** -#include #include "pool2d.hpp" +#include -namespace ngraph { -namespace frontend { -namespace pdpd { -namespace op { +using namespace ngraph; +using namespace ngraph::frontend; -NamedOutputs pool2d (const NodeContext& node) { - // TODO : resolve padding according to spec - auto data = node.get_ng_input("X"); - auto pooling_type = node.get_attribute("pooling_type"); - auto global_pooling = node.get_attribute("global_pooling"); - auto adaptive = node.get_attribute("adaptive"); - auto kernel_shape = node.get_attribute>("ksize"); - if (pooling_type == "max" && !global_pooling) { - auto strides = node.get_attribute>("strides"); - auto paddings = node.get_attribute>("paddings"); - auto rounding_type = node.get_attribute("ceil_mode") - ? ngraph::op::RoundingType::CEIL - : ngraph::op::RoundingType::FLOOR; - return node.default_single_output_mapping({std::make_shared( - data, - ngraph::Strides(strides.begin(), strides.end()), - ngraph::Shape(paddings.begin(), paddings.end()), - ngraph::Shape(paddings.begin(), paddings.end()), - ngraph::Shape(kernel_shape.begin(), kernel_shape.end()), - rounding_type)}, {"Out"}); - } - else if (pooling_type == "avg" && - (global_pooling || (adaptive && all_of(kernel_shape.begin(), - kernel_shape.end(), - [](int32_t s) { return s == 1; })))) +namespace pdpd +{ + namespace op { - // TODO : resolve axes according to rank - auto axes = ngraph::opset6::Constant::create(ngraph::element::i64, {2}, {2, 3}); - return node.default_single_output_mapping({std::make_shared(data, axes, true)}, {"Out"}); - } else { - throw std::runtime_error("Unsupported pooling type"); - } -} + NamedOutputs pool2d(const NodeContext& node) + { + // TODO : resolve padding according to spec + auto data = node.get_ng_input("X"); + auto pooling_type = node.get_attribute("pooling_type"); + auto global_pooling = node.get_attribute("global_pooling"); + auto adaptive = node.get_attribute("adaptive"); + auto kernel_shape = node.get_attribute>("ksize"); + if (pooling_type == "max" && !global_pooling) + { + auto strides = node.get_attribute>("strides"); + auto paddings = node.get_attribute>("paddings"); + auto rounding_type = node.get_attribute("ceil_mode") + ? 
op::RoundingType::CEIL + : op::RoundingType::FLOOR; + return node.default_single_output_mapping( + {std::make_shared( + data, + Strides(strides.begin(), strides.end()), + Shape(paddings.begin(), paddings.end()), + Shape(paddings.begin(), paddings.end()), + Shape(kernel_shape.begin(), kernel_shape.end()), + rounding_type)}, + {"Out"}); + } + else if (pooling_type == "avg" && + (global_pooling || + (adaptive && all_of(kernel_shape.begin(), kernel_shape.end(), [](int32_t s) { + return s == 1; + })))) + { + // TODO : resolve axes according to rank + auto axes = opset6::Constant::create(element::i64, {2}, {2, 3}); + return node.default_single_output_mapping( + {std::make_shared(data, axes, true)}, {"Out"}); + } + else + { + throw std::runtime_error("Unsupported pooling type"); + } + } -}}}} \ No newline at end of file + } // namespace op +} // namespace pdpd \ No newline at end of file diff --git a/ngraph/frontend/paddlepaddle/src/op/pool2d.hpp b/ngraph/frontend/paddlepaddle/src/op/pool2d.hpp index d5b3fe5fe752d2..1aa3dd98708ead 100644 --- a/ngraph/frontend/paddlepaddle/src/op/pool2d.hpp +++ b/ngraph/frontend/paddlepaddle/src/op/pool2d.hpp @@ -17,11 +17,17 @@ #pragma once #include "node_context.hpp" -namespace ngraph { -namespace frontend { -namespace pdpd { -namespace op { +namespace ngraph +{ + namespace frontend + { + namespace pdpd + { + namespace op + { + NamedOutputs pool2d(const NodeContext& node); -NamedOutputs pool2d (const NodeContext& node); - -}}}} \ No newline at end of file + } + } // namespace pdpd + } // namespace frontend +} // namespace ngraph \ No newline at end of file diff --git a/ngraph/frontend/paddlepaddle/src/op/relu.cpp b/ngraph/frontend/paddlepaddle/src/op/relu.cpp index d634e191fa52b4..650e61007d49ec 100644 --- a/ngraph/frontend/paddlepaddle/src/op/relu.cpp +++ b/ngraph/frontend/paddlepaddle/src/op/relu.cpp @@ -14,16 +14,21 @@ // limitations under the License. 
//***************************************************************************** -#include #include "relu.hpp" +#include -namespace ngraph { -namespace frontend { -namespace pdpd { -namespace op { +using namespace ngraph; +using namespace ngraph::frontend; -NamedOutputs relu (const NodeContext& node) { - return node.default_single_output_mapping({std::make_shared(node.get_ng_input("X"))}, {"Out"}); -} +namespace pdpd +{ + namespace op + { + NamedOutputs relu(const NodeContext& node) + { + return node.default_single_output_mapping( + {std::make_shared(node.get_ng_input("X"))}, {"Out"}); + } -}}}} \ No newline at end of file + } // namespace op +} // namespace pdpd \ No newline at end of file diff --git a/ngraph/frontend/paddlepaddle/src/op/relu.hpp b/ngraph/frontend/paddlepaddle/src/op/relu.hpp index aaf17db7785945..fcfc5ba1dc571a 100644 --- a/ngraph/frontend/paddlepaddle/src/op/relu.hpp +++ b/ngraph/frontend/paddlepaddle/src/op/relu.hpp @@ -17,11 +17,17 @@ #pragma once #include "node_context.hpp" -namespace ngraph { -namespace frontend { -namespace pdpd { -namespace op { +namespace ngraph +{ + namespace frontend + { + namespace pdpd + { + namespace op + { + NamedOutputs relu(const NodeContext& node); -NamedOutputs relu (const NodeContext& node); - -}}}} \ No newline at end of file + } + } // namespace pdpd + } // namespace frontend +} // namespace ngraph \ No newline at end of file diff --git a/ngraph/frontend/paddlepaddle/src/op/reshape2.cpp b/ngraph/frontend/paddlepaddle/src/op/reshape2.cpp index a32982761eb564..a3ed63a3d8abc5 100644 --- a/ngraph/frontend/paddlepaddle/src/op/reshape2.cpp +++ b/ngraph/frontend/paddlepaddle/src/op/reshape2.cpp @@ -14,28 +14,33 @@ // limitations under the License. //***************************************************************************** -#include #include "reshape2.hpp" +#include #include -namespace ngraph { -namespace frontend { -namespace pdpd { -namespace op { - -NamedOutputs reshape2(const NodeContext& node) { - auto data = node.get_ng_input("X"); - if (!node.has_ng_input("Shape") && !node.has_ng_input("ShapeTensor")) +using namespace ngraph; +using namespace ngraph::frontend; + +namespace pdpd +{ + namespace op { - auto shape_attr = node.get_attribute>("shape"); - auto shape_node = ngraph::opset6::Constant::create(ngraph::element::i32, {shape_attr.size()}, shape_attr); - return node.default_single_output_mapping({std::make_shared(data, shape_node, true)}, {"Out"}); - } else { - NOT_IMPLEMENTED("reshape2 with shape as input"); - } -} + NamedOutputs reshape2(const NodeContext& node) + { + auto data = node.get_ng_input("X"); + if (!node.has_ng_input("Shape") && !node.has_ng_input("ShapeTensor")) + { + auto shape_attr = node.get_attribute>("shape"); + auto shape_node = + opset6::Constant::create(element::i32, {shape_attr.size()}, shape_attr); + return node.default_single_output_mapping( + {std::make_shared(data, shape_node, true)}, {"Out"}); + } + else + { + NOT_IMPLEMENTED("reshape2 with shape as input"); + } + } -} // namespace op + } // namespace op } // namespace pdpd -} // namespace frontend -} // namespace ngraph \ No newline at end of file diff --git a/ngraph/frontend/paddlepaddle/src/op/reshape2.hpp b/ngraph/frontend/paddlepaddle/src/op/reshape2.hpp index dbee2507f1b8b7..4c2fbc842c5618 100644 --- a/ngraph/frontend/paddlepaddle/src/op/reshape2.hpp +++ b/ngraph/frontend/paddlepaddle/src/op/reshape2.hpp @@ -17,14 +17,17 @@ #pragma once #include "node_context.hpp" -namespace ngraph { -namespace frontend { -namespace pdpd { -namespace op { - 
-NamedOutputs reshape2(const NodeContext& node); +namespace ngraph +{ + namespace frontend + { + namespace pdpd + { + namespace op + { + NamedOutputs reshape2(const NodeContext& node); -} // namespace op -} // namespace pdpd -} // namespace frontend + } // namespace op + } // namespace pdpd + } // namespace frontend } // namespace ngraph \ No newline at end of file diff --git a/ngraph/frontend/paddlepaddle/src/op/scale.cpp b/ngraph/frontend/paddlepaddle/src/op/scale.cpp index c9c3806ad1d945..f5131de19b0b5e 100644 --- a/ngraph/frontend/paddlepaddle/src/op/scale.cpp +++ b/ngraph/frontend/paddlepaddle/src/op/scale.cpp @@ -14,27 +14,38 @@ // limitations under the License. //***************************************************************************** -#include #include "scale.hpp" +#include -namespace ngraph { -namespace frontend { -namespace pdpd { -namespace op { +using namespace ngraph; +using namespace ngraph::frontend; - NamedOutputs scale (const NodeContext& node) { - auto data = node.get_ng_input("X"); - auto scale = ngraph::opset6::Constant::create(ngraph::element::f32, {1}, {node.get_attribute("scale")}); - auto bias = ngraph::opset6::Constant::create(ngraph::element::f32, {1}, {node.get_attribute("bias")}); - auto bias_after_scale = node.get_attribute("bias_after_scale"); - auto fp32_data = std::make_shared(data, element::f32); - if(!bias_after_scale) { - auto node_add = std::make_shared(fp32_data, bias); - return node.default_single_output_mapping({std::make_shared(node_add, scale)}, {"Out"}); - } else { - auto node_multiply = std::make_shared(fp32_data, scale); - return node.default_single_output_mapping({std::make_shared(node_multiply, bias)}, {"Out"}); +namespace pdpd +{ + namespace op + { + NamedOutputs scale(const NodeContext& node) + { + auto data = node.get_ng_input("X"); + auto scale = + opset6::Constant::create(element::f32, {1}, {node.get_attribute("scale")}); + auto bias = + opset6::Constant::create(element::f32, {1}, {node.get_attribute("bias")}); + auto bias_after_scale = node.get_attribute("bias_after_scale"); + auto fp32_data = std::make_shared(data, element::f32); + if (!bias_after_scale) + { + auto node_add = std::make_shared(fp32_data, bias); + return node.default_single_output_mapping( + {std::make_shared(node_add, scale)}, {"Out"}); + } + else + { + auto node_multiply = std::make_shared(fp32_data, scale); + return node.default_single_output_mapping( + {std::make_shared(node_multiply, bias)}, {"Out"}); + } } - } -}}}} \ No newline at end of file + } // namespace op +} // namespace pdpd diff --git a/ngraph/frontend/paddlepaddle/src/op/scale.hpp b/ngraph/frontend/paddlepaddle/src/op/scale.hpp index 7481a677435e7e..72ddffaaab574a 100644 --- a/ngraph/frontend/paddlepaddle/src/op/scale.hpp +++ b/ngraph/frontend/paddlepaddle/src/op/scale.hpp @@ -17,11 +17,17 @@ #pragma once #include "node_context.hpp" -namespace ngraph { -namespace frontend { -namespace pdpd { -namespace op { +namespace ngraph +{ + namespace frontend + { + namespace pdpd + { + namespace op + { + NamedOutputs scale(const NodeContext& node); -NamedOutputs scale (const NodeContext& node); - -}}}} \ No newline at end of file + } + } // namespace pdpd + } // namespace frontend +} // namespace ngraph \ No newline at end of file diff --git a/ngraph/frontend/paddlepaddle/src/op/slice.cpp b/ngraph/frontend/paddlepaddle/src/op/slice.cpp index d61b92e52dbbc9..5a460957d3949a 100644 --- a/ngraph/frontend/paddlepaddle/src/op/slice.cpp +++ b/ngraph/frontend/paddlepaddle/src/op/slice.cpp @@ -2,44 +2,52 @@ // 
SPDX-License-Identifier: Apache-2.0 // -#include #include "slice.hpp" +#include #include -namespace ngraph { -namespace frontend { -namespace pdpd { -namespace op { +using namespace ngraph; +using namespace ngraph::frontend; -NamedOutputs slice (const NodeContext& node) { - auto data = node.get_ng_input("Input"); - auto axes = node.get_attribute>("axes"); - // TODO: support tensor type #55266 - auto starts = node.get_attribute>("starts"); - // TODO: support tensor type #55266 - auto ends = node.get_attribute>("ends"); - auto data_rank = data.get_partial_shape().rank(); - size_t shape_size = data_rank.get_length(); - std::vector fixedStarts(shape_size, 0); - std::vector fixedEnds(shape_size, INT_MAX); +namespace pdpd +{ + namespace op + { + NamedOutputs slice(const NodeContext& node) + { + auto data = node.get_ng_input("Input"); + auto axes = node.get_attribute>("axes"); + // TODO: support tensor type #55266 + auto starts = node.get_attribute>("starts"); + // TODO: support tensor type #55266 + auto ends = node.get_attribute>("ends"); + auto data_rank = data.get_partial_shape().rank(); + size_t shape_size = data_rank.get_length(); + std::vector fixedStarts(shape_size, 0); + std::vector fixedEnds(shape_size, INT_MAX); - int n = 0; - for (auto i : axes) { - PDPD_ASSERT(i < (int32_t)shape_size, "slice: axes must be less than the X rank."); - fixedStarts[i] = starts[n]; - fixedEnds[i] = ends[n]; - n++; - } + int n = 0; + for (auto i : axes) + { + PDPD_ASSERT(i < (int32_t)shape_size, "slice: axes must be less than the X rank."); + fixedStarts[i] = starts[n]; + fixedEnds[i] = ends[n]; + n++; + } - auto startsNode = ngraph::opset6::Constant::create(ngraph::element::i32, { shape_size }, fixedStarts); - auto endsNode = ngraph::opset6::Constant::create(ngraph::element::i32, { shape_size }, fixedEnds); - auto stridesNode = ngraph::opset6::Constant::create(ngraph::element::i32, { shape_size }, std::vector(shape_size, 1)); - return node.default_single_output_mapping({std::make_shared(data, - startsNode, - endsNode, - stridesNode, - std::vector(shape_size, 0), - std::vector(shape_size, 0))}, {"Out"}); -} + auto startsNode = opset6::Constant::create(element::i32, {shape_size}, fixedStarts); + auto endsNode = opset6::Constant::create(element::i32, {shape_size}, fixedEnds); + auto stridesNode = opset6::Constant::create( + element::i32, {shape_size}, std::vector(shape_size, 1)); + return node.default_single_output_mapping( + {std::make_shared(data, + startsNode, + endsNode, + stridesNode, + std::vector(shape_size, 0), + std::vector(shape_size, 0))}, + {"Out"}); + } -}}}} \ No newline at end of file + } // namespace op +} // namespace pdpd diff --git a/ngraph/frontend/paddlepaddle/src/op/slice.hpp b/ngraph/frontend/paddlepaddle/src/op/slice.hpp index a763bee622760d..2bce2ac3440e1e 100644 --- a/ngraph/frontend/paddlepaddle/src/op/slice.hpp +++ b/ngraph/frontend/paddlepaddle/src/op/slice.hpp @@ -5,11 +5,17 @@ #pragma once #include "node_context.hpp" -namespace ngraph { -namespace frontend { -namespace pdpd { -namespace op { +namespace ngraph +{ + namespace frontend + { + namespace pdpd + { + namespace op + { + NamedOutputs slice(const NodeContext& node); -NamedOutputs slice (const NodeContext& node); - -}}}} \ No newline at end of file + } + } // namespace pdpd + } // namespace frontend +} // namespace ngraph \ No newline at end of file diff --git a/ngraph/frontend/paddlepaddle/src/op/softmax.cpp b/ngraph/frontend/paddlepaddle/src/op/softmax.cpp index ea295a69e0fe87..1e116b9c7c7406 100644 --- 
a/ngraph/frontend/paddlepaddle/src/op/softmax.cpp +++ b/ngraph/frontend/paddlepaddle/src/op/softmax.cpp @@ -14,26 +14,30 @@ // limitations under the License. //***************************************************************************** -#include #include "softmax.hpp" +#include #include -namespace ngraph { -namespace frontend { -namespace pdpd { -namespace op { - NamedOutputs softmax(const NodeContext& node) { - auto data = node.get_ng_input("X"); - auto axis = node.get_attribute("axis"); - if (axis < 0) +using namespace ngraph; +using namespace ngraph::frontend; + +namespace pdpd +{ + namespace op + { + NamedOutputs softmax(const NodeContext& node) { - PDPD_ASSERT(data.get_partial_shape().rank().is_static(), "Softmax rank must be static"); - auto data_rank = data.get_partial_shape().rank().get_length(); - axis = data_rank + axis; + auto data = node.get_ng_input("X"); + auto axis = node.get_attribute("axis"); + if (axis < 0) + { + PDPD_ASSERT(data.get_partial_shape().rank().is_static(), + "Softmax rank must be static"); + auto data_rank = data.get_partial_shape().rank().get_length(); + axis = data_rank + axis; + } + return node.default_single_output_mapping( + {std::make_shared(data, axis)}, {"Out"}); } - return node.default_single_output_mapping({std::make_shared(data, axis)}, {"Out"}); - } -} // namespace op + } // namespace op } // namespace pdpd -} // namespace frontend -} // namespace ngraph \ No newline at end of file diff --git a/ngraph/frontend/paddlepaddle/src/op/softmax.hpp b/ngraph/frontend/paddlepaddle/src/op/softmax.hpp index b2839eccd8d3e9..ac45a47fbceb0f 100644 --- a/ngraph/frontend/paddlepaddle/src/op/softmax.hpp +++ b/ngraph/frontend/paddlepaddle/src/op/softmax.hpp @@ -17,14 +17,17 @@ #pragma once #include "node_context.hpp" -namespace ngraph { -namespace frontend { -namespace pdpd { -namespace op { +namespace ngraph +{ + namespace frontend + { + namespace pdpd + { + namespace op + { + NamedOutputs softmax(const NodeContext& node); -NamedOutputs softmax(const NodeContext& node); - -} // namespace op -} // namespace pdpd -} // namespace frontend + } // namespace op + } // namespace pdpd + } // namespace frontend } // namespace ngraph \ No newline at end of file diff --git a/ngraph/frontend/paddlepaddle/src/op/split.cpp b/ngraph/frontend/paddlepaddle/src/op/split.cpp index 1eb929f6521f37..bb6b647689e8c2 100644 --- a/ngraph/frontend/paddlepaddle/src/op/split.cpp +++ b/ngraph/frontend/paddlepaddle/src/op/split.cpp @@ -2,37 +2,40 @@ // SPDX-License-Identifier: Apache-2.0 // -#include #include "split.hpp" +#include #include -namespace ngraph { -namespace frontend { -namespace pdpd { -namespace op { - NamedOutputs split(const NodeContext& node) { - using namespace ngraph; - using namespace opset7; - const auto& data = node.get_ng_input("X"); - auto dim = node.get_attribute("axis"); - // todo: 'num' can be list of values, in this case we should create VariadicSplit - // todo: support VariadicSplit - auto num_or_sections = node.get_attribute("num"); - auto axis = std::make_shared(ngraph::element::i32, Shape{}, dim); +using namespace ngraph; +using namespace ngraph::frontend; + +namespace pdpd +{ + namespace op + { + NamedOutputs split(const NodeContext& node) + { + using namespace ngraph; + using namespace opset7; + const auto& data = node.get_ng_input("X"); + auto dim = node.get_attribute("axis"); + // todo: 'num' can be list of values, in this case we should create + // VariadicSplit todo: support VariadicSplit + auto num_or_sections = node.get_attribute("num"); + auto axis = 
std::make_shared(element::i32, Shape{}, dim); - NamedOutputs named_outputs; - auto split_outputs = std::make_shared(data, axis, num_or_sections)->outputs(); - auto out_names = node.get_output_names(); - PDPD_ASSERT(out_names.size() == 1, "Unexpected number of outputs"); + NamedOutputs named_outputs; + auto split_outputs = std::make_shared(data, axis, num_or_sections)->outputs(); + auto out_names = node.get_output_names(); + PDPD_ASSERT(out_names.size() == 1, "Unexpected number of outputs"); - auto it = std::find(out_names.begin(), out_names.end(), "Out"); - PDPD_ASSERT(it != out_names.end(), "Expected output not found"); - for (const auto& split_output : split_outputs) { - named_outputs[*it].push_back(split_output); + auto it = std::find(out_names.begin(), out_names.end(), "Out"); + PDPD_ASSERT(it != out_names.end(), "Expected output not found"); + for (const auto& split_output : split_outputs) + { + named_outputs[*it].push_back(split_output); + } + return named_outputs; } - return named_outputs; - } -} // namespace op + } // namespace op } // namespace pdpd -} // namespace frontend -} // namespace ngraph \ No newline at end of file diff --git a/ngraph/frontend/paddlepaddle/src/op/split.hpp b/ngraph/frontend/paddlepaddle/src/op/split.hpp index b36e6c3a6b6827..3ae3a40018fcaf 100644 --- a/ngraph/frontend/paddlepaddle/src/op/split.hpp +++ b/ngraph/frontend/paddlepaddle/src/op/split.hpp @@ -4,14 +4,17 @@ #pragma once #include "node_context.hpp" -namespace ngraph { -namespace frontend { -namespace pdpd { -namespace op { +namespace ngraph +{ + namespace frontend + { + namespace pdpd + { + namespace op + { + NamedOutputs split(const NodeContext& node); - NamedOutputs split(const NodeContext& node); - -} // namespace op -} // namespace pdpd -} // namespace frontend + } // namespace op + } // namespace pdpd + } // namespace frontend } // namespace ngraph \ No newline at end of file diff --git a/ngraph/frontend/paddlepaddle/src/op/squeeze.cpp b/ngraph/frontend/paddlepaddle/src/op/squeeze.cpp index 077b5cb5abd60a..98dfd2b2404070 100644 --- a/ngraph/frontend/paddlepaddle/src/op/squeeze.cpp +++ b/ngraph/frontend/paddlepaddle/src/op/squeeze.cpp @@ -2,27 +2,30 @@ // SPDX-License-Identifier: Apache-2.0 // -#include #include "squeeze.hpp" +#include #include -namespace ngraph { -namespace frontend { -namespace pdpd { -namespace op { +using namespace ngraph; +using namespace ngraph::frontend; -NamedOutputs squeeze (const NodeContext& node) { - auto data = node.get_ng_input("X"); - std::vector axes; - if (node.has_attribute>("axes")) { - axes = node.get_attribute>("axes"); - } +namespace pdpd +{ + namespace op + { + NamedOutputs squeeze(const NodeContext& node) + { + auto data = node.get_ng_input("X"); + std::vector axes; + if (node.has_attribute>("axes")) + { + axes = node.get_attribute>("axes"); + } - auto axesNode = ngraph::opset6::Constant::create(ngraph::element::i32, {axes.size()}, axes); - return node.default_single_output_mapping({std::make_shared(data, axesNode)}, {"Out"}); -} + auto axesNode = opset6::Constant::create(element::i32, {axes.size()}, axes); + return node.default_single_output_mapping( + {std::make_shared(data, axesNode)}, {"Out"}); + } -} // namespace op -} // namespace pdpd -} // namespace frontend -} // namespace ngraph \ No newline at end of file + } // namespace op +} // namespace pdpd \ No newline at end of file diff --git a/ngraph/frontend/paddlepaddle/src/op/squeeze.hpp b/ngraph/frontend/paddlepaddle/src/op/squeeze.hpp index fb778f4a489f3b..c0648573c4e6b9 100644 --- 
a/ngraph/frontend/paddlepaddle/src/op/squeeze.hpp +++ b/ngraph/frontend/paddlepaddle/src/op/squeeze.hpp @@ -5,11 +5,17 @@ #pragma once #include "node_context.hpp" -namespace ngraph { -namespace frontend { -namespace pdpd { -namespace op { +namespace ngraph +{ + namespace frontend + { + namespace pdpd + { + namespace op + { + NamedOutputs squeeze(const NodeContext& node); -NamedOutputs squeeze (const NodeContext& node); - -}}}} \ No newline at end of file + } + } // namespace pdpd + } // namespace frontend +} // namespace ngraph \ No newline at end of file diff --git a/ngraph/frontend/paddlepaddle/src/op/transpose2.cpp b/ngraph/frontend/paddlepaddle/src/op/transpose2.cpp index c411ad1ecc0752..50d9a10f1044d5 100644 --- a/ngraph/frontend/paddlepaddle/src/op/transpose2.cpp +++ b/ngraph/frontend/paddlepaddle/src/op/transpose2.cpp @@ -14,27 +14,32 @@ // limitations under the License. //***************************************************************************** -#include #include "transpose2.hpp" +#include -namespace ngraph { -namespace frontend { -namespace pdpd { -namespace op { +using namespace ngraph; +using namespace ngraph::frontend; -NamedOutputs transpose2 (const NodeContext& node) { - auto data = node.get_ng_input("X"); - auto perm = node.get_attribute>("axis"); +namespace pdpd +{ + namespace op + { + NamedOutputs transpose2(const NodeContext& node) + { + auto data = node.get_ng_input("X"); + auto perm = node.get_attribute>("axis"); - auto rank = static_cast(data.get_partial_shape().rank().get_length()); + auto rank = static_cast(data.get_partial_shape().rank().get_length()); - std::cout << perm.size() << std::endl; - std::cout << data.get_partial_shape().rank() << ":" << rank << std::endl; + std::cout << perm.size() << std::endl; + std::cout << data.get_partial_shape().rank() << ":" << rank << std::endl; - PDPD_ASSERT(perm.size() == rank, "transpose2: axis size must equal to data rank!"); + PDPD_ASSERT(perm.size() == rank, "transpose2: axis size must equal to data rank!"); - auto input_order = ngraph::opset6::Constant::create(ngraph::element::i64, {rank}, perm); - return node.default_single_output_mapping({std::make_shared(data, input_order)}, {"Out"}); -} + auto input_order = opset6::Constant::create(element::i64, {rank}, perm); + return node.default_single_output_mapping( + {std::make_shared(data, input_order)}, {"Out"}); + } -}}}} \ No newline at end of file + } // namespace op +} // namespace pdpd diff --git a/ngraph/frontend/paddlepaddle/src/op/transpose2.hpp b/ngraph/frontend/paddlepaddle/src/op/transpose2.hpp index 6284e6b0182d7d..31b6af6944ef14 100644 --- a/ngraph/frontend/paddlepaddle/src/op/transpose2.hpp +++ b/ngraph/frontend/paddlepaddle/src/op/transpose2.hpp @@ -17,11 +17,17 @@ #pragma once #include "node_context.hpp" -namespace ngraph { -namespace frontend { -namespace pdpd { -namespace op { +namespace ngraph +{ + namespace frontend + { + namespace pdpd + { + namespace op + { + NamedOutputs transpose2(const NodeContext& node); -NamedOutputs transpose2 (const NodeContext& node); - -}}}} \ No newline at end of file + } + } // namespace pdpd + } // namespace frontend +} // namespace ngraph \ No newline at end of file diff --git a/ngraph/frontend/paddlepaddle/src/op/unsqueeze.cpp b/ngraph/frontend/paddlepaddle/src/op/unsqueeze.cpp index 203cfd8bfa9fc2..e620afd1c1186b 100644 --- a/ngraph/frontend/paddlepaddle/src/op/unsqueeze.cpp +++ b/ngraph/frontend/paddlepaddle/src/op/unsqueeze.cpp @@ -1,21 +1,26 @@ // Copyright (C) 2018-2021 Intel Corporation // SPDX-License-Identifier: 
Apache-2.0 // -#include #include "unsqueeze.hpp" +#include #include -namespace ngraph { -namespace frontend { -namespace pdpd { -namespace op { +using namespace ngraph; +using namespace ngraph::frontend; -NamedOutputs unsqueeze (const NodeContext& node) { - //TODO to support data type other than int32_t #55168 - auto data = node.get_ng_input("X"); - auto axes = node.get_attribute>("axes"); - auto axesNode = ngraph::opset6::Constant::create(ngraph::element::i32, {axes.size()}, axes); - return node.default_single_output_mapping({std::make_shared(data, axesNode)}, {"Out"}); -} +namespace pdpd +{ + namespace op + { + NamedOutputs unsqueeze(const NodeContext& node) + { + // TODO to support data type other than int32_t #55168 + auto data = node.get_ng_input("X"); + auto axes = node.get_attribute>("axes"); + auto axesNode = opset6::Constant::create(element::i32, {axes.size()}, axes); + return node.default_single_output_mapping( + {std::make_shared(data, axesNode)}, {"Out"}); + } -}}}} \ No newline at end of file + } // namespace op +} // namespace pdpd diff --git a/ngraph/frontend/paddlepaddle/src/op/unsqueeze.hpp b/ngraph/frontend/paddlepaddle/src/op/unsqueeze.hpp index 029245e1df54cd..bce9596f6f6c12 100644 --- a/ngraph/frontend/paddlepaddle/src/op/unsqueeze.hpp +++ b/ngraph/frontend/paddlepaddle/src/op/unsqueeze.hpp @@ -5,11 +5,17 @@ #pragma once #include "node_context.hpp" -namespace ngraph { -namespace frontend { -namespace pdpd { -namespace op { +namespace ngraph +{ + namespace frontend + { + namespace pdpd + { + namespace op + { + NamedOutputs unsqueeze(const NodeContext& node); -NamedOutputs unsqueeze (const NodeContext& node); - -}}}} \ No newline at end of file + } + } // namespace pdpd + } // namespace frontend +} // namespace ngraph \ No newline at end of file diff --git a/ngraph/frontend/paddlepaddle/src/op/yolo_box.cpp b/ngraph/frontend/paddlepaddle/src/op/yolo_box.cpp index e1280ee7075c51..6b8fd8a2ffe26f 100644 --- a/ngraph/frontend/paddlepaddle/src/op/yolo_box.cpp +++ b/ngraph/frontend/paddlepaddle/src/op/yolo_box.cpp @@ -1,262 +1,329 @@ // Copyright (C) 2018-2021 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // -#include // std::numeric_limits +#include // std::numeric_limits #include #include #include "yolo_box.hpp" -namespace ngraph { -namespace frontend { -namespace pdpd { -namespace op { - using namespace opset6; - using namespace element; - -NamedOutputs yolo_box (const NodeContext& node_context) { - auto data = node_context.get_ng_input("X"); - auto image_size = node_context.get_ng_input("ImgSize"); - - // TODO: add dynamic shape support - #55264 - auto input_shape = data.get_partial_shape(); - uint32_t input_height = input_shape[2].get_length(); - uint32_t input_width = input_shape[3].get_length(); - - int32_t class_num = node_context.get_attribute("class_num"); - // PDPD anchors attribute is of type int32. Convert to float for computing convinient. 
- auto _anchors = node_context.get_attribute>("anchors"); - std::vector anchors(_anchors.begin(), _anchors.end()); - - uint32_t num_anchors = anchors.size()/2; - - auto default_scale = 1.0f; - auto scale_x_y = node_context.get_attribute("scale_x_y", default_scale); - auto downsample_ratio = node_context.get_attribute("downsample_ratio"); - auto input_size = input_height * downsample_ratio; - - std::vector score_shape {1, input_height * input_width * num_anchors, class_num}; - - auto conf_thresh = node_context.get_attribute("conf_thresh"); - std::vector conf_thresh_mat(score_shape[1], conf_thresh); - - std::cout << "input_height: " << input_height << " input_width: " << input_width << " input_size: " << input_size<< std::endl; - std::cout << "num_anchors: " << num_anchors << " scale_x_y: " << scale_x_y << std::endl; - std::cout << "downsample_ratio: " << downsample_ratio << " conf_thresh: " << conf_thresh << std::endl; - std::cout << "class_num: " << class_num << " image_size: " << image_size << std::endl; - - auto clip_bbox = node_context.get_attribute("clip_bbox"); - - // main X - auto node_x_shape = Constant::create(i64, {5}, - {1, num_anchors, 5 + class_num, input_height, input_width}); +using namespace ngraph; +using namespace ngraph::frontend; + +namespace pdpd +{ + namespace op + { + using namespace opset6; + using namespace element; - auto node_x_reshape = std::make_shared(data, node_x_shape, false); + NamedOutputs yolo_box(const NodeContext& node_context) + { + auto data = node_context.get_ng_input("X"); + auto image_size = node_context.get_ng_input("ImgSize"); - auto node_input_order = Constant::create(i64, {5}, {0, 1, 3, 4, 2}); - auto node_x_transpose = std::make_shared(node_x_reshape, node_input_order); + // TODO: add dynamic shape support - #55264 + auto input_shape = data.get_partial_shape(); + uint32_t input_height = input_shape[2].get_length(); + uint32_t input_width = input_shape[3].get_length(); - // range x/y - std::vector range_x(input_width); - std::iota(range_x.begin(), range_x.end(), 0); - std::vector range_y(input_height); - std::iota(range_y.begin(), range_y.end(), 0); - - auto node_range_x = Constant::create(f32, {1, range_x.size()}, range_x); - auto node_range_y = Constant::create(f32, {range_y.size(), 1}, range_y); - - auto node_range_x_shape = Constant::create(i64, {2}, {1, input_width}); - auto node_range_y_shape = Constant::create(i64, {2}, {input_height, 1}); - - auto node_grid_x = std::make_shared(node_range_x, node_range_y_shape); - auto node_grid_y = std::make_shared(node_range_y, node_range_x_shape); + int32_t class_num = node_context.get_attribute("class_num"); + // PDPD anchors attribute is of type int32. Convert to float for computing + // convinient. 
+ auto _anchors = node_context.get_attribute>("anchors"); + std::vector anchors(_anchors.begin(), _anchors.end()); - // main X (part2) - auto node_split_axis = Constant::create(i64, {1}, {-1}); - auto node_split_lengths = Constant::create(i64, {6}, {1, 1, 1, 1, 1, class_num}); - auto node_split_input = std::make_shared(node_x_transpose, node_split_axis, node_split_lengths); + uint32_t num_anchors = anchors.size() / 2; - auto node_box_x = node_split_input->output(0); - auto node_box_y = node_split_input->output(1); - auto node_box_w = node_split_input->output(2); - auto node_box_h = node_split_input->output(3); - auto node_conf = node_split_input->output(4); - auto node_prob = node_split_input->output(5); + auto default_scale = 1.0f; + auto scale_x_y = node_context.get_attribute("scale_x_y", default_scale); + auto downsample_ratio = node_context.get_attribute("downsample_ratio"); + auto input_size = input_height * downsample_ratio; - // x/y - std::shared_ptr node_box_x_sigmoid = std::make_shared(node_box_x); - std::shared_ptr node_box_y_sigmoid = std::make_shared(node_box_y); + std::vector score_shape{ + 1, input_height * input_width * num_anchors, class_num}; - if (std::fabs(scale_x_y - default_scale) > 1e-6) { //float not-equal - float bias_x_y = -0.5 * (scale_x_y - 1.0); + auto conf_thresh = node_context.get_attribute("conf_thresh"); + std::vector conf_thresh_mat(score_shape[1], conf_thresh); - auto scale_x_y_node = Constant::create(f32, {1}, {scale_x_y}); - auto bias_x_y_node = Constant::create(f32, {1}, {bias_x_y}); + std::cout << "input_height: " << input_height << " input_width: " << input_width + << " input_size: " << input_size << std::endl; + std::cout << "num_anchors: " << num_anchors << " scale_x_y: " << scale_x_y << std::endl; + std::cout << "downsample_ratio: " << downsample_ratio << " conf_thresh: " << conf_thresh + << std::endl; + std::cout << "class_num: " << class_num << " image_size: " << image_size << std::endl; - node_box_x_sigmoid = std::make_shared(node_box_x_sigmoid, scale_x_y_node); - node_box_x_sigmoid = std::make_shared(node_box_x_sigmoid, bias_x_y_node); - - node_box_y_sigmoid = std::make_shared(node_box_y_sigmoid, scale_x_y_node); - node_box_y_sigmoid = std::make_shared(node_box_y_sigmoid, bias_x_y_node); - } + auto clip_bbox = node_context.get_attribute("clip_bbox"); - auto squeeze_box_x = Constant::create(i64, {1}, {4}); - auto node_box_x_squeeze = std::make_shared(node_box_x_sigmoid, squeeze_box_x); + // main X + auto node_x_shape = Constant::create( + i64, {5}, {1, num_anchors, 5 + class_num, input_height, input_width}); - auto squeeze_box_y = Constant::create(i64, {1}, {4}); - auto node_box_y_squeeze = std::make_shared(node_box_y_sigmoid, squeeze_box_y); + auto node_x_reshape = std::make_shared(data, node_x_shape, false); - auto node_box_x_add_grid = std::make_shared(node_grid_x, node_box_x_squeeze); - auto node_box_y_add_grid = std::make_shared(node_grid_y, node_box_y_squeeze); + auto node_input_order = Constant::create(i64, {5}, {0, 1, 3, 4, 2}); + auto node_x_transpose = std::make_shared(node_x_reshape, node_input_order); - auto node_input_h = Constant::create(f32, {1}, {(float)input_height}); - auto node_input_w = Constant::create(f32, {1}, {(float)input_width}); + // range x/y + std::vector range_x(input_width); + std::iota(range_x.begin(), range_x.end(), 0); + std::vector range_y(input_height); + std::iota(range_y.begin(), range_y.end(), 0); - auto node_box_x_encode = std::make_shared(node_box_x_add_grid, node_input_w); - auto node_box_y_encode = 
std::make_shared(node_box_y_add_grid, node_input_h); + auto node_range_x = Constant::create(f32, {1, range_x.size()}, range_x); + auto node_range_y = Constant::create(f32, {range_y.size(), 1}, range_y); - // w/h - auto node_anchor_tensor = Constant::create(f32, {num_anchors, 2}, anchors); //FIXME:Paddle2ONNX use float! + auto node_range_x_shape = Constant::create(i64, {2}, {1, input_width}); + auto node_range_y_shape = Constant::create(i64, {2}, {input_height, 1}); - auto node_input_size = Constant::create(f32, {1}, {(float)input_size}); - auto node_anchors_div_input_size = std::make_shared(node_anchor_tensor, node_input_size); - - auto split_axis = Constant::create(i32, {}, {1}); - auto node_anchor_split = std::make_shared(node_anchors_div_input_size, split_axis, 2); + auto node_grid_x = std::make_shared(node_range_x, node_range_y_shape); + auto node_grid_y = std::make_shared(node_range_y, node_range_x_shape); - auto node_anchor_w = node_anchor_split->output(0); - auto node_anchor_h = node_anchor_split->output(1); + // main X (part2) + auto node_split_axis = Constant::create(i64, {1}, {-1}); + auto node_split_lengths = + Constant::create(i64, {6}, {1, 1, 1, 1, 1, class_num}); + auto node_split_input = std::make_shared( + node_x_transpose, node_split_axis, node_split_lengths); - auto node_new_anchor_shape = Constant::create(i64, {4}, {1, num_anchors, 1, 1}); - auto node_anchor_w_reshape = std::make_shared(node_anchor_w, node_new_anchor_shape, false); - auto node_anchor_h_reshape = std::make_shared(node_anchor_h, node_new_anchor_shape, false); + auto node_box_x = node_split_input->output(0); + auto node_box_y = node_split_input->output(1); + auto node_box_w = node_split_input->output(2); + auto node_box_h = node_split_input->output(3); + auto node_conf = node_split_input->output(4); + auto node_prob = node_split_input->output(5); - auto squeeze_box_wh = Constant::create(i64, {1}, {4}); - auto node_box_w_squeeze = std::make_shared(node_box_w, squeeze_box_wh); - auto node_box_h_squeeze = std::make_shared(node_box_h, squeeze_box_wh); + // x/y + std::shared_ptr node_box_x_sigmoid = std::make_shared(node_box_x); + std::shared_ptr node_box_y_sigmoid = std::make_shared(node_box_y); - auto node_box_w_exp = std::make_shared(node_box_w_squeeze); - auto node_box_h_exp = std::make_shared(node_box_h_squeeze); + if (std::fabs(scale_x_y - default_scale) > 1e-6) + { // float not-equal + float bias_x_y = -0.5 * (scale_x_y - 1.0); - auto node_box_w_encode = std::make_shared(node_box_w_exp, node_anchor_w_reshape); - auto node_box_h_encode = std::make_shared(node_box_h_exp, node_anchor_h_reshape); + auto scale_x_y_node = Constant::create(f32, {1}, {scale_x_y}); + auto bias_x_y_node = Constant::create(f32, {1}, {bias_x_y}); - // confidence - auto node_conf_sigmoid = std::make_shared(node_conf); + node_box_x_sigmoid = std::make_shared(node_box_x_sigmoid, scale_x_y_node); + node_box_x_sigmoid = std::make_shared(node_box_x_sigmoid, bias_x_y_node); - auto node_conf_thresh = Constant::create(f32, {1, num_anchors, input_height, input_width, 1}, conf_thresh_mat); + node_box_y_sigmoid = std::make_shared(node_box_y_sigmoid, scale_x_y_node); + node_box_y_sigmoid = std::make_shared(node_box_y_sigmoid, bias_x_y_node); + } + + auto squeeze_box_x = Constant::create(i64, {1}, {4}); + auto node_box_x_squeeze = std::make_shared(node_box_x_sigmoid, squeeze_box_x); - auto node_conf_sub = std::make_shared(node_conf_sigmoid, node_conf_thresh); + auto squeeze_box_y = Constant::create(i64, {1}, {4}); + auto node_box_y_squeeze = 
std::make_shared(node_box_y_sigmoid, squeeze_box_y); - auto node_conf_clip = std::make_shared(node_conf_sub, 0.0f, std::numeric_limits::max()); //FIXME: PDPD not specify min/max + auto node_box_x_add_grid = std::make_shared(node_grid_x, node_box_x_squeeze); + auto node_box_y_add_grid = std::make_shared(node_grid_y, node_box_y_squeeze); - auto node_zeros = Constant::create(f32, {1}, {0}); - auto node_conf_clip_bool = std::make_shared(node_conf_clip, node_zeros); + auto node_input_h = Constant::create(f32, {1}, {(float)input_height}); + auto node_input_w = Constant::create(f32, {1}, {(float)input_width}); - auto node_conf_clip_cast = std::make_shared(node_conf_clip_bool, f32); //FIMXE: to=1 - - auto node_conf_set_zero = std::make_shared(node_conf_sigmoid, node_conf_clip_cast); - - /* probability */ - auto node_prob_sigmoid = std::make_shared(node_prob); + auto node_box_x_encode = std::make_shared(node_box_x_add_grid, node_input_w); + auto node_box_y_encode = std::make_shared(node_box_y_add_grid, node_input_h); - auto node_new_shape = Constant::create(i64, {5}, {1, int(num_anchors), input_height, input_width, 1}); - auto node_conf_new_shape = std::make_shared(node_conf_set_zero, node_new_shape, false); + // w/h + auto node_anchor_tensor = Constant::create( + f32, {num_anchors, 2}, anchors); // FIXME:Paddle2ONNX use float! - // broadcast confidence * probability of each category - auto node_score = std::make_shared(node_prob_sigmoid, node_conf_new_shape); + auto node_input_size = Constant::create(f32, {1}, {(float)input_size}); + auto node_anchors_div_input_size = + std::make_shared(node_anchor_tensor, node_input_size); - // for bbox which has object (greater than threshold) - auto node_conf_bool = std::make_shared(node_conf_new_shape, node_zeros); + auto split_axis = Constant::create(i32, {}, {1}); + auto node_anchor_split = + std::make_shared(node_anchors_div_input_size, split_axis, 2); - auto node_box_x_new_shape = std::make_shared(node_box_x_encode, node_new_shape, false); - auto node_box_y_new_shape = std::make_shared(node_box_y_encode, node_new_shape, false); - auto node_box_w_new_shape = std::make_shared(node_box_w_encode, node_new_shape, false); - auto node_box_h_new_shape = std::make_shared(node_box_h_encode, node_new_shape, false); - auto node_pred_box = std::make_shared(OutputVector{node_box_x_new_shape, node_box_y_new_shape, - node_box_w_new_shape, node_box_h_new_shape}, 4); + auto node_anchor_w = node_anchor_split->output(0); + auto node_anchor_h = node_anchor_split->output(1); - auto node_conf_cast = std::make_shared(node_conf_bool, f32); //FIMXE: to=1 + auto node_new_anchor_shape = + Constant::create(i64, {4}, {1, num_anchors, 1, 1}); + auto node_anchor_w_reshape = + std::make_shared(node_anchor_w, node_new_anchor_shape, false); + auto node_anchor_h_reshape = + std::make_shared(node_anchor_h, node_new_anchor_shape, false); - auto node_pred_box_mul_conf = std::make_shared(node_pred_box, node_conf_cast); //(1,3,19,19,4) (1,3,19,19,1) + auto squeeze_box_wh = Constant::create(i64, {1}, {4}); + auto node_box_w_squeeze = std::make_shared(node_box_w, squeeze_box_wh); + auto node_box_h_squeeze = std::make_shared(node_box_h, squeeze_box_wh); - auto node_box_shape = Constant::create(i64, {3}, {1, int(num_anchors) * input_height * input_width, 4}); - auto node_pred_box_new_shape = std::make_shared(node_pred_box_mul_conf, node_box_shape, false); //(1,3*19*19,4) + auto node_box_w_exp = std::make_shared(node_box_w_squeeze); + auto node_box_h_exp = std::make_shared(node_box_h_squeeze); - auto 
pred_box_split_axis = Constant::create(i32, {}, {2}); - auto node_pred_box_split = std::make_shared(node_pred_box_new_shape, pred_box_split_axis, 4); - - auto node_pred_box_x = node_pred_box_split->output(0); - auto node_pred_box_y = node_pred_box_split->output(1); - auto node_pred_box_w = node_pred_box_split->output(2); - auto node_pred_box_h = node_pred_box_split->output(3); - - /* x,y,w,h -> x1,y1,x2,y2 */ - auto node_number_two = Constant::create(f32, {1}, {2.0f}); - auto node_half_w = std::make_shared(node_pred_box_w, node_number_two); - auto node_half_h = std::make_shared(node_pred_box_h, node_number_two); - - auto node_pred_box_x1 = std::make_shared(node_pred_box_x, node_half_w); - auto node_pred_box_y1 = std::make_shared(node_pred_box_y, node_half_h); - - auto node_pred_box_x2 = std::make_shared(node_pred_box_x, node_half_w); - auto node_pred_box_y2 = std::make_shared(node_pred_box_y, node_half_h); - - /* map normalized coords to original image */ - auto squeeze_image_size_axes = Constant::create(i64, {1}, {0}); - auto node_sqeeze_image_size = std::make_shared(image_size, squeeze_image_size_axes); // input ImgSize - - auto image_size_split_axis = Constant::create(i32, {}, {-1}); - auto node_image_size_split = std::make_shared(node_sqeeze_image_size, image_size_split_axis, 2); - auto node_img_height = node_image_size_split->output(0); - auto node_img_width = node_image_size_split->output(1); - - auto node_img_width_cast = std::make_shared(node_img_width, f32); //FIMXE: to=1 - auto node_img_height_cast = std::make_shared(node_img_height, f32); - - auto node_pred_box_x1_decode = std::make_shared(node_pred_box_x1, node_img_width_cast); - auto node_pred_box_y1_decode = std::make_shared(node_pred_box_y1, node_img_height_cast); - auto node_pred_box_x2_decode = std::make_shared(node_pred_box_x2, node_img_width_cast); - auto node_pred_box_y2_decode = std::make_shared(node_pred_box_y2, node_img_height_cast); - - // reference - // Paddle/python/paddle/fluid/tests/unittests/test_yolo_box_op.py - // Paddle/paddle/fluid/operators/detection/yolo_box_op.h - // Paddle2ONNX/paddle2onnx/op_mapper/detection/yolo_box.py - clip_bbox is not used by Paddle2ONNX. 
- std::shared_ptr node_pred_box_result; - if (clip_bbox) { - auto node_number_one = Constant::create(f32, {1}, {1.0}); - auto node_new_img_height = std::make_shared(node_img_height_cast, node_number_one); - auto node_new_img_width = std::make_shared(node_img_width_cast, node_number_one); - auto node_pred_box_x2_sub_w = std::make_shared(node_pred_box_x2_decode, node_new_img_width); //x2 - (w-1) - auto node_pred_box_y2_sub_h = std::make_shared(node_pred_box_y2_decode, node_new_img_height); //y2 - (h-1) - - auto max_const = std::numeric_limits::max(); - auto node_pred_box_x1_clip = std::make_shared(node_pred_box_x1_decode, 0.0f, max_const); - auto node_pred_box_y1_clip = std::make_shared(node_pred_box_y1_decode, 0.0f, max_const); - auto node_pred_box_x2_clip = std::make_shared(node_pred_box_x2_sub_w, 0.0f, max_const); - auto node_pred_box_y2_clip = std::make_shared(node_pred_box_y2_sub_h, 0.0f, max_const); - - auto node_pred_box_x2_res = std::make_shared(node_pred_box_x2_decode, node_pred_box_x2_clip); - auto node_pred_box_y2_res = std::make_shared(node_pred_box_y2_decode, node_pred_box_y2_clip); - - node_pred_box_result = std::make_shared(OutputVector{node_pred_box_x1_clip, node_pred_box_y1_clip, - node_pred_box_x2_res, node_pred_box_y2_res}, -1); //outputs=node.output('Boxes') - } - else { - node_pred_box_result = std::make_shared(OutputVector{node_pred_box_x1_decode, node_pred_box_y1_decode, - node_pred_box_x2_decode, node_pred_box_y2_decode}, -1); //outputs=node.output('Boxes') - } - - // - auto node_score_shape = Constant::create(i64, {score_shape.size()}, score_shape); - auto node_score_new_shape = std::make_shared(node_score, node_score_shape, false); //outputs=node.output('Scores') - - NamedOutputs outputs; - outputs["Boxes"] = {node_pred_box_result}; - outputs["Scores"] = {node_score_new_shape}; - return outputs; - -} - -}}}} + auto node_box_w_encode = + std::make_shared(node_box_w_exp, node_anchor_w_reshape); + auto node_box_h_encode = + std::make_shared(node_box_h_exp, node_anchor_h_reshape); + + // confidence + auto node_conf_sigmoid = std::make_shared(node_conf); + + auto node_conf_thresh = Constant::create( + f32, {1, num_anchors, input_height, input_width, 1}, conf_thresh_mat); + + auto node_conf_sub = std::make_shared(node_conf_sigmoid, node_conf_thresh); + + auto node_conf_clip = std::make_shared( + node_conf_sub, + 0.0f, + std::numeric_limits::max()); // FIXME: PDPD not specify min/max + + auto node_zeros = Constant::create(f32, {1}, {0}); + auto node_conf_clip_bool = std::make_shared(node_conf_clip, node_zeros); + + auto node_conf_clip_cast = + std::make_shared(node_conf_clip_bool, f32); // FIMXE: to=1 + + auto node_conf_set_zero = + std::make_shared(node_conf_sigmoid, node_conf_clip_cast); + + /* probability */ + auto node_prob_sigmoid = std::make_shared(node_prob); + + auto node_new_shape = Constant::create( + i64, {5}, {1, int(num_anchors), input_height, input_width, 1}); + auto node_conf_new_shape = + std::make_shared(node_conf_set_zero, node_new_shape, false); + + // broadcast confidence * probability of each category + auto node_score = std::make_shared(node_prob_sigmoid, node_conf_new_shape); + + // for bbox which has object (greater than threshold) + auto node_conf_bool = std::make_shared(node_conf_new_shape, node_zeros); + + auto node_box_x_new_shape = + std::make_shared(node_box_x_encode, node_new_shape, false); + auto node_box_y_new_shape = + std::make_shared(node_box_y_encode, node_new_shape, false); + auto node_box_w_new_shape = + 
std::make_shared(node_box_w_encode, node_new_shape, false); + auto node_box_h_new_shape = + std::make_shared(node_box_h_encode, node_new_shape, false); + auto node_pred_box = std::make_shared(OutputVector{node_box_x_new_shape, + node_box_y_new_shape, + node_box_w_new_shape, + node_box_h_new_shape}, + 4); + + auto node_conf_cast = std::make_shared(node_conf_bool, f32); // FIMXE: to=1 + + auto node_pred_box_mul_conf = std::make_shared( + node_pred_box, node_conf_cast); //(1,3,19,19,4) (1,3,19,19,1) + + auto node_box_shape = Constant::create( + i64, {3}, {1, int(num_anchors) * input_height * input_width, 4}); + auto node_pred_box_new_shape = std::make_shared( + node_pred_box_mul_conf, node_box_shape, false); //(1,3*19*19,4) + + auto pred_box_split_axis = Constant::create(i32, {}, {2}); + auto node_pred_box_split = + std::make_shared(node_pred_box_new_shape, pred_box_split_axis, 4); + + auto node_pred_box_x = node_pred_box_split->output(0); + auto node_pred_box_y = node_pred_box_split->output(1); + auto node_pred_box_w = node_pred_box_split->output(2); + auto node_pred_box_h = node_pred_box_split->output(3); + + /* x,y,w,h -> x1,y1,x2,y2 */ + auto node_number_two = Constant::create(f32, {1}, {2.0f}); + auto node_half_w = std::make_shared(node_pred_box_w, node_number_two); + auto node_half_h = std::make_shared(node_pred_box_h, node_number_two); + + auto node_pred_box_x1 = std::make_shared(node_pred_box_x, node_half_w); + auto node_pred_box_y1 = std::make_shared(node_pred_box_y, node_half_h); + + auto node_pred_box_x2 = std::make_shared(node_pred_box_x, node_half_w); + auto node_pred_box_y2 = std::make_shared(node_pred_box_y, node_half_h); + + /* map normalized coords to original image */ + auto squeeze_image_size_axes = Constant::create(i64, {1}, {0}); + auto node_sqeeze_image_size = + std::make_shared(image_size, squeeze_image_size_axes); // input ImgSize + + auto image_size_split_axis = Constant::create(i32, {}, {-1}); + auto node_image_size_split = + std::make_shared(node_sqeeze_image_size, image_size_split_axis, 2); + auto node_img_height = node_image_size_split->output(0); + auto node_img_width = node_image_size_split->output(1); + + auto node_img_width_cast = + std::make_shared(node_img_width, f32); // FIMXE: to=1 + auto node_img_height_cast = std::make_shared(node_img_height, f32); + + auto node_pred_box_x1_decode = + std::make_shared(node_pred_box_x1, node_img_width_cast); + auto node_pred_box_y1_decode = + std::make_shared(node_pred_box_y1, node_img_height_cast); + auto node_pred_box_x2_decode = + std::make_shared(node_pred_box_x2, node_img_width_cast); + auto node_pred_box_y2_decode = + std::make_shared(node_pred_box_y2, node_img_height_cast); + + // reference + // Paddle/python/paddle/fluid/tests/unittests/test_yolo_box_op.py + // Paddle/paddle/fluid/operators/detection/yolo_box_op.h + // Paddle2ONNX/paddle2onnx/op_mapper/detection/yolo_box.py - clip_bbox is not + // used by Paddle2ONNX. 
+ std::shared_ptr node_pred_box_result; + if (clip_bbox) + { + auto node_number_one = Constant::create(f32, {1}, {1.0}); + auto node_new_img_height = + std::make_shared(node_img_height_cast, node_number_one); + auto node_new_img_width = + std::make_shared(node_img_width_cast, node_number_one); + auto node_pred_box_x2_sub_w = std::make_shared( + node_pred_box_x2_decode, node_new_img_width); // x2 - (w-1) + auto node_pred_box_y2_sub_h = std::make_shared( + node_pred_box_y2_decode, node_new_img_height); // y2 - (h-1) + + auto max_const = std::numeric_limits::max(); + auto node_pred_box_x1_clip = + std::make_shared(node_pred_box_x1_decode, 0.0f, max_const); + auto node_pred_box_y1_clip = + std::make_shared(node_pred_box_y1_decode, 0.0f, max_const); + auto node_pred_box_x2_clip = + std::make_shared(node_pred_box_x2_sub_w, 0.0f, max_const); + auto node_pred_box_y2_clip = + std::make_shared(node_pred_box_y2_sub_h, 0.0f, max_const); + + auto node_pred_box_x2_res = + std::make_shared(node_pred_box_x2_decode, node_pred_box_x2_clip); + auto node_pred_box_y2_res = + std::make_shared(node_pred_box_y2_decode, node_pred_box_y2_clip); + + node_pred_box_result = std::make_shared(OutputVector{node_pred_box_x1_clip, + node_pred_box_y1_clip, + node_pred_box_x2_res, + node_pred_box_y2_res}, + -1); // outputs=node.output('Boxes') + } + else + { + node_pred_box_result = + std::make_shared(OutputVector{node_pred_box_x1_decode, + node_pred_box_y1_decode, + node_pred_box_x2_decode, + node_pred_box_y2_decode}, + -1); // outputs=node.output('Boxes') + } + + // + auto node_score_shape = + Constant::create(i64, {score_shape.size()}, score_shape); + auto node_score_new_shape = std::make_shared( + node_score, node_score_shape, false); // outputs=node.output('Scores') + + NamedOutputs outputs; + outputs["Boxes"] = {node_pred_box_result}; + outputs["Scores"] = {node_score_new_shape}; + return outputs; + } + + } // namespace op +} // namespace pdpd diff --git a/ngraph/frontend/paddlepaddle/src/op/yolo_box.hpp b/ngraph/frontend/paddlepaddle/src/op/yolo_box.hpp index c4da5b3ac6e214..95661a3ccebd58 100644 --- a/ngraph/frontend/paddlepaddle/src/op/yolo_box.hpp +++ b/ngraph/frontend/paddlepaddle/src/op/yolo_box.hpp @@ -5,11 +5,17 @@ #pragma once #include "node_context.hpp" -namespace ngraph { -namespace frontend { -namespace pdpd { -namespace op { +namespace ngraph +{ + namespace frontend + { + namespace pdpd + { + namespace op + { + NamedOutputs yolo_box(const NodeContext& node); -NamedOutputs yolo_box (const NodeContext& node); - -}}}} \ No newline at end of file + } + } // namespace pdpd + } // namespace frontend +} // namespace ngraph \ No newline at end of file diff --git a/ngraph/frontend/paddlepaddle/src/op_table.cpp b/ngraph/frontend/paddlepaddle/src/op_table.cpp index 180f47df6f53b8..bc3f75cf9ec1f9 100644 --- a/ngraph/frontend/paddlepaddle/src/op_table.cpp +++ b/ngraph/frontend/paddlepaddle/src/op_table.cpp @@ -10,8 +10,8 @@ #include "op/concat.hpp" #include "op/conv2d.hpp" #include "op/elementwise_ops.hpp" -#include "op/fill_constant_batch_size_like.hpp" #include "op/fill_constant.hpp" +#include "op/fill_constant_batch_size_like.hpp" #include "op/flatten_contiguous_range.hpp" #include "op/interp.hpp" #include "op/leakyrelu.hpp" @@ -29,54 +29,50 @@ #include "op/unsqueeze.hpp" #include "op/yolo_box.hpp" - #include "op_table.hpp" +using namespace ngraph; +using namespace ngraph::frontend; -namespace ngraph { - namespace frontend { - namespace pdpd { - - std::map get_supported_ops() { - return { - {"arg_max", op::argmax}, 
- {"assign_value", op::assign_value}, - {"batch_norm", op::batch_norm}, - {"bilinear_interp_v2", op::bilinear_interp_v2}, - {"bilinear_interp", op::bilinear_interp_v2}, - {"cast", op::cast}, - {"clip", op::clip}, - {"concat", op::concat}, - {"conv2d", op::conv2d}, - {"elementwise_add", op::elementwise_add}, - {"elementwise_div", op::elementwise_div}, - {"elementwise_max", op::elementwise_max}, - {"elementwise_min", op::elementwise_min}, - {"elementwise_mul", op::elementwise_mul}, - {"elementwise_pow", op::elementwise_pow}, - {"elementwise_sub", op::elementwise_sub}, - {"fill_constant_batch_size_like", op::fill_constant_batch_size_like}, - {"fill_constant", op::fill_constant}, - {"flatten_contiguous_range", op::flatten_contiguous_range}, - {"leaky_relu", op::leaky_relu}, - {"matmul", op::matmul}, - {"mul", op::mul}, - {"nearest_interp_v2", op::nearest_interp_v2}, - {"nearest_interp", op::nearest_interp_v2}, - {"pad3d", op::pad3d}, - {"pool2d", op::pool2d}, - {"relu", op::relu}, - {"reshape2", op::reshape2}, - {"scale", op::scale}, - {"slice", op::slice}, - {"softmax", op::softmax}, - {"split", op::split}, - {"squeeze2", op::squeeze}, - {"unsqueeze2", op::unsqueeze}, - {"yolo_box", op::yolo_box} - }; - }; +namespace pdpd +{ + std::map get_supported_ops() + { + return {{"arg_max", op::argmax}, + {"assign_value", op::assign_value}, + {"batch_norm", op::batch_norm}, + {"bilinear_interp_v2", op::bilinear_interp_v2}, + {"bilinear_interp", op::bilinear_interp_v2}, + {"cast", op::cast}, + {"clip", op::clip}, + {"concat", op::concat}, + {"conv2d", op::conv2d}, + {"elementwise_add", op::elementwise_add}, + {"elementwise_div", op::elementwise_div}, + {"elementwise_max", op::elementwise_max}, + {"elementwise_min", op::elementwise_min}, + {"elementwise_mul", op::elementwise_mul}, + {"elementwise_pow", op::elementwise_pow}, + {"elementwise_sub", op::elementwise_sub}, + {"fill_constant_batch_size_like", op::fill_constant_batch_size_like}, + {"fill_constant", op::fill_constant}, + {"flatten_contiguous_range", op::flatten_contiguous_range}, + {"leaky_relu", op::leaky_relu}, + {"matmul", op::matmul}, + {"mul", op::mul}, + {"nearest_interp_v2", op::nearest_interp_v2}, + {"nearest_interp", op::nearest_interp_v2}, + {"pad3d", op::pad3d}, + {"pool2d", op::pool2d}, + {"relu", op::relu}, + {"reshape2", op::reshape2}, + {"scale", op::scale}, + {"slice", op::slice}, + {"softmax", op::softmax}, + {"split", op::split}, + {"squeeze2", op::squeeze}, + {"unsqueeze2", op::unsqueeze}, + {"yolo_box", op::yolo_box}}; + }; - } - } -} +} // namespace pdpd diff --git a/ngraph/frontend/paddlepaddle/src/op_table.hpp b/ngraph/frontend/paddlepaddle/src/op_table.hpp index 48e0680f202923..e9d6fee2172b3d 100644 --- a/ngraph/frontend/paddlepaddle/src/op_table.hpp +++ b/ngraph/frontend/paddlepaddle/src/op_table.hpp @@ -5,20 +5,23 @@ #pragma once #include -#include #include +#include #include #include "node_context.hpp" +namespace ngraph +{ + namespace frontend + { + namespace pdpd + { + using CreatorFunction = std::function; -namespace ngraph { -namespace frontend { -namespace pdpd { - -using CreatorFunction = std::function; - -std::map get_supported_ops(); + std::map get_supported_ops(); -}}} + } // namespace pdpd + } // namespace frontend +} // namespace ngraph diff --git a/ngraph/frontend/paddlepaddle/src/place.cpp b/ngraph/frontend/paddlepaddle/src/place.cpp index f746e07ec7447d..8deaddfed0d238 100644 --- a/ngraph/frontend/paddlepaddle/src/place.cpp +++ b/ngraph/frontend/paddlepaddle/src/place.cpp @@ -14,96 +14,113 @@ // limitations 
under the License. //***************************************************************************** - #include -#include "framework.pb.h" #include "decoder.hpp" +#include "framework.pb.h" using namespace ngraph; -using namespace frontend; +using namespace ngraph::frontend; -bool PlacePDPD::isInput() const { - const auto &model_ins = m_input_model.getInputs(); +bool PlacePDPD::isInput() const +{ + const auto& model_ins = m_input_model.getInputs(); - const auto cmp = [this](const Place::Ptr &p) { - return p.get() == this; - }; + const auto cmp = [this](const Place::Ptr& p) { return p.get() == this; }; return std::find_if(model_ins.begin(), model_ins.end(), cmp) != model_ins.end(); } -bool PlacePDPD::isOutput() const { - const auto &model_outs = m_input_model.getOutputs(); - const auto cmp = [this](const Place::Ptr &p) { - return p.get() == this; - }; +bool PlacePDPD::isOutput() const +{ + const auto& model_outs = m_input_model.getOutputs(); + const auto cmp = [this](const Place::Ptr& p) { return p.get() == this; }; return std::find_if(model_outs.begin(), model_outs.end(), cmp) != model_outs.end(); } -OpPlacePDPD::OpPlacePDPD(const InputModel &input_model, const std::vector &names, - const std::shared_ptr &op_desc) - : PlacePDPD(input_model, names), - m_op_desc(op_desc) { - +OpPlacePDPD::OpPlacePDPD(const InputModel& input_model, + const std::vector& names, + const std::shared_ptr& op_desc) + : PlacePDPD(input_model, names) + , m_op_desc(op_desc) +{ } -OpPlacePDPD::OpPlacePDPD(const InputModel &input_model, - const std::shared_ptr &op_desc) - : OpPlacePDPD(input_model, {}, op_desc) { +OpPlacePDPD::OpPlacePDPD(const InputModel& input_model, + const std::shared_ptr& op_desc) + : OpPlacePDPD(input_model, {}, op_desc) +{ } -TensorPlacePDPD::TensorPlacePDPD(const InputModel &input_model, const std::vector &names, - const std::shared_ptr &var_desc) - : PlacePDPD(input_model, names), - m_var_desc(var_desc) { +TensorPlacePDPD::TensorPlacePDPD(const InputModel& input_model, + const std::vector& names, + const std::shared_ptr& var_desc) + : PlacePDPD(input_model, names) + , m_var_desc(var_desc) +{ const auto& var_type = var_desc->type(); - if (var_type.type() == paddle::framework::proto::VarType::LOD_TENSOR) { + if (var_type.type() == paddle::framework::proto::VarType::LOD_TENSOR) + { const auto& tensor_desc = var_type.lod_tensor().tensor(); m_type = TYPE_MAP[tensor_desc.data_type()]; - m_pshape = PartialShape(std::vector(tensor_desc.dims().begin(), tensor_desc.dims().end())); + m_pshape = PartialShape( + std::vector(tensor_desc.dims().begin(), tensor_desc.dims().end())); } } -TensorPlacePDPD::TensorPlacePDPD(const InputModel &input_model, - const std::shared_ptr &var_desc) - : TensorPlacePDPD(input_model, {var_desc->name()}, var_desc) { +TensorPlacePDPD::TensorPlacePDPD(const InputModel& input_model, + const std::shared_ptr& var_desc) + : TensorPlacePDPD(input_model, {var_desc->name()}, var_desc) +{ } -std::vector TensorPlacePDPD::getConsumingPorts() const { +std::vector TensorPlacePDPD::getConsumingPorts() const +{ std::vector consuming_ports; - for (const auto & consuming_port: m_consuming_ports) { - if (const auto& locked = consuming_port.lock()) { + for (const auto& consuming_port : m_consuming_ports) + { + if (const auto& locked = consuming_port.lock()) + { consuming_ports.push_back(locked); - } else { + } + else + { PDPD_THROW("Consuming Port has expired."); } } return consuming_ports; } -Place::Ptr TensorPlacePDPD::getProducingPort() const { +Place::Ptr TensorPlacePDPD::getProducingPort() const 
+{ PDPD_ASSERT(m_producing_ports.size() > 1, "Only one producing port is supported."); - if (const auto& producing_port = m_producing_ports[0].lock()) { + if (const auto& producing_port = m_producing_ports[0].lock()) + { return producing_port; } PDPD_THROW("Producing Port has expired."); } -std::shared_ptr InPortPlacePDPD::getSourceTensorPDPD() const { - if (const auto& tensor = m_source_tensor.lock()) { +std::shared_ptr InPortPlacePDPD::getSourceTensorPDPD() const +{ + if (const auto& tensor = m_source_tensor.lock()) + { return tensor; } PDPD_THROW("Source Tensor has expired."); } -std::shared_ptr InPortPlacePDPD::getOp() { - if (const auto& op = m_op.lock()) { +std::shared_ptr InPortPlacePDPD::getOp() +{ + if (const auto& op = m_op.lock()) + { return op; } PDPD_THROW("Operation has expired."); } -std::shared_ptr OutPortPlacePDPD::getTargetTensorPDPD() const { - if (const auto& target_tensor = m_target_tensor.lock()) { +std::shared_ptr OutPortPlacePDPD::getTargetTensorPDPD() const +{ + if (const auto& target_tensor = m_target_tensor.lock()) + { return target_tensor; } PDPD_THROW("Target Tensor has expired.");