Skip to content

Commit

Permalink
Merge pull request #14 from mvafin/mvafin/pt_fe/subgraphs
Browse files Browse the repository at this point in the history
Support shufflenet and add layer test for relu
  • Loading branch information
slyalin authored Oct 4, 2022
2 parents 779ee1d + 7b4c43f commit 59552da
Show file tree
Hide file tree
Showing 18 changed files with 571 additions and 206 deletions.
3 changes: 2 additions & 1 deletion src/core/src/op/scatter_elements_update.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -44,8 +44,9 @@ void op::v3::ScatterElementsUpdate::validate_and_infer_types() {

NODE_VALIDATION_CHECK(this, axis_et.is_integral(), "Axis element type must be integral_number, but is: ", axis_et);

element::Type merged_type;
NODE_VALIDATION_CHECK(this,
data_et == updates_et,
element::Type::merge(merged_type, data_et, updates_et),
"Data type and updates type are required to be the same. ",
"Got: ",
data_et,
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -19,19 +19,51 @@ class PYTORCH_API FrontEnd : public ov::frontend::FrontEnd {
public:
using Ptr = std::shared_ptr<FrontEnd>;

/// \brief Completely convert and normalize entire Model, throws if it is not possible
/// \param model Input model
/// \return fully converted OV Model
std::shared_ptr<Model> convert(const ov::frontend::InputModel::Ptr& model) const override;

/// \brief Completely convert the remaining, not converted part of a Model.
/// \param partiallyConverted partially converted OV Model
void convert(const std::shared_ptr<Model>& partiallyConverted) const override;

/// \brief Convert only those parts of the model that can be converted leaving others
/// as-is. Converted parts are not normalized by additional transformations; normalize
/// function or another form of convert function should be called to finalize the
/// conversion process.
/// \param model Input model
/// \return partially converted OV Model
std::shared_ptr<Model> convert_partially(const InputModel::Ptr& model) const override;

/// \brief Convert operations with one-to-one mapping with decoding nodes.
/// Each decoding node is an OV node representing a single FW operation node with
/// all attributes represented in FW-independent way.
/// \param model Input model
/// \return OV Model after decoding
std::shared_ptr<Model> decode(const InputModel::Ptr& model) const override;

/// \brief Runs normalization passes on Model that was loaded with partial conversion
/// \param Model partially converted OV Model
void normalize(const std::shared_ptr<ov::Model>& model) const override;

/// \brief Gets name of this FrontEnd. Can be used by clients
/// if frontend is selected automatically by FrontEndManager::load_by_model
/// \return PyTorch frontend name.
std::string get_name() const override {
return "pytorch";
}

/// \brief Register base extension in the FrontEnd
/// \param extension base extension
void add_extension(const std::shared_ptr<ov::Extension>& extension) override;

protected:
bool supported_impl(const std::vector<ov::Any>& variants) const override;

ov::frontend::InputModel::Ptr load_impl(const std::vector<ov::Any>& variants) const override;
};

} // namespace pytorch
} // namespace frontend
} // namespace ov

} // namespace pytorch
} // namespace frontend
} // namespace ov
Original file line number Diff line number Diff line change
Expand Up @@ -145,7 +145,7 @@ class NodeContext : public frontend::NodeContext {
parameter->get_output_tensor(0).add_names({std::to_string(index)});
(*m_tensor_map)[index] = parameter;
m_external_parameters->push_back(parameter);
std::cout << "Nested case, created: " << parameter << std::endl;
//std::cout << "Nested case, created: " << parameter << std::endl;
return parameter;
}
}
Expand Down
58 changes: 47 additions & 11 deletions src/frontends/pytorch/src/frontend.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -9,22 +9,44 @@
#include "exception.hpp"
#include "input_model.hpp"
#include "transforms.hpp"
#include "openvino/frontend/pytorch/node_context.hpp"
#include "op_table.hpp"
#include "openvino/frontend/exception.hpp"
#include "openvino/frontend/pytorch/node_context.hpp"
#include "pt_framework_node.hpp"
#include "transforms/aten_cat_replacer.hpp"
#include "transforms/prim_list_unpack_replacer.hpp"
#include "utils.hpp"

namespace ov {
namespace frontend {
namespace pytorch {

// Fully converts the input model and normalizes the result; throws if any
// operation could not be converted.
std::shared_ptr<Model> FrontEnd::convert(const ov::frontend::InputModel::Ptr& model) const {
    // Convert whatever is convertible first, then run normalization passes.
    auto converted_model = convert_partially(model);
    normalize(converted_model);

    // Any PtFrameworkNode remaining in the graph is an operation that was not
    // converted; collect the distinct op type names for the error report.
    std::set<std::string> unconverted_ops_types;
    for (const auto& node : converted_model->get_ordered_ops()) {
        auto fw_node = ov::as_type_ptr<ov::frontend::pytorch::PtFrameworkNode>(node);
        if (fw_node != nullptr) {
            unconverted_ops_types.insert(fw_node->get_decoder()->get_op_type());
        }
    }

    std::stringstream ops_str;
    for (const auto& op_type : unconverted_ops_types) {
        ops_str << op_type << "\n";
    }
    FRONT_END_OP_CONVERSION_CHECK(unconverted_ops_types.size() == 0,
                                  "Model wasn't fully converted. Unconverted operation types:\n" + ops_str.str());
    return converted_model;
}

// NOTE(review): converting the remaining part of a partially converted model
// is not implemented yet; this always throws via FRONT_END_NOT_IMPLEMENTED.
void FrontEnd::convert(const std::shared_ptr<Model>& partiallyConverted) const {
FRONT_END_NOT_IMPLEMENTED(convert);
}

std::shared_ptr<Model> FrontEnd::convert_partially(const ov::frontend::InputModel::Ptr& model) const {
try {
// std::cerr << "[ HERE ]\n";
auto pytorch_model = std::dynamic_pointer_cast<pytorch::InputModel>(model);
// TODO: Remove this super-hack, tensor_map should be local for each conversion activity, see more info where
// tensor_map is defined now
auto model = convert_pytorch_model(pytorch_model->m_model);

// TODO: Propose better solution for the next code block
Expand All @@ -36,8 +58,8 @@ std::shared_ptr<Model> FrontEnd::convert(const ov::frontend::InputModel::Ptr& mo
auto self = model->get_parameters()[0];
if (self->output(0).get_target_inputs().empty()) {
// There is no consumers: safe to remove
std::cout << "[ WARNING ] Removing parameter[0] in converted Pytorch model, because it is never "
"used and treated as `self`\n";
// std::cout << "[ WARNING ] Removing parameter[0] in converted Pytorch model, because it is never "
// "used and treated as `self`\n";
model->remove_parameter(self);
} else {
std::cout << "[ WARNING ] Couldn't remove parameter[0] in converted Pytorch model\n";
Expand All @@ -46,25 +68,39 @@ std::shared_ptr<Model> FrontEnd::convert(const ov::frontend::InputModel::Ptr& mo
apply_pytorch_conversion_transforms(model);
return model;
} catch (const std::runtime_error& e) {
std::cerr << "[ ERROR ] Error while converting pytorch model: " << e.what() << "\n";
std::cerr << "[ ERROR ] Unexpected error while converting pytorch model: " << e.what() << "\n";
std::cerr << "Rethrowing. Misleading error message from pybind11 may come next. TODO.";
throw;
}
}

// NOTE(review): decoding (one-to-one mapping to framework nodes without
// conversion) is not implemented yet; always throws via FRONT_END_NOT_IMPLEMENTED.
std::shared_ptr<Model> FrontEnd::decode(const InputModel::Ptr& model) const {
FRONT_END_NOT_IMPLEMENTED(decode);
}

// Runs the PyTorch-specific post-conversion transformation passes over a
// (possibly partially) converted model.
void FrontEnd::normalize(const std::shared_ptr<ov::Model>& model) const {
    ov::pass::Manager pass_manager;
    // Registration order is preserved: cat replacement runs before list-unpack.
    pass_manager.register_pass<ov::frontend::pytorch::pass::AtenCatToConcat>();
    pass_manager.register_pass<ov::frontend::pytorch::pass::PrimListUnpackReplacer>();
    pass_manager.run_passes(model);
}

// NOTE(review): extension registration is not supported yet; always throws
// via FRONT_END_NOT_IMPLEMENTED.
void FrontEnd::add_extension(const std::shared_ptr<ov::Extension>& extension) {
FRONT_END_NOT_IMPLEMENTED(add_extension);
}

// Always reports "not supported", so this frontend is never auto-selected by
// FrontEndManager::load_by_model; presumably it must be chosen explicitly by
// name — TODO confirm against the frontend manager's selection logic.
bool FrontEnd::supported_impl(const std::vector<ov::Any>& variants) const {
// std::cout << "[ ----- DEBUG ------ ] supported_impl with " << variants.size() << " arguments\n";
return false;
}

// Creates an InputModel from the single expected variant: a torchscript
// Decoder object. Throws std::runtime_error on any other variant count.
// (Fix: the pasted diff residue carried both the old "instead." and the new
// " instead." message lines; only the corrected one is kept.)
ov::frontend::InputModel::Ptr FrontEnd::load_impl(const std::vector<ov::Any>& variants) const {
    if (variants.size() != 1) {
        throw std::runtime_error("Pytorch frontend supports exactly one parameter in model representation, got " +
                                 std::to_string(variants.size()) + " instead.");
    }
    // The single variant must hold a shared_ptr<Decoder>; Any::as throws if not.
    auto decoder = variants[0].as<std::shared_ptr<Decoder>>();
    return std::make_shared<pytorch::InputModel>(decoder);
}

Expand Down
4 changes: 2 additions & 2 deletions src/frontends/pytorch/src/node_context.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -49,8 +49,8 @@ std::shared_ptr<ov::Model> NodeContext::convert_subgraph(size_t index) {
auto parameter = model->get_parameters()[i];
if (parameter->output(0).get_target_inputs().empty()) {
// There is no consumers: safe to remove
std::cout << "[ WARNING ] Removing parameter " << parameter
<< " in converted Pytorch model, because it is never used" << std::endl;
//std::cout << "[ WARNING ] Removing parameter " << parameter
// << " in converted Pytorch model, because it is never used" << std::endl;
model->remove_parameter(parameter);
}
}
Expand Down
64 changes: 64 additions & 0 deletions src/frontends/pytorch/src/op/transpose.cpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,64 @@
// Copyright (C) 2018-2022 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include <climits>

#include "openvino/frontend/pytorch/node_context.hpp"
#include "openvino/opsets/opset8.hpp"
#include "utils.hpp"

namespace ov {
namespace frontend {
namespace pytorch {
namespace op {

// Translates aten::transpose(input, dim0, dim1): swaps two dimensions of the
// input tensor. The permutation is built with ShapeOf/Range/
// ScatterElementsUpdate so the input rank does not need to be static.
// (Fix: removed the obsolete commented-out static-rank implementation and the
// stray `;` after the function body.)
OutputVector translate_transpose(NodeContext& context) {
    // dim0/dim1 must be trace-time constants; their sign decides whether a
    // run-time normalization (rank + dim) node is inserted.
    auto dim0 = context.const_input<int64_t>(1);
    auto dim1 = context.const_input<int64_t>(2);
    auto shape = std::make_shared<opset8::ShapeOf>(context.get_input(0), element::i32);
    auto rank_ = std::make_shared<opset8::ShapeOf>(shape, element::i32);
    auto rank = std::make_shared<opset8::Squeeze>(rank_);
    // TODO: use opset8::If for dim normalization instead of trace-time sign check
    auto dim0_node = context.get_input(1);
    auto dim1_node = context.get_input(2);
    if (dim0 < 0) {
        dim0_node = std::make_shared<opset8::Add>(rank, dim0_node);
    }
    if (dim1 < 0) {
        dim1_node = std::make_shared<opset8::Add>(rank, dim1_node);
    }
    // Identity permutation [0, rank).
    auto start = opset8::Constant::create(element::i32, {}, {0});
    auto step = opset8::Constant::create(element::i32, {}, {1});
    auto range = std::make_shared<opset8::Range>(start, rank, step, element::i32);

    // Swap positions dim0 and dim1 in the identity permutation.
    // NOTE(review): assumes the dim inputs have an element type compatible
    // with the i32 `range` for ScatterElementsUpdate — TODO confirm.
    auto axis_0 = opset8::Constant::create(element::i64, Shape{}, {0});
    dim0_node = std::make_shared<opset8::Unsqueeze>(dim0_node, axis_0);
    dim1_node = std::make_shared<opset8::Unsqueeze>(dim1_node, axis_0);
    auto indices = std::make_shared<opset8::Concat>(OutputVector{dim0_node, dim1_node}, 0);
    auto updates = std::make_shared<opset8::Concat>(OutputVector{dim1_node, dim0_node}, 0);
    auto scatter = std::make_shared<opset8::ScatterElementsUpdate>(range, indices, updates, axis_0);

    return {context.mark_node(std::make_shared<opset8::Transpose>(context.get_input(0), scatter))};
}

} // namespace op
} // namespace pytorch
} // namespace frontend
} // namespace ov
Loading

0 comments on commit 59552da

Please sign in to comment.