Commit a53788e

Merged list of tensor custom data type support and recent advances in PT FE organization and new ops support (basically control flow)
slyalin committed Oct 2, 2022
2 parents 6612e25 + c71072a commit a53788e
Showing 20 changed files with 2,014 additions and 1,078 deletions.
16 changes: 12 additions & 4 deletions src/bindings/python/src/openvino/frontend/pytorch/__init__.py
@@ -55,8 +55,8 @@ def __init__ (self, pt_module):
def inputs (self):
return [x.unique() for x in self.pt_module.inputs()]

def input (self, index):
return self.inputs()[index] # TODO: find specialized method
def input (self, index): # TODO: remove
return self.inputs()[index] # TODO: find specialized method

def get_input_shape (self, index):
input = self._raw_input(index)
@@ -182,6 +182,9 @@ def get_subgraph_decoder (self, index):
def get_op_type (self):
return self.pt_module.kind()

def get_schema (self):
return self.pt_module.schema()

def outputs (self):
return [x.unique() for x in self.pt_module.outputs()]

@@ -228,7 +231,7 @@ def as_constant (self):
if str(pt_value.type()) in ['torch.int32', 'int']:
#print(f'Found int value= {pt_value}, type = {type(pt_value.toIValue())}, ivalue = {pt_value.toIValue()}')
return op.Constant(OVType.i32, Shape([]), [pt_value.toIValue()]).outputs()
if str(pt_value.type()) in ['torch.FloatType', 'float']:
if str(pt_value.type()) in ['torch.float', 'torch.FloatType', 'float']:
#print(f'Found float value= {pt_value}, type = {type(pt_value.toIValue())}, ivalue = {pt_value.toIValue()}')
return op.Constant(OVType.f32, Shape([]), [pt_value.toIValue()]).outputs()
if str(pt_value.type()) in ['torch.bool', 'bool']:
@@ -272,7 +275,12 @@ def as_constant_list (self, pt_value):
return ov_const.outputs()

def input_is_none (self, index):
return index >= len(self.inputs()) or self._raw_input(index) is None
if index >= len(self.inputs()) or self._raw_input(index) is None:
return True
else:
r_input = self._raw_input(index)
return str(r_input.type()) in ['torch.NoneType', 'NoneType']


def debug (self):
print(f'DEBUG CALLED FOR {self._raw_output(0)}')
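The sharpened input_is_none above treats an input as absent in three cases: the index is out of range, there is no raw input at that position, or the input exists but TorchScript types it as NoneType. A minimal standalone sketch of that check, with hypothetical StubValue/StubDecoder stand-ins for TorchScript values:

class StubValue:
    def __init__(self, type_str):
        self._type_str = type_str

    def type(self):
        return self._type_str

class StubDecoder:
    def __init__(self, raw_inputs):
        self._raw_inputs = raw_inputs  # list of StubValue or None

    def inputs(self):
        return list(range(len(self._raw_inputs)))

    def _raw_input(self, index):
        return self._raw_inputs[index]

    def input_is_none(self, index):
        # Out of range, or no raw input at all
        if index >= len(self.inputs()) or self._raw_input(index) is None:
            return True
        # Present, but TorchScript typed it as NoneType
        return str(self._raw_input(index).type()) in ['torch.NoneType', 'NoneType']

decoder = StubDecoder([StubValue('torch.float'), StubValue('NoneType'), None])
assert not decoder.input_is_none(0)  # real input
assert decoder.input_is_none(1)      # present but NoneType-typed
assert decoder.input_is_none(2)      # raw input missing
assert decoder.input_is_none(3)      # index out of range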
@@ -58,6 +58,9 @@ namespace py = pybind11;
std::string get_op_type() const override
{ PYBIND11_OVERRIDE_PURE(std::string, Decoder, get_op_type); }

std::string get_schema() const override
{ PYBIND11_OVERRIDE_PURE(std::string, Decoder, get_schema); }

size_t num_of_outputs () const override
{ PYBIND11_OVERRIDE_PURE(size_t, Decoder, num_of_outputs); }

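The PYBIND11_OVERRIDE_PURE line added above is what lets a Python decoder supply get_schema: a C++ call made through the Decoder base pointer dispatches into the Python override. A sketch of the Python side, using a plain-Python stand-in for the bound base class (hypothetical names):

class DecoderBase:
    # Stand-in for the pybind11-bound abstract Decoder
    def get_schema(self):
        raise NotImplementedError

class TorchDecoder(DecoderBase):
    def __init__(self, pt_node):
        self.pt_node = pt_node

    def get_schema(self):
        # Mirrors the TorchScript decoder change in __init__.py above
        return str(self.pt_node.schema())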
9 changes: 7 additions & 2 deletions src/core/include/openvino/op/util/framework_node.hpp
@@ -9,6 +9,7 @@
#include "openvino/core/partial_shape.hpp"
#include "openvino/core/strides.hpp"
#include "openvino/op/op.hpp"
#include "openvino/op/util/multi_subgraph_base.hpp"

namespace ov {
namespace op {
@@ -74,14 +75,14 @@ class OPENVINO_API FrameworkNodeAttrs {
std::unordered_map<std::string, std::string> m_attrs;
};

class OPENVINO_API FrameworkNode : public Op {
class OPENVINO_API FrameworkNode : public MultiSubGraphOp {
public:
OPENVINO_OP("FrameworkNode", "util");
BWDCMP_RTTI_DECLARATION;

FrameworkNode() = default;

explicit FrameworkNode(const OutputVector& inputs, size_t output_size = 1);
explicit FrameworkNode(const OutputVector& inputs, size_t output_size = 1, size_t num_subgraphs = 0);

void validate_and_infer_types() override;

@@ -101,6 +102,10 @@ class OPENVINO_API FrameworkNode : public Op {
std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;

void cache_output_descriptor();
void clone_to(FrameworkNode& dst) const;

protected:
FrameworkNode(const FrameworkNode&);

private:
std::vector<std::tuple<ov::PartialShape, ov::element::Type>> m_inputs_desc;
2 changes: 1 addition & 1 deletion src/core/src/node.cpp
@@ -452,7 +452,7 @@ std::set<ov::Input<ov::Node>> ov::Node::get_output_target_inputs(size_t i) const
}

ov::descriptor::Tensor& ov::Node::get_output_tensor(size_t i) const {
NGRAPH_CHECK(i < m_outputs.size(), "index '", i, "' out of range in get_output_tensor(size_t i)");
NGRAPH_CHECK(i < m_outputs.size(), "index '", i, "' out of range in get_output_tensor(size_t i) for node ", *this);
return m_outputs[i].get_tensor();
}

39 changes: 32 additions & 7 deletions src/core/src/op/util/framework_node.cpp
@@ -4,25 +4,50 @@

#include "openvino/op/util/framework_node.hpp"

#include "ngraph/graph_util.hpp"
#include "itt.hpp"

BWDCMP_RTTI_DEFINITION(ov::op::util::FrameworkNode);

ov::op::util::FrameworkNode::FrameworkNode(const OutputVector& inputs, size_t output_size) : Op(inputs) {
ov::op::util::FrameworkNode::FrameworkNode(const OutputVector& inputs, size_t output_size, size_t num_subgraphs)
: MultiSubGraphOp(inputs, num_subgraphs) {
set_output_size(output_size);
constructor_validate_and_infer_types();
}

ov::op::util::FrameworkNode::FrameworkNode(const ov::op::util::FrameworkNode& other) : MultiSubGraphOp() {
set_arguments(other.input_values());
other.clone_to(*this);
}

void ov::op::util::FrameworkNode::clone_to(ov::op::util::FrameworkNode& dst) const {
dst.set_output_size(m_output_descriptions.size());

for (size_t i = 0; i < get_output_size(); ++i) {
dst.set_output_type(i, get_output_element_type(i), get_output_partial_shape(i));
}
dst.m_inputs_desc = m_inputs_desc;
dst.m_output_desc = m_output_desc;
dst.m_attrs = m_attrs;

    // Clone subgraph bodies from *this*; dst starts out with no bodies.
    for (size_t i = 0; i < m_bodies.size(); i++) {
dst.m_bodies.push_back(ov::clone_model(*get_function(i)));
}

for (auto& input_description : m_input_descriptions[0]) {
dst.m_input_descriptions[0].push_back(input_description->copy());
}
for (auto& output_description : m_output_descriptions[0]) {
dst.m_output_descriptions[0].push_back(output_description->copy());
}
dst.validate_and_infer_types();
}

std::shared_ptr<ov::Node> ov::op::util::FrameworkNode::clone_with_new_inputs(const OutputVector& new_args) const {
NGRAPH_OP_SCOPE(FrameworkNode_clone_with_new_inputs);
check_new_args_count(this, new_args);
auto node = std::make_shared<op::util::FrameworkNode>(new_args);
for (size_t i = 0; i < get_output_size(); ++i) {
node->set_output_type(i, get_output_element_type(i), get_output_partial_shape(i));
}
node->m_inputs_desc = m_inputs_desc;
node->m_output_desc = m_output_desc;
node->m_attrs = m_attrs;
clone_to(*node);
return node;
}

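The new clone_to centralizes state copying so the protected copy constructor and clone_with_new_inputs share one path, and each subgraph body is deep-copied (via ov::clone_model) so a clone owns its own bodies. A conceptual rendition of the pattern in plain Python (hypothetical names, sketch only):

import copy

class StubFrameworkNode:
    def __init__(self, inputs=()):
        self.inputs = list(inputs)
        self.attrs = {}
        self.bodies = []  # nested subgraphs

    def clone_to(self, dst):
        # Copy everything except the input arguments in one place.
        dst.attrs = dict(self.attrs)
        # Deep-copy each body so the clone owns its subgraphs.
        dst.bodies = [copy.deepcopy(body) for body in self.bodies]

    def clone_with_new_inputs(self, new_inputs):
        node = StubFrameworkNode(new_inputs)
        self.clone_to(node)
        return node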
18 changes: 8 additions & 10 deletions src/core/src/pass/serialize.cpp
@@ -501,7 +501,14 @@ class XmlSerializer : public ngraph::AttributeVisitor {
m_xml_node.append_attribute(name.c_str()).set_value(create_atribute_list(adapter).c_str());
}
void on_adapter(const std::string& name, ngraph::ValueAccessor<std::shared_ptr<Function>>& adapter) override {
if (name == "body" || name == "then_body" || name == "else_body") {
if (name == "net") {
ngfunction_2_ir(m_xml_node,
*adapter.get(),
m_custom_opsets,
m_constant_write_handler,
m_version,
m_deterministic);
} else {
// TI and Loop do not have attributes like regular ops; it is necessary to append "body"
// to the layer above (m_xml_node.parent()), because in ngfunction_2_ir() a layer (m_xml_node)
// with empty attributes is removed.
@@ -514,15 +521,6 @@
m_deterministic);
xml_body.remove_attribute("name");
xml_body.remove_attribute("version");
} else if (name == "net") {
ngfunction_2_ir(m_xml_node,
*adapter.get(),
m_custom_opsets,
m_constant_write_handler,
m_version,
m_deterministic);
} else {
NGRAPH_CHECK(false, "Unsupported Model name.");
}
}
};
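The reordering above turns the dispatch from a whitelist into a default: "net" serializes the model into the current node, while any other Function-valued attribute is now treated as a nested subgraph body, where previously only body/then_body/else_body were accepted and every other name failed the NGRAPH_CHECK. A toy Python sketch of the new control flow (all names hypothetical):

def serialize_function_attribute(name, serialize_fn):
    if name == "net":
        # Whole model: serialize directly into the current XML node.
        return serialize_fn(target="m_xml_node")
    # Any other name becomes a nested body appended to the parent layer.
    return serialize_fn(target="m_xml_node.parent()", tag=name)

def fake_serialize(target, tag="net"):
    return f"<{tag}> written into {target}"

print(serialize_function_attribute("net", fake_serialize))        # <net> written into m_xml_node
print(serialize_function_attribute("then_body", fake_serialize))  # <then_body> written into m_xml_node.parent()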
@@ -1,6 +1,8 @@
// This is a torch::jit-dependent piece of code for decoding a TorchScript graph inside the Torch runtime
// This code was copied from the PT source tree (a POC branch); it cannot be compiled without torch dependencies

#pragma once

#include <map>
#include <exception>
#include <memory>
@@ -114,6 +114,9 @@ struct Decoder { // TODO: Is it required to be enable_shared_from_this?
// Decide whether we need an equivalent member for integer representation (in this case a map is required to understand what it means)
virtual std::string get_op_type() const = 0;

// Returns PT node schema as a string
virtual std::string get_schema() const = 0;

// TODO: use canonical name output_size
virtual size_t num_of_outputs () const = 0;

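For reference, the string get_schema returns is TorchScript's function schema for the node. A quick way to inspect one, assuming a recent PyTorch build where jit graph nodes expose kind() and schema():

import torch

def f(x, y):
    return x + y

scripted = torch.jit.script(f)
for node in scripted.graph.nodes():
    if node.kind() == "aten::add":
        # Prints something like:
        # aten::add.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor
        print(node.schema())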
@@ -0,0 +1,182 @@
#pragma once

#include <openvino/frontend/exception.hpp>
#include <openvino/frontend/node_context.hpp>
#include <openvino/frontend/pytorch/decoder.hpp>
#include <openvino/opsets/opset8.hpp>

#include "exception.hpp"

namespace ov {
namespace frontend {
namespace pytorch {

typedef std::map<size_t, Output<Node>> TensorMap;

class NodeContext : public frontend::NodeContext {
public:
NodeContext(std::shared_ptr<Decoder> decoder,
TensorMap* tensor_map,
ParameterVector* external_parameters,
const TensorMap& ext_tensor_map)
: // TODO: why is the following ctor explicit?
frontend::NodeContext(decoder->get_op_type()),
m_decoder(decoder),
m_tensor_map(tensor_map),
m_ext_tensor_map(ext_tensor_map),
m_external_parameters(external_parameters) {}

// Do not search for input in tensor map; try to access it as a constant of specified type T and return its value
template <typename T>
T const_input(size_t index) const;

size_t get_input_size() const override {
return m_decoder->inputs().size();
};

// Search for input in tensor map and return an output port for already converted op
// TODO: int is used because the base class uses it; naturally it should be size_t for PT
Output<Node> get_input(int index) const override {
// std::cerr << "Trying to map input to ngraph...";
OV_FRONTEND_REQUIRE(!m_decoder->input_is_none(index));
auto input = m_decoder->input(index);
OV_FRONTEND_REQUIRE(m_tensor_map->count(input));
return m_tensor_map->at(input);
}

// TODO: upstream to base class
OutputVector inputs() const {
OutputVector res;
for (size_t input : m_decoder->inputs()) {
// std::cerr << "Searching for input: " << input->unique() << "\n";
OV_FRONTEND_REQUIRE(m_tensor_map->find(input) != m_tensor_map->end());
res.push_back(m_tensor_map->at(input));
}
return res;
}

bool input_is_none(size_t index) const {
return m_decoder->input_is_none(index);
}

// Convert the resulting value of this node to an ngraph Constant; works correctly only for nodes that
// produce a constant value, such as prim::Constant
OutputVector as_constant() const {
return m_decoder->as_constant();
}

/*
TODO: Uncomment when the explicit NodeContext ctor no longer requires passing op_type
const std::string& get_op_type() const override {
return m_decoder->get_op_type();
}
*/

std::string get_schema() const {
return m_decoder->get_schema();
}

size_t num_of_outputs() const {
return m_decoder->num_of_outputs();
}

std::vector<size_t> outputs() const {
return m_decoder->outputs();
}

std::shared_ptr<Node> mark_node(std::shared_ptr<Node> ov_node) const {
return m_decoder->mark_node(ov_node);
}

void mark_nodes(std::vector<std::shared_ptr<Node>> ov_nodes) const {
return m_decoder->mark_nodes(ov_nodes);
}

Output<Node> mark_output(Output<Node> ov_output) const {
return m_decoder->mark_node(ov_output.get_node_shared_ptr());
}

Any get_attribute_as_any(const std::string&) const override {
throw std::runtime_error(
"There is no any named attributes in Pytorch node, query by attribute name is not implemented");
}

void mutate_input(size_t index, Output<Node> ov_output) {
OV_FRONTEND_REQUIRE(!m_decoder->input_is_none(index));
auto input = m_decoder->input(index);
OV_FRONTEND_REQUIRE(m_tensor_map->count(input));
m_tensor_map->at(input).get_tensor().set_names({std::to_string(input) + "_"});
// TODO: find out why this doesn't work
ov_output.get_tensor().add_names({std::to_string(input)});
(*m_tensor_map)[input] = ov_output;
m_mutated_tensors.insert(input);
}

std::set<size_t> get_mutated_tensors() const {
return m_mutated_tensors;
}

std::shared_ptr<Decoder> get_decoder() const {
return m_decoder;
}

void add_tensor_to_context(size_t index, Output<Node> ov_output) {
if (m_tensor_map->count(index)) {
std::cerr << "[ WARNING ] Current context has tensor. Rewriting." << std::endl;
}
ov_output.get_tensor().add_names({std::to_string(index)});
(*m_tensor_map)[index] = ov_output;
}

Output<Node> get_tensor_from_model(size_t index) {
if (m_tensor_map->find(index) != m_tensor_map->end()) {
return m_tensor_map->at(index);
} else {
return Output<Node>();
}
}

Output<Node> get_tensor_from_model_or_create_input(size_t index) {
if (m_tensor_map->find(index) != m_tensor_map->end()) {
return m_tensor_map->at(index);
} else {
// nested subgraphs case
auto parameter = std::make_shared<opset8::Parameter>(element::dynamic, PartialShape::dynamic());
parameter->get_output_tensor(0).add_names({std::to_string(index)});
(*m_tensor_map)[index] = parameter;
m_external_parameters->push_back(parameter);
std::cout << "Nested case, created: " << parameter << std::endl;
return parameter;
}
}

Output<Node> get_input_from_visible_context(size_t index) {
OV_FRONTEND_REQUIRE(index < get_input_size());
auto input_tensor = get_input(index);
auto input_node = input_tensor.get_node_shared_ptr();
if (std::dynamic_pointer_cast<opset8::Parameter>(input_node)) {
// We need to look into the external context for inputs that would be fed into this parameter
auto name = input_node->get_output_tensor(0).get_any_name();
size_t tensor_idx = (size_t)std::stoll(name);
if (m_ext_tensor_map.count(tensor_idx)) {
input_tensor = m_ext_tensor_map.at(tensor_idx);
}
}
return input_tensor;
}

std::shared_ptr<ov::Model> convert_subgraph(size_t index);

private:
std::shared_ptr<opset8::Constant> get_constant_at_input(size_t index) const;

std::shared_ptr<Decoder> m_decoder;
std::set<size_t> m_mutated_tensors;
TensorMap* m_tensor_map;
const TensorMap& m_ext_tensor_map;
ParameterVector* m_external_parameters;
};

} // namespace pytorch
} // namespace frontend
} // namespace ov
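To see how the pieces fit together, here is a conceptual, runnable sketch of the calling pattern a converter follows against this NodeContext surface; all names are hypothetical and plain numbers stand in for tensors:

class StubContext:
    def __init__(self, inputs):
        self._inputs = inputs
        self.marked = []  # nodes tagged as originating from this PT op

    def get_input(self, index):
        assert not self.input_is_none(index)
        return self._inputs[index]

    def input_is_none(self, index):
        return index >= len(self._inputs) or self._inputs[index] is None

    def mark_node(self, node):
        self.marked.append(node)
        return node

def translate_add(context):
    # aten::add(self, other, alpha=1): the third input is optional
    lhs, rhs = context.get_input(0), context.get_input(1)
    if context.input_is_none(2):
        return [context.mark_node(lhs + rhs)]
    return [context.mark_node(lhs + context.get_input(2) * rhs)]

print(translate_add(StubContext([2.0, 3.0, None])))  # [5.0]
print(translate_add(StubContext([2.0, 3.0, 10.0])))  # [32.0]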