
Commit 91314c2

Merge pull request #2 from itikhono/itikhono/tf_fe/master
Resolve review comments
2 parents: 84471a9 + c330468

59 files changed: +1,414 −395 lines


docs/template_plugin/src/template_executable_network.cpp (+13 −11)

@@ -8,11 +8,16 @@
 #include <ie_plugin_config.hpp>
 #include <threading/ie_executor_manager.hpp>

+#include "cpp/ie_cnn_network.h"
+#include "ie_icnn_network.hpp"
 #include "ie_icore.hpp"
+#include "ie_ngraph_utils.hpp"
+#include "openvino/core/except.hpp"
 #include "template/template_config.hpp"
 #include "template_itt.hpp"
 #include "template_plugin.hpp"
 #include "transformations/serialize.hpp"
+#include "transformations/utils/utils.hpp"

 using namespace TemplatePlugin;

@@ -66,18 +71,19 @@ TemplatePlugin::ExecutableNetwork::ExecutableNetwork(std::istream& model,
         model.read(dataBlob->buffer(), dataSize);
     }

+    auto cnnnetwork = _plugin->GetCore()->ReadNetwork(xmlString, std::move(dataBlob));
+
     // TODO: implement Import / Export of configuration options and merge with `cfg`
     // TODO: implement Import / Export of network precisions, layouts, preprocessing info
-    InferenceEngine::InputsDataMap inputInfoMap;
-    InferenceEngine::OutputsDataMap outputInfoMap;
+    InferenceEngine::InputsDataMap inputInfoMap = cnnnetwork.getInputsInfo();
+    InferenceEngine::OutputsDataMap outputInfoMap = cnnnetwork.getOutputsInfo();

-    auto cnnnetwork = _plugin->GetCore()->ReadNetwork(xmlString, std::move(dataBlob));
-
-    setNetworkInputs(cnnnetwork.getInputsInfo());
-    setNetworkOutputs(cnnnetwork.getOutputsInfo());
+    setNetworkInputs(inputInfoMap);
+    setNetworkOutputs(outputInfoMap);
     SetPointerToPlugin(_plugin->shared_from_this());

     try {
+        // TODO: remove compilation, network is already compiled and serialized in compiled form
         CompileNetwork(cnnnetwork.getFunction(), inputInfoMap, outputInfoMap);
         InitExecutor();  // creates thread-based executor using for async requests
     } catch (const InferenceEngine::Exception&) {

@@ -107,11 +113,7 @@ void TemplatePlugin::ExecutableNetwork::CompileNetwork(const std::shared_ptr<con
     // Generate backend specific blob mappings. For example Inference Engine uses not ngraph::Result nodes friendly name
     // as inference request output names but the name of the layer before.
     for (auto&& result : _function->get_results()) {
-        auto previousOutput = result->get_input_source_output(0);
-        auto outputName = previousOutput.get_node()->get_friendly_name();
-        if (previousOutput.get_node()->get_output_size() > 1) {
-            outputName += '.' + std::to_string(previousOutput.get_index());
-        }
+        auto outputName = ngraph::op::util::create_ie_output_name(result->input_value(0));
         _outputIndex.emplace(outputName, _function->get_result_index(result));
     }
     for (auto&& parameter : _function->get_parameters()) {

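The last hunk replaces hand-rolled output naming with `ngraph::op::util::create_ie_output_name`. Judging purely from the removed lines, the helper boils down to something like this sketch (a hypothetical standalone reimplementation, not the actual utility source):

    #include <string>
    #include <ngraph/node.hpp>

    // Sketch: the inference-request output name is the friendly name of the node
    // feeding the Result, plus ".<output index>" when that node has several outputs.
    std::string create_ie_output_name_sketch(const ngraph::Output<ngraph::Node>& prev) {
        auto name = prev.get_node()->get_friendly_name();
        if (prev.get_node()->get_output_size() > 1) {
            name += '.' + std::to_string(prev.get_index());
        }
        return name;
    }
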
docs/template_plugin/src/template_executable_network.hpp (+1)

@@ -43,6 +43,7 @@ class ExecutableNetwork : public InferenceEngine::ExecutableNetworkThreadSafeDef

 private:
     friend class TemplateInferRequest;
+    friend class Plugin;

     void CompileNetwork(const std::shared_ptr<const ngraph::Function>& function,
                         const InferenceEngine::InputsDataMap& inputInfoMap,

docs/template_plugin/src/template_plugin.cpp (+9 −5)

@@ -17,6 +17,7 @@
 #include <transformations/rt_info/fused_names_attribute.hpp>
 #include <transformations/convert_precision.hpp>

+#include "openvino/core/except.hpp"
 #include "template/template_config.hpp"
 #include "template_itt.hpp"
 #include "template_plugin.hpp"

@@ -68,7 +69,7 @@ std::shared_ptr<ngraph::Function> TransformNetwork(const std::shared_ptr<const n
     // TODO: add post-processing based on outputsInfoMap
     // Example: register CommonOptimizations transformation from transformations library
     passManager.register_pass<ngraph::pass::CommonOptimizations>();
-    // GAPI supports only FP32 networks for pre-processing
+    // G-API supports only FP32 networks for pre-processing
     bool needF16toF32 = false;
     for (const auto& param : function->get_parameters()) {
         if (param->get_element_type() == ngraph::element::f16 &&

@@ -78,9 +79,10 @@ std::shared_ptr<ngraph::Function> TransformNetwork(const std::shared_ptr<const n
             break;
         }
     }
-    if (needF16toF32)
+    if (needF16toF32) {
         passManager.register_pass<ngraph::pass::ConvertPrecision>(
             precisions_array{{ngraph::element::f16, ngraph::element::f32}});
+    }
     // Example: register plugin specific transformation
     passManager.register_pass<ngraph::pass::DecomposeDivideMatcher>();
     passManager.register_pass<ngraph::pass::ReluReluFusionMatcher>();

@@ -119,9 +121,11 @@ InferenceEngine::IExecutableNetworkInternal::Ptr Plugin::ImportNetwork(
     OV_ITT_SCOPED_TASK(itt::domains::TemplatePlugin, "Plugin::ImportNetwork");

     auto fullConfig = Configuration{config, _cfg};
-    return std::make_shared<ExecutableNetwork>(modelStream,
-                                               fullConfig,
-                                               std::static_pointer_cast<Plugin>(shared_from_this()));
+    auto exec = std::make_shared<ExecutableNetwork>(modelStream,
+                                                    fullConfig,
+                                                    std::static_pointer_cast<Plugin>(shared_from_this()));
+    SetExeNetworkInfo(exec, exec->_function);
+    return exec;
 }
 // ! [plugin:import_network]

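This ImportNetwork change is what motivates the `friend class Plugin` added in template_executable_network.hpp above: after constructing the network from the stream, the plugin reads the network's compiled function to publish input/output info via SetExeNetworkInfo. A condensed sketch of the relationship (class bodies elided; `_function` being a private member is an assumption consistent with the `exec->_function` access in the diff):

    class Plugin;

    class ExecutableNetwork {
    private:
        friend class Plugin;  // added by this commit so ImportNetwork can reach _function
        std::shared_ptr<const ngraph::Function> _function;  // compiled graph handed to SetExeNetworkInfo
    };
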
docs/template_plugin/tests/functional/op_reference/base_reference_test.cpp (+1 −1)

@@ -63,7 +63,7 @@ void CommonReferenceTest::Infer() {
 }

 void CommonReferenceTest::Validate() {
-    ASSERT_EQ(executableNetwork.get_results().size(), refOutData.size());
+    ASSERT_EQ(executableNetwork.outputs().size(), refOutData.size());
     std::vector<ov::runtime::Tensor> outputs;
     for (const auto& result : function->get_results()) {
         auto name = ngraph::op::util::create_ie_output_name(result->input_value(0));

docs/template_plugin/tests/functional/shared_tests_instances/behavior/ov_exec_network.cpp (+12)

@@ -3,6 +3,7 @@
 //

 #include "behavior/ov_exec_network.hpp"
+#include <common_test_utils/test_constants.hpp>

 #include "ie_plugin_config.hpp"

@@ -27,6 +28,9 @@ const std::vector<std::map<std::string, std::string>> configs = {
 const std::vector<std::map<std::string, std::string>> multiConfigs = {
     {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, CommonTestUtils::DEVICE_TEMPLATE}}};

+const std::vector<std::map<std::string, std::string>> heteroConfigs = {
+    {{"TARGET_FALLBACK", CommonTestUtils::DEVICE_TEMPLATE}}};
+
 INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests,
                          OVExecNetwork,
                          ::testing::Combine(::testing::ValuesIn(netPrecisions),

@@ -47,4 +51,12 @@ INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests,
                          ::testing::Values(CommonTestUtils::DEVICE_AUTO),
                          ::testing::ValuesIn(multiConfigs)),
                          OVExecNetwork::getTestCaseName);
+
+INSTANTIATE_TEST_SUITE_P(smoke_Hetero_BehaviorTests,
+                         OVExecNetwork,
+                         ::testing::Combine(::testing::ValuesIn(netPrecisions),
+                                            ::testing::Values(CommonTestUtils::DEVICE_HETERO),
+                                            ::testing::ValuesIn(heteroConfigs)),
+                         OVExecNetwork::getTestCaseName);
+
 }  // namespace

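For context, TARGET_FALLBACK is the HETERO config key that lists devices in priority order; the new suite pins everything to the template device. Outside the test harness, the same configuration is passed roughly like this (a minimal sketch; the model path is a placeholder):

    #include <inference_engine.hpp>

    int main() {
        InferenceEngine::Core core;
        auto network = core.ReadNetwork("model.xml");  // placeholder path
        // HETERO tries devices left to right; here everything falls back to TEMPLATE.
        auto execNet = core.LoadNetwork(network, "HETERO",
                                        {{"TARGET_FALLBACK", "TEMPLATE"}});
        return 0;
    }
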
docs/template_plugin/tests/functional/skip_tests_config.cpp (+10)

@@ -19,5 +19,15 @@ std::vector<std::string> disabledTestPatterns() {
         // CVS-51758
         R"(.*InferRequestPreprocessConversionTest.*oLT=(NHWC|NCHW).*)",
         R"(.*InferRequestPreprocessDynamicallyInSetBlobTest.*oPRC=0.*oLT=1.*)",
+
+        // TODO: execution graph is not supported
+        R"(.*ExecGraph.*)",
+
+        // TODO: support import / export of precisions in template plugin
+        R"(.*smoke_Hetero_BehaviorTests.*OVExecNetwork.ieImportExportedFunction.*)",
+        R"(.*smoke_BehaviorTests.*OVExecNetwork.ieImportExportedFunction.*)",
+
+        // TODO: Round with f16 is not supported
+        R"(.*smoke_Hetero_BehaviorTests.*OVExecNetwork.*readFromV10IR.*)",
     };
 }

inference-engine/ie_bridges/python/wheel/meta/openvino-dev.requirements.txt (+1 −1)

@@ -11,7 +11,7 @@ texttable~=1.6.3
 py-cpuinfo>=7.0.0
 PyYAML>=5.4.1
 pillow>=8.1.2
-scikit-image~=0.17.2
+scikit-image>=0.17.2
 scikit-learn>=0.24.1
 yamlloader>=0.5
 shapely>=1.7.1

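The one-character change above genuinely loosens the constraint: under PEP 440, `~=` is a compatible-release pin while `>=` is an open lower bound.

    scikit-image~=0.17.2   # before: >=0.17.2 but <0.18 (stays in the 0.17 series)
    scikit-image>=0.17.2   # after: 0.17.2 or any later release, 0.18+ included
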
inference-engine/samples/benchmark_app/benchmark_app.hpp (+1)

@@ -337,6 +337,7 @@ static void showUsage() {
     std::cout << "    -l \"<absolute_path>\"    " << custom_cpu_library_message << std::endl;
     std::cout << "          Or" << std::endl;
     std::cout << "    -c \"<absolute_path>\"    " << custom_cldnn_message << std::endl;
+    std::cout << "    -hint \"performance hint (latency or throughput)\"   " << hint_message << std::endl;
     std::cout << "    -api \"<sync/async>\"     " << api_message << std::endl;
     std::cout << "    -niter \"<integer>\"      " << iterations_count_message << std::endl;
     std::cout << "    -nireq \"<integer>\"      " << infer_requests_count_message << std::endl;

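Given the added usage line, the new flag would be passed roughly as follows (a hypothetical invocation; the model path is a placeholder and `-m`/`-api` are the tool's existing options):

    ./benchmark_app -m model.xml -hint throughput -api async
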
inference-engine/src/gna_plugin/gna2_model_debug_log.cpp (+3 −3)

@@ -304,11 +304,11 @@ static std::vector<std::string> GetParamaterNames(Gna2OperationType type) {
         operationParamaterNamesMap.find(type)->second : std::vector<std::string> {};
 }

-typedef void (*dumpParamaters) (std::ostream&, void**, size_t, const std::vector<std::string>);
+typedef void (*dumpParameters) (std::ostream&, void**, size_t, const std::vector<std::string>);

-static dumpParamaters GetParamDumpFunc(Gna2OperationType type) {
+static dumpParameters GetParamDumpFunc(Gna2OperationType type) {
     // This map must be aligned with operationParamaterNamesMap in this file
-    static const std::map<Gna2OperationType, dumpParamaters> dumpParamMap = {
+    static const std::map<Gna2OperationType, dumpParameters> dumpParamMap = {
         {Gna2OperationTypeConvolution, DumpConvolutionParameters},
         {Gna2OperationTypeCopy, DumpCopyParameters},
         {Gna2OperationTypeFullyConnectedAffine, DumpFCAffineParameters},

inference-engine/src/hetero_plugin/hetero_executable_network.cpp (+89)

@@ -6,6 +6,14 @@
 #include "hetero_executable_network.hpp"
 #include "hetero_async_infer_request.hpp"
 #include "hetero_itt.hpp"
+#include "ie_precision.hpp"
+#include "openvino/core/dimension.hpp"
+#include "openvino/core/except.hpp"
+#include "openvino/core/type.hpp"
+#include "openvino/core/type/element_type.hpp"
+#include "openvino/op/result.hpp"
+#include "transformations/utils/utils.hpp"
+#include "openvino/op/parameter.hpp"
 #include "xml_parse_utils.h"
 #include <caseless.hpp>

@@ -63,6 +71,7 @@ HeteroExecutableNetwork::HeteroExecutableNetwork(const InferenceEngine::CNNNetwo
 #ifndef NDEBUG
     dumpDotFile = true;
 #endif
+
     QueryNetworkResult queryNetworkResult;
     auto orderedOps = clonedFunction->get_ordered_ops();
     bool allEmpty = true;

@@ -548,6 +557,44 @@ HeteroExecutableNetwork::HeteroExecutableNetwork(std::istream&
         });
     }

+    const auto parseNode = [] (const pugi::xml_node & xml_node, bool is_param) ->
+            std::shared_ptr<const ov::Node> {
+        const std::string operation_name = GetStrAttr(xml_node, "operation_name");
+        const auto elementType =
+            ov::EnumNames<ov::element::Type_t>::as_enum(GetStrAttr(xml_node, "element_type"));
+
+        std::vector<ov::Dimension> partialShape;
+        pugi::xml_node partialShapeNode = xml_node.child("partial_shape");
+        FOREACH_CHILD(dimNode, partialShapeNode, "dim") {
+            partialShape.emplace_back(ov::Dimension(GetInt64Attr(dimNode, "value")));
+        }
+
+        pugi::xml_node tensorNamesNode = xml_node.child("tensor_names");
+        std::unordered_set<std::string> tensorNames;
+        FOREACH_CHILD(tensorNameNode, tensorNamesNode, "tensor_name") {
+            tensorNames.insert(GetStrAttr(tensorNameNode, "value"));
+        }
+
+        std::shared_ptr<ov::Node> node = std::make_shared<ov::op::v0::Parameter>(elementType, partialShape);
+        if (!is_param)
+            node = std::make_shared<ov::op::v0::Result>(node);
+        node->set_friendly_name(operation_name);
+        node->output(0).get_tensor().add_names(tensorNames);
+
+        return node;
+    };
+    (void)parseNode;
+
+    pugi::xml_node parametersNode = heteroNode.child("parameters");
+    FOREACH_CHILD(parameterNode, parametersNode, "parameter") {
+        _parameters.emplace_back(parseNode(parameterNode, true));
+    }
+
+    pugi::xml_node resultsNode = heteroNode.child("results");
+    FOREACH_CHILD(resultNode, resultsNode, "result") {
+        _results.emplace_back(parseNode(resultNode, false));
+    }
+
     // save state
     this->_config = importedConfigs;
     this->_networks = std::move(descs);

@@ -559,6 +606,8 @@ void HeteroExecutableNetwork::Export(std::ostream& heteroModel) {
     auto heteroNode = doc.append_child("hetero");
     heteroNode.append_attribute("name").set_value(_name.c_str());

+    // CNNNetwork inputs and outputs information
+
     auto inputsNode = heteroNode.append_child("inputs");
     for (auto&& networkInput : _networkInputs) {
         inputsNode.append_child("input").append_attribute("name").set_value(networkInput.first.c_str());

@@ -569,6 +618,46 @@
         outputsNode.append_child("output").append_attribute("name").set_value(networkInput.first.c_str());
     }

+    const auto serializeNode = [&] (const std::shared_ptr<const ov::Node>& node,
+                                    pugi::xml_node & xml_node) {
+        const bool is_result = ov::is_type<ov::op::v0::Result>(node);
+        const std::string name = is_result ?
+            ngraph::op::util::create_ie_output_name(node->input_value(0)) :
+            node->get_friendly_name();
+        xml_node.append_attribute("operation_name").set_value(name.c_str());
+        xml_node.append_attribute("element_type").set_value(
+            node->get_output_element_type(0).get_type_name().c_str());
+
+        const auto & pShape = node->get_output_partial_shape(0);
+        OPENVINO_ASSERT(pShape.rank().is_static(), "Serialization of shapes with dynamic rank is not supported");
+        auto partialShapeNode = xml_node.append_child("partial_shape");
+        for (auto && dim : node->get_output_partial_shape(0)) {
+            if (dim.is_dynamic())
+                partialShapeNode.append_child("dim").append_attribute("value").set_value("-1");
+            else
+                partialShapeNode.append_child("dim").append_attribute("value").set_value(std::to_string(dim.get_length()).c_str());
+        }
+
+        auto tensorNamesNode = xml_node.append_child("tensor_names");
+        for (auto & tensorName : node->get_output_tensor(0).get_names()) {
+            tensorNamesNode.append_child("tensor_name").append_attribute("value").set_value(tensorName.c_str());
+        }
+    };
+
+    // ngraph parameters info
+    auto subnetworkParamsNode = heteroNode.append_child("parameters");
+    for (auto&& parameter : getInputs()) {
+        auto parameterNode = subnetworkParamsNode.append_child("parameter");
+        serializeNode(parameter, parameterNode);
+    }
+
+    // ngraph results info
+    auto subnetworkResultsNode = heteroNode.append_child("results");
+    for (auto&& result : getOutputs()) {
+        auto parameterNode = subnetworkResultsNode.append_child("result");
+        serializeNode(result, parameterNode);
+    }
+
     auto subnetworksNode = heteroNode.append_child("subnetworks");
     for (auto&& subnetwork : _networks) {
         auto subnet = subnetwork._clonedNetwork;

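Reading serializeNode and parseNode together, the parameter/result section of the exported hetero blob would presumably look like this (element and attribute names are taken from the code above; the values are illustrative):

    <hetero name="...">
      <parameters>
        <parameter operation_name="input_a" element_type="f32">
          <partial_shape>
            <dim value="1"/>
            <dim value="-1"/>  <!-- dynamic dimensions are serialized as -1 -->
          </partial_shape>
          <tensor_names>
            <tensor_name value="input_a:0"/>
          </tensor_names>
        </parameter>
      </parameters>
      <results>
        <result operation_name="relu" element_type="f32">
          <!-- same partial_shape / tensor_names children as above -->
        </result>
      </results>
    </hetero>
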
inference-engine/src/hetero_plugin/hetero_plugin.cpp (+2 −2)

@@ -42,8 +42,8 @@ std::vector<std::string> supported_configKeys {

 }  // namespace

-InferenceEngine::IExecutableNetworkInternal::Ptr Engine::LoadExeNetworkImpl(const InferenceEngine::CNNNetwork& network,
-                                                                            const Configs& config) {
+InferenceEngine::IExecutableNetworkInternal::Ptr Engine::LoadExeNetworkImpl(const InferenceEngine::CNNNetwork& network,
+                                                                            const Configs& config) {
     if (GetCore() == nullptr) {
         IE_THROW() << "Please, work with HETERO device via InferencEngine::Core object";
     }

inference-engine/src/inference_engine/include/openvino/runtime/executable_network.hpp (+46 −6)

@@ -60,18 +60,58 @@ class OPENVINO_RUNTIME_API ExecutableNetwork {
     std::shared_ptr<const Function> get_runtime_function() const;

     /**
-     * @brief Get parameters of executeble graph function
+     * @brief Get inputs of executable graph function
      *
-     * @return vector of paramter nodes
+     * @return vector of inputs
      */
-    ParameterVector get_parameters() const;
+    std::vector<ov::Output<const ov::Node>> inputs() const;
+    /**
+     * @brief Get input of executable graph function
+     *
+     * @return Function input or throw ov::Exception in case of several outputs
+     */
+    ov::Output<const ov::Node> input() const;
+    /**
+     * @brief Get input of executable graph function
+     *
+     * @param i input index
+     * @return Function input or throw ov::Exception if input wasn't found
+     */
+    ov::Output<const ov::Node> input(size_t i) const;
+    /**
+     * @brief Get input of executable graph function
+     *
+     * @param tensor_name The input tensor name
+     * @return Function output or throw ov::Exception if input wasn't found
+     */
+    ov::Output<const ov::Node> input(const std::string& tensor_name) const;

     /**
-     * @brief Get results of executeble graph function
+     * @brief Get outputs of executable graph function
+     *
+     * @return vector of outputs
+     */
+    std::vector<ov::Output<const ov::Node>> outputs() const;
+    /**
+     * @brief Get output of executable graph function
+     *
+     * @return Function output or throw ov::Exception in case of several outputs
+     */
+    ov::Output<const ov::Node> output() const;
+    /**
+     * @brief Get output of executable graph function
+     *
+     * @param i output index
+     * @return Function output or throw ov::Exception if output wasn't found
+     */
+    ov::Output<const ov::Node> output(size_t i) const;
+    /**
+     * @brief Get output of executable graph function
      *
-     * @return vector of result nodes
+     * @param tensor_name The output tensor name
+     * @return Function output or throw ov::Exception if output wasn't found
      */
-    ResultVector get_results() const;
+    ov::Output<const ov::Node> output(const std::string& tensor_name) const;

     /**
      * @brief Creates an inference request object used to infer the network.

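A minimal usage sketch for the new accessors, inferred from the declarations above and the reference-test change (assumes an already-created ov::runtime::ExecutableNetwork; the tensor name is a placeholder):

    #include <openvino/runtime/executable_network.hpp>

    void inspect(const ov::runtime::ExecutableNetwork& exec) {
        // Enumerate every output, as the updated reference test now does.
        for (const auto& out : exec.outputs()) {
            // Each entry is an ov::Output<const ov::Node>.
            (void)out;
        }

        // The convenience overloads throw ov::Exception when the lookup is
        // ambiguous or fails:
        auto first_input = exec.input(0);               // by index
        auto named_out = exec.output("result_tensor");  // by tensor name (placeholder)
        (void)first_input;
        (void)named_out;
    }
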