From 564f3a696949a6a5763df7b1ac180514937ea0ee Mon Sep 17 00:00:00 2001
From: Anastasia Kuporosova
Date: Fri, 12 Nov 2021 22:44:44 +0300
Subject: [PATCH] [Python API] add new api (#8149)

* Bind exec core ov (#50)
* Output const node python tests (#52)
* add python bindings tests for Output
* add proper tests
* add new line
* rename ie_version to version
* Pszmel/bind infer request (#51)
* remove set_batch, get_blob and set_blob
* update InferRequest class
* change InferenceEngine::InferRequest to ov::runtime::InferRequest
* update set_callback body
* update bindings to reflect ov::runtime::InferRequest
* bind set_input_tensor and get_input_tensor
* style fix
* clean ie_infer_queue.cpp
* Bind exec core ov (#50)
* bind core, exec_net classes
* rm unused function
* add new line
* rename ie_infer_request -> infer_request
* update imports
* update __init__.py
* update ie_api.py
* Replace old containers with the new ones
* create impl for create_infer_request
* comment out infer_queue to avoid errors with old infer_request
* update infer_request bind to reflect new infer_request api
* comment out input_info from ie_network to avoid errors with old containers
* Register new containers and comment out InferQueue
* update infer request tests
* style fix
* remove unused imports
* remove unused imports and 2 methods
* add tests to cover all new methods from infer_request
* style fix
* add test
* remove registration of InferResults
* update name of exception_ptr parameter
* update the loops that iterate through inputs and outputs
* clean setCustomCallbacks
* style fix
* add Tensor import
* style fix
* update infer and normalize_inputs
* style fix
* rename startTime and endTime
* Create test for mixed keys as infer arguments
* update infer function
* update return type of infer

Co-authored-by: Bartek Szmelczynski

* fix get_version
* fix opaque issue
* some cosmetic changes
* fix codestyle in tests
* make tests green
* Extend python InferRequest
* Extend python Function
* Change return value of infer call
* Fix missing precisions conversions in CPU plugin
* Rework of runtime for new tests
* Fixed onnx reading in python tests
* Edit compatibility tests
* Edit tests
* Add FLOAT_LIKE xfails
* [Python API] bind ProfilingInfo (#55)
* bind ProfilingInfo
* Add tests
* Fix code style
* Add property
* fix codestyle
* Infer new request method (#56)
* fix conflicts, add infer_new_request function
* remove redundant functions, fix style
* revert the unwanted changes
* revert removal of the Blob
* revert removal of isTblob
* add add_extension from path
* codestyle
* fix win build
* add inputs-outputs to function
* Hot-fix CPU plugin with precision
* fix start_async
* add performance hint to time infer (#8480)
* Updated common migration pipeline (#8176)
* Updated common migration pipeline
* Fixed merge issue
* Added new model and extended example
* Fixed typo
* Added v10-v11 comparison
* Avoid redundant graph nodes scans (#8415)
* Refactor work with env variables (#8208)
* del MO_ROOT
* del MO_ROOT from common_utils.py
* add MO_PATH to common_utils.py
* change mo_path
* [IE Sample Scripts] Use cmake to build samples (#8442)
* Use cmake to build samples
* Add the option to set custom build output folder
* Remove opset8 from compatibility ngraph python API (#8452)
* [GPU] OneDNN gpu submodule update to version 2.5 (#8449)
* [GPU] OneDNN gpu submodule update to version 2.5
* [GPU] Updated onednn submodule and added layout optimizer fix
* Install rules for static libraries case (#8384)
* Proper cmake install for static libraries case
* Added an ability to skip template plugin
* Added install rules for VPU / GPU
* Install more libraries
* Fixed absolute TBB include paths
* Disable GNA
* Fixed issue with linker
* Some fixes
* Fixed linkage issues in tests
* Disabled some tests
* Updated CI pipelines
* Fixed Windows linkage
* Fixed custom_opset test for static case
* Fixed CVS-70313
* Continue on error
* Fixed clang-format
* Try to fix Windows linker
* Fixed compilation
* Disable samples
* Fixed samples build with THREADING=SEQ
* Fixed link error on Windows
* Fixed ieFuncTests
* Added static Azure CI
* Revert "Fixed link error on Windows"

This reverts commit 78cca36fd21cdbd639216df6cca10df7f88bce3e.

* Merge static and dynamic linux pipelines
* Fixed Azure
* fix codestyle

Co-authored-by: Bartek Szmelczynski
Co-authored-by: Piotr Szmelczynski
Co-authored-by: jiwaszki
Co-authored-by: Alexey Lebedev
Co-authored-by: Victor Kuznetsov
Co-authored-by: Ilya Churaev
Co-authored-by: Tomasz Jankowski
Co-authored-by: Dmitry Pigasin
Co-authored-by: Artur Kulikowski
Co-authored-by: Ilya Znamenskiy
Co-authored-by: Ilya Lavrenov
---
 .../mkldnn_plugin/mkldnn_infer_request.cpp    |   2 +-
 .../src/mkldnn_plugin/mkldnn_plugin.cpp       |   5 +-
 .../nodes/common/cpu_convert.cpp              |   8 +-
 .../src/mkldnn_plugin/utils/cpu_utils.hpp     |   5 +
 .../bindings/python/src/openvino/__init__.py  |  16 +-
 .../bindings/python/src/openvino/ie_api.py    |  33 +-
 .../python/src/openvino/impl/__init__.py      |   1 +
 .../python/src/pyopenvino/core/common.cpp     |  42 +++
 .../python/src/pyopenvino/core/common.hpp     |   8 +
 .../python/src/pyopenvino/core/containers.cpp |  40 +--
 .../python/src/pyopenvino/core/containers.hpp |  31 +-
 .../python/src/pyopenvino/core/core.cpp       | 112 +++++++
 .../pyopenvino/core/{ie_core.hpp => core.hpp} |   0
 .../pyopenvino/core/executable_network.cpp    | 107 +++++++
 ...ble_network.hpp => executable_network.hpp} |   0
 .../python/src/pyopenvino/core/ie_core.cpp    | 160 ----------
 .../pyopenvino/core/ie_executable_network.cpp |  91 ------
 .../src/pyopenvino/core/ie_infer_queue.cpp    | 209 ++++++-------
 .../src/pyopenvino/core/ie_infer_request.cpp  | 210 -------------
 .../src/pyopenvino/core/ie_infer_request.hpp  |  43 ---
 .../python/src/pyopenvino/core/ie_network.cpp |   9 -
 .../python/src/pyopenvino/core/ie_version.cpp |  25 --
 .../src/pyopenvino/core/infer_request.cpp     | 288 +++++++++++++++++
 .../src/pyopenvino/core/infer_request.hpp     |  47 +++
 .../src/pyopenvino/core/profiling_info.cpp    |  28 ++
 .../src/pyopenvino/core/profiling_info.hpp    |  11 +
 .../python/src/pyopenvino/core/version.cpp    |  24 ++
 .../core/{ie_version.hpp => version.hpp}      |   0
 .../python/src/pyopenvino/graph/function.cpp  | 114 ++++++-
 .../src/pyopenvino/graph/node_output.cpp      |  67 +---
 .../src/pyopenvino/graph/node_output.hpp      |  75 ++++-
 .../python/src/pyopenvino/pyopenvino.cpp      |  52 +--
 runtime/bindings/python/tests/__init__.py     |   2 +
 runtime/bindings/python/tests/conftest.py     |   1 +
 runtime/bindings/python/tests/runtime.py      | 110 ++-----
 .../tests/test_inference_engine/helpers.py    |  35 ---
 .../tests/test_inference_engine/test_core.py  | 235 ++++++++------
 .../test_executable_network.py                | 295 ++++++++++++++++++
 .../test_infer_request.py                     | 266 +++++++++-------
 .../test_output_const_node.py                 |  83 +++++
 .../test_inference_engine/test_tensor.py      |  16 +-
 .../python/tests/test_ngraph/test_basic.py    |  14 +-
 .../test_ngraph/test_sequence_processing.py   |   4 +-
 .../python/tests/test_onnx/test_backend.py    |   8 +-
 .../test_onnx/test_onnx_external_data.py      |   6 +-
 .../tests/test_onnx/test_onnx_import.py       |  24 +-
 .../python/tests/test_onnx/test_ops_unary.py  |   7 +-
 .../tests/test_onnx/utils/onnx_helpers.py     |   8 +-
 48 files changed, 1797 insertions(+), 1180 deletions(-)
 create mode 100644 runtime/bindings/python/src/pyopenvino/core/core.cpp
 rename runtime/bindings/python/src/pyopenvino/core/{ie_core.hpp => core.hpp} (100%)
 create mode 100644 runtime/bindings/python/src/pyopenvino/core/executable_network.cpp
 rename runtime/bindings/python/src/pyopenvino/core/{ie_executable_network.hpp => executable_network.hpp} (100%)
 delete mode 100644 runtime/bindings/python/src/pyopenvino/core/ie_core.cpp
 delete mode 100644 runtime/bindings/python/src/pyopenvino/core/ie_executable_network.cpp
 delete mode 100644 runtime/bindings/python/src/pyopenvino/core/ie_infer_request.cpp
 delete mode 100644 runtime/bindings/python/src/pyopenvino/core/ie_infer_request.hpp
 delete mode 100644 runtime/bindings/python/src/pyopenvino/core/ie_version.cpp
 create mode 100644 runtime/bindings/python/src/pyopenvino/core/infer_request.cpp
 create mode 100644 runtime/bindings/python/src/pyopenvino/core/infer_request.hpp
 create mode 100644 runtime/bindings/python/src/pyopenvino/core/profiling_info.cpp
 create mode 100644 runtime/bindings/python/src/pyopenvino/core/profiling_info.hpp
 create mode 100644 runtime/bindings/python/src/pyopenvino/core/version.cpp
 rename runtime/bindings/python/src/pyopenvino/core/{ie_version.hpp => version.hpp} (100%)
 delete mode 100644 runtime/bindings/python/tests/test_inference_engine/helpers.py
 create mode 100644 runtime/bindings/python/tests/test_inference_engine/test_executable_network.py
 create mode 100644 runtime/bindings/python/tests/test_inference_engine/test_output_const_node.py

diff --git a/inference-engine/src/mkldnn_plugin/mkldnn_infer_request.cpp b/inference-engine/src/mkldnn_plugin/mkldnn_infer_request.cpp
index af900f7170db91..32d1f8494ffab3 100644
--- a/inference-engine/src/mkldnn_plugin/mkldnn_infer_request.cpp
+++ b/inference-engine/src/mkldnn_plugin/mkldnn_infer_request.cpp
@@ -305,7 +305,7 @@ InferenceEngine::Blob::Ptr MKLDNNPlugin::MKLDNNInferRequest::GetBlob(const std::
                                                            desc.getShape().getRank()))
                               : MemoryDescUtils::convertToTensorDesc(desc);
         const auto &tensorDesc = data->getTensorDesc();
-        if (expectedTensorDesc.getPrecision() != tensorDesc.getPrecision()) {
+        if (expectedTensorDesc.getPrecision() != normalizeToSupportedPrecision(tensorDesc.getPrecision())) {
             IE_THROW(ParameterMismatch) << "Network input and output use the same name: " << name
                                         << " but expect blobs with different precision: " << tensorDesc.getPrecision()
                                         << " for input and " << expectedTensorDesc.getPrecision()
diff --git a/inference-engine/src/mkldnn_plugin/mkldnn_plugin.cpp b/inference-engine/src/mkldnn_plugin/mkldnn_plugin.cpp
index 13465128d72ec4..1a4efd3fbc0c7d 100644
--- a/inference-engine/src/mkldnn_plugin/mkldnn_plugin.cpp
+++ b/inference-engine/src/mkldnn_plugin/mkldnn_plugin.cpp
@@ -136,7 +136,6 @@ static void TransformationUpToCPUSpecificOpSet(std::shared_ptr
         manager.register_pass(
             std::vector{ ngraph::element::i8, ngraph::element::u8, ngraph::element::i4, ngraph::element::u4 });
     }
-
     auto get_convert_precisions = []() {
         precisions_array array = {
             {ngraph::element::i64, ngraph::element::i32},
@@ -443,8 +442,10 @@ Engine::LoadExeNetworkImpl(const InferenceEngine::CNNNetwork &network, const std
     InferenceEngine::InputsDataMap _networkInputs = network.getInputsInfo();
     for (const auto &ii : _networkInputs) {
         auto input_precision = ii.second->getPrecision();
-        if (input_precision != InferenceEngine::Precision::FP32 &&
+        if (input_precision != InferenceEngine::Precision::FP64 &&
+            input_precision != InferenceEngine::Precision::FP32 &&
             input_precision != InferenceEngine::Precision::I32 &&
+            input_precision != InferenceEngine::Precision::U32 &&
             input_precision != InferenceEngine::Precision::U16 &&
             input_precision != InferenceEngine::Precision::I16 &&
             input_precision != InferenceEngine::Precision::I8 &&
diff --git a/inference-engine/src/mkldnn_plugin/nodes/common/cpu_convert.cpp b/inference-engine/src/mkldnn_plugin/nodes/common/cpu_convert.cpp
index 5b47b476b0a565..8763b551af9597 100644
--- a/inference-engine/src/mkldnn_plugin/nodes/common/cpu_convert.cpp
+++ b/inference-engine/src/mkldnn_plugin/nodes/common/cpu_convert.cpp
@@ -103,7 +103,13 @@ void cpu_convert(const void *srcPtr, void *dstPtr, Precision srcPrc, Precision d
     MKLDNN_CVT(BF16, I64), MKLDNN_CVT(BF16, FP32), MKLDNN_CVT(BF16, BOOL),
     MKLDNN_CVT(BOOL, U8), MKLDNN_CVT(BOOL, I8), MKLDNN_CVT(BOOL, U16),
     MKLDNN_CVT(BOOL, I16), MKLDNN_CVT(BOOL, I32), MKLDNN_CVT(BOOL, U64),
-    MKLDNN_CVT(BOOL, I64), MKLDNN_CVT(BOOL, FP32), MKLDNN_CVT(BOOL, BF16));
+    MKLDNN_CVT(BOOL, I64), MKLDNN_CVT(BOOL, FP32), MKLDNN_CVT(BOOL, BF16),
+    MKLDNN_CVT(FP64, U8), MKLDNN_CVT(FP64, I8), MKLDNN_CVT(FP64, U16),
+    MKLDNN_CVT(FP64, I16), MKLDNN_CVT(FP64, I32), MKLDNN_CVT(FP64, U64),
+    MKLDNN_CVT(FP64, I64), MKLDNN_CVT(FP64, FP32), MKLDNN_CVT(FP64, BF16), MKLDNN_CVT(FP64, BOOL),
+    MKLDNN_CVT(U32, U8), MKLDNN_CVT(U32, I8), MKLDNN_CVT(U32, U16),
+    MKLDNN_CVT(U32, I16), MKLDNN_CVT(U32, I32), MKLDNN_CVT(U32, U64),
+    MKLDNN_CVT(U32, I64), MKLDNN_CVT(U32, FP32), MKLDNN_CVT(U32, BF16), MKLDNN_CVT(U32, BOOL));

     if (!ctx.converted)
         IE_THROW() << "cpu_convert can't convert from: " << srcPrc << " precision to: " << dstPrc;
diff --git a/inference-engine/src/mkldnn_plugin/utils/cpu_utils.hpp b/inference-engine/src/mkldnn_plugin/utils/cpu_utils.hpp
index 14fd043dce0824..acc456f8ba28bf 100644
--- a/inference-engine/src/mkldnn_plugin/utils/cpu_utils.hpp
+++ b/inference-engine/src/mkldnn_plugin/utils/cpu_utils.hpp
@@ -72,12 +72,17 @@ inline InferenceEngine::Precision normalizeToSupportedPrecision(InferenceEngine:
     case InferenceEngine::Precision::FP32: {
         break;
     }
+    case InferenceEngine::Precision::FP64: {
+        precision = InferenceEngine::Precision::FP32;
+        break;
+    }
     case InferenceEngine::Precision::BOOL: {
         precision = InferenceEngine::Precision::U8;
         break;
     }
     case InferenceEngine::Precision::U16:
     case InferenceEngine::Precision::I16:
+    case InferenceEngine::Precision::U32:
     case InferenceEngine::Precision::I64:
     case InferenceEngine::Precision::U64: {
         precision = InferenceEngine::Precision::I32;
diff --git a/runtime/bindings/python/src/openvino/__init__.py b/runtime/bindings/python/src/openvino/__init__.py
index 0334300387aa83..56e7605c38699a 100644
--- a/runtime/bindings/python/src/openvino/__init__.py
+++ b/runtime/bindings/python/src/openvino/__init__.py
@@ -15,9 +15,10 @@
 from openvino.ie_api import BlobWrapper
 from openvino.ie_api import infer
-from openvino.ie_api import async_infer
-from openvino.ie_api import get_result
+from openvino.ie_api import start_async
 from openvino.ie_api import blob_from_file
+from openvino.ie_api import tensor_from_file
+from openvino.ie_api import infer_new_request

 from openvino.impl import Dimension
 from openvino.impl import Function
@@ -35,8 +36,7 @@
 from openvino.pyopenvino import DataPtr
 from openvino.pyopenvino import TensorDesc
 from openvino.pyopenvino import get_version
-from openvino.pyopenvino import StatusCode
-from openvino.pyopenvino import InferQueue
+#from openvino.pyopenvino import InferQueue
 from openvino.pyopenvino import InferRequest  # TODO: move to ie_api?
 from openvino.pyopenvino import Blob
 from openvino.pyopenvino import PreProcessInfo
@@ -45,6 +45,7 @@
 from openvino.pyopenvino import ColorFormat
 from openvino.pyopenvino import PreProcessChannel
 from openvino.pyopenvino import Tensor
+from openvino.pyopenvino import ProfilingInfo

 from openvino import opset1
 from openvino import opset2
@@ -78,10 +79,9 @@
 # this class will be removed
 Blob = BlobWrapper
 # Patching ExecutableNetwork
-ExecutableNetwork.infer = infer
+ExecutableNetwork.infer_new_request = infer_new_request
 # Patching InferRequest
 InferRequest.infer = infer
-InferRequest.async_infer = async_infer
-InferRequest.get_result = get_result
+InferRequest.start_async = start_async
 # Patching InferQueue
-InferQueue.async_infer = async_infer
+#InferQueue.async_infer = async_infer
diff --git a/runtime/bindings/python/src/openvino/ie_api.py b/runtime/bindings/python/src/openvino/ie_api.py
index 925bc2ad5bb545..9199ccc1a273db 100644
--- a/runtime/bindings/python/src/openvino/ie_api.py
+++ b/runtime/bindings/python/src/openvino/ie_api.py
@@ -2,6 +2,8 @@
 # SPDX-License-Identifier: Apache-2.0

 import numpy as np
+import copy
+from typing import List

 from openvino.pyopenvino import TBlobFloat32
 from openvino.pyopenvino import TBlobFloat64
@@ -15,6 +17,8 @@
 from openvino.pyopenvino import TBlobUint8
 from openvino.pyopenvino import TensorDesc
 from openvino.pyopenvino import InferRequest
+from openvino.pyopenvino import ExecutableNetwork
+from openvino.pyopenvino import Tensor

 precision_map = {"FP32": np.float32,
@@ -35,22 +39,26 @@
 def normalize_inputs(py_dict: dict) -> dict:
     """Normalize a dictionary of inputs to contiguous numpy arrays."""
-    return {k: (np.ascontiguousarray(v) if isinstance(v, np.ndarray) else v)
+    return {k: (Tensor(v) if isinstance(v, np.ndarray) else v)
             for k, v in py_dict.items()}

 # flake8: noqa: D102
-def infer(request: InferRequest, inputs: dict = None) -> dict:
-    results = request._infer(inputs=normalize_inputs(inputs if inputs is not None else {}))
-    return {name: (blob.buffer.copy()) for name, blob in results.items()}
+def infer(request: InferRequest, inputs: dict = {}) -> np.ndarray:
+    res = request._infer(inputs=normalize_inputs(inputs))
+    # Required to return list since np.ndarray forces all of tensors data to match in
+    # dimensions. This results in errors when running ops like variadic split.
+    return [copy.deepcopy(tensor.data) for tensor in res]

-# flake8: noqa: D102
-def get_result(request: InferRequest, name: str) -> np.ndarray:
-    return request.get_blob(name).buffer.copy()
+
+def infer_new_request(exec_net: ExecutableNetwork, inputs: dict = None) -> List[np.ndarray]:
+    res = exec_net._infer_new_request(inputs=normalize_inputs(inputs if inputs is not None else {}))
+    # Required to return list since np.ndarray forces all of tensors data to match in
+    # dimensions. This results in errors when running ops like variadic split.
+    return [copy.deepcopy(tensor.data) for tensor in res]

 # flake8: noqa: D102
-def async_infer(request: InferRequest, inputs: dict = None, userdata=None) -> None:  # type: ignore
-    request._async_infer(inputs=normalize_inputs(inputs if inputs is not None else {}),
-                         userdata=userdata)
+def start_async(request: InferRequest, inputs: dict = {}, userdata: dict = None) -> None:  # type: ignore
+    request._start_async(inputs=normalize_inputs(inputs), userdata=userdata)

 # flake8: noqa: C901
 # Dispatch Blob types on Python side.
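The wrappers above change the calling convention of the Python API: input dicts may be keyed by tensor name (str) or input index (int), ndarrays are wrapped into Tensors by normalize_inputs, and results come back as a list of ndarray copies rather than a name-keyed dict of Blob buffers. A minimal usage sketch of the new synchronous flow follows; the model path "model.xml", the input shape, and CPU device availability are placeholders, and it assumes Core (bound in pyopenvino/core/core.cpp further down) is re-exported at the package root, otherwise it is reachable as openvino.pyopenvino.Core:

    import numpy as np
    from openvino import Core  # assumption: Core is re-exported at the package root

    core = Core()
    func = core.read_model("model.xml")          # placeholder model path
    exec_net = core.compile_model(func, "CPU")   # assumes the CPU plugin is available

    data = np.zeros((1, 3, 224, 224), dtype=np.float32)  # assumed input shape
    # infer_new_request() creates a temporary InferRequest, sets the input
    # tensor by index, runs inference, and returns one ndarray copy per output.
    results = exec_net.infer_new_request({0: data})

The remaining ie_api.py hunk continues below.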
@@ -112,3 +120,8 @@ def blob_from_file(path_to_bin_file: str) -> BlobWrapper: array = np.fromfile(path_to_bin_file, dtype=np.uint8) tensor_desc = TensorDesc("U8", array.shape, "C") return BlobWrapper(tensor_desc, array) + +# flake8: noqa: D102 +def tensor_from_file(path: str) -> Tensor: + """The data will be read with dtype of unit8""" + return Tensor(np.fromfile(path, dtype=np.uint8)) diff --git a/runtime/bindings/python/src/openvino/impl/__init__.py b/runtime/bindings/python/src/openvino/impl/__init__.py index 641764122dc5a9..cfde3f4b3e1d66 100644 --- a/runtime/bindings/python/src/openvino/impl/__init__.py +++ b/runtime/bindings/python/src/openvino/impl/__init__.py @@ -49,4 +49,5 @@ from openvino.pyopenvino import Coordinate from openvino.pyopenvino import Output from openvino.pyopenvino import Layout +from openvino.pyopenvino import ConstOutput from openvino.pyopenvino import util diff --git a/runtime/bindings/python/src/pyopenvino/core/common.cpp b/runtime/bindings/python/src/pyopenvino/core/common.cpp index 741ec010971926..b42f4d14419594 100644 --- a/runtime/bindings/python/src/pyopenvino/core/common.cpp +++ b/runtime/bindings/python/src/pyopenvino/core/common.cpp @@ -211,6 +211,48 @@ bool is_TBlob(const py::handle& blob) { } } +const ov::runtime::Tensor& cast_to_tensor(const py::handle& tensor) { + return tensor.cast(); +} + +const Containers::TensorNameMap cast_to_tensor_name_map(const py::dict& inputs) { + Containers::TensorNameMap result_map; + for (auto&& input : inputs) { + std::string name; + if (py::isinstance(input.first)) { + name = input.first.cast(); + } else { + throw py::type_error("incompatible function arguments!"); + } + if (py::isinstance(input.second)) { + auto tensor = Common::cast_to_tensor(input.second); + result_map[name] = tensor; + } else { + throw ov::Exception("Unable to cast tensor " + name + "!"); + } + } + return result_map; +} + +const Containers::TensorIndexMap cast_to_tensor_index_map(const py::dict& inputs) { + Containers::TensorIndexMap result_map; + for (auto&& input : inputs) { + int idx; + if (py::isinstance(input.first)) { + idx = input.first.cast(); + } else { + throw py::type_error("incompatible function arguments!"); + } + if (py::isinstance(input.second)) { + auto tensor = Common::cast_to_tensor(input.second); + result_map[idx] = tensor; + } else { + throw ov::Exception("Unable to cast tensor " + std::to_string(idx) + "!"); + } + } + return result_map; +} + const std::shared_ptr cast_to_blob(const py::handle& blob) { if (py::isinstance>(blob)) { return blob.cast>&>(); diff --git a/runtime/bindings/python/src/pyopenvino/core/common.hpp b/runtime/bindings/python/src/pyopenvino/core/common.hpp index 314a8290244581..d4be9bd2a77995 100644 --- a/runtime/bindings/python/src/pyopenvino/core/common.hpp +++ b/runtime/bindings/python/src/pyopenvino/core/common.hpp @@ -14,6 +14,8 @@ #include #include "Python.h" #include "ie_common.h" +#include "openvino/runtime/tensor.hpp" +#include "pyopenvino/core/containers.hpp" namespace py = pybind11; @@ -48,6 +50,12 @@ namespace Common const std::shared_ptr cast_to_blob(const py::handle& blob); + const Containers::TensorNameMap cast_to_tensor_name_map(const py::dict& inputs); + + const Containers::TensorIndexMap cast_to_tensor_index_map(const py::dict& inputs); + + const ov::runtime::Tensor& cast_to_tensor(const py::handle& tensor); + void blob_from_numpy(const py::handle& _arr, InferenceEngine::Blob::Ptr &blob); void set_request_blobs(InferenceEngine::InferRequest& request, const py::dict& dictonary); diff --git 
a/runtime/bindings/python/src/pyopenvino/core/containers.cpp b/runtime/bindings/python/src/pyopenvino/core/containers.cpp index 096b6074325815..12a50b1caf5331 100644 --- a/runtime/bindings/python/src/pyopenvino/core/containers.cpp +++ b/runtime/bindings/python/src/pyopenvino/core/containers.cpp @@ -1,51 +1,23 @@ - // Copyright (C) 2021 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // #include "pyopenvino/core/containers.hpp" -#include #include -PYBIND11_MAKE_OPAQUE(Containers::PyInputsDataMap); -PYBIND11_MAKE_OPAQUE(Containers::PyConstInputsDataMap); -PYBIND11_MAKE_OPAQUE(Containers::PyOutputsDataMap); -PYBIND11_MAKE_OPAQUE(Containers::PyResults); +PYBIND11_MAKE_OPAQUE(Containers::TensorIndexMap); +PYBIND11_MAKE_OPAQUE(Containers::TensorNameMap); namespace py = pybind11; namespace Containers { -void regclass_PyInputsDataMap(py::module m) { - auto py_inputs_data_map = py::bind_map(m, "PyInputsDataMap"); - - py_inputs_data_map.def("keys", [](PyInputsDataMap& self) { - return py::make_key_iterator(self.begin(), self.end()); - }); -} - -void regclass_PyConstInputsDataMap(py::module m) { - auto py_const_inputs_data_map = py::bind_map(m, "PyConstInputsDataMap"); - - py_const_inputs_data_map.def("keys", [](PyConstInputsDataMap& self) { - return py::make_key_iterator(self.begin(), self.end()); - }); +void regclass_TensorIndexMap(py::module m) { + py::bind_map(m, "TensorIndexMap"); } -void regclass_PyOutputsDataMap(py::module m) { - auto py_outputs_data_map = py::bind_map(m, "PyOutputsDataMap"); - - py_outputs_data_map.def("keys", [](PyOutputsDataMap& self) { - return py::make_key_iterator(self.begin(), self.end()); - }); -} - -void regclass_PyResults(py::module m) { - auto py_results = py::bind_map(m, "PyResults"); - - py_results.def("keys", [](PyResults& self) { - return py::make_key_iterator(self.begin(), self.end()); - }); +void regclass_TensorNameMap(py::module m) { + py::bind_map(m, "TensorNameMap"); } } // namespace Containers diff --git a/runtime/bindings/python/src/pyopenvino/core/containers.hpp b/runtime/bindings/python/src/pyopenvino/core/containers.hpp index 511d9053ea50fa..e24e7336236cac 100644 --- a/runtime/bindings/python/src/pyopenvino/core/containers.hpp +++ b/runtime/bindings/python/src/pyopenvino/core/containers.hpp @@ -4,28 +4,21 @@ #pragma once -#include #include -#include -#include "ie_data.h" -#include "ie_blob.h" - -namespace py = pybind11; +#include +#include -namespace Containers { - using PyInputsDataMap = std::map>; +#include - using PyConstInputsDataMap = - std::map>; +#include - using PyOutputsDataMap = - std::map>; +namespace py = pybind11; - using PyResults = - std::map>; +namespace Containers { + using TensorIndexMap = std::map; + using TensorNameMap = std::map; + using InferResults = std::vector; - void regclass_PyInputsDataMap(py::module m); - void regclass_PyConstInputsDataMap(py::module m); - void regclass_PyOutputsDataMap(py::module m); - void regclass_PyResults(py::module m); -} \ No newline at end of file + void regclass_TensorIndexMap(py::module m); + void regclass_TensorNameMap(py::module m); +} diff --git a/runtime/bindings/python/src/pyopenvino/core/core.cpp b/runtime/bindings/python/src/pyopenvino/core/core.cpp new file mode 100644 index 00000000000000..7538ffd7f79f23 --- /dev/null +++ b/runtime/bindings/python/src/pyopenvino/core/core.cpp @@ -0,0 +1,112 @@ +// Copyright (C) 2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "pyopenvino/core/core.hpp" + +#include +#include + +#include +#include + +#include 
"common.hpp" + +namespace py = pybind11; + +using ConfigMap = std::map; + +std::string to_string(py::handle handle) { + auto encodedString = PyUnicode_AsUTF8String(handle.ptr()); + return PyBytes_AsString(encodedString); +} + +void regclass_Core(py::module m) { + py::class_> cls(m, "Core"); + cls.def(py::init(), py::arg("xml_config_file") = ""); + + cls.def("set_config", + (void (ov::runtime::Core::*)(const ConfigMap&, const std::string&)) & ov::runtime::Core::set_config, + py::arg("config"), + py::arg("device_name") = ""); + + cls.def( + "compile_model", + (ov::runtime::ExecutableNetwork( + ov::runtime::Core::*)(const std::shared_ptr&, const std::string&, const ConfigMap&)) & + ov::runtime::Core::compile_model, + py::arg("network"), + py::arg("device_name"), + py::arg("config") = py::dict()); + + cls.def("get_versions", &ov::runtime::Core::get_versions); + + cls.def("read_model", + (std::shared_ptr(ov::runtime::Core::*)(const std::string&, const std::string&) const) & + ov::runtime::Core::read_model, + py::arg("model"), + py::arg("weights") = ""); + + cls.def( + "read_model", + (std::shared_ptr(ov::runtime::Core::*)(const std::string&, const ov::runtime::Tensor&) const) & + ov::runtime::Core::read_model, + py::arg("model"), + py::arg("weights")); + + cls.def( + "read_model", + [](ov::runtime::Core& self, py::object model, py::object weights) { + return self.read_model(py::str(model), py::str(weights)); + }, + py::arg("model"), + py::arg("weights") = ""); + + cls.def( + "import_model", + (ov::runtime::ExecutableNetwork(ov::runtime::Core::*)(std::istream&, const std::string&, const ConfigMap&)) & + ov::runtime::Core::import_model, + py::arg("model_file"), + py::arg("device_name"), + py::arg("config") = py::none()); + + cls.def( + "get_config", + [](ov::runtime::Core& self, const std::string& device_name, const std::string& name) -> py::handle { + return Common::parse_parameter(self.get_config(device_name, name)); + }, + py::arg("device_name"), + py::arg("name")); + + cls.def( + "get_metric", + [](ov::runtime::Core& self, const std::string device_name, const std::string name) -> py::handle { + return Common::parse_parameter(self.get_metric(device_name, name)); + }, + py::arg("device_name"), + py::arg("name")); + + cls.def("register_plugin", &ov::runtime::Core::register_plugin, py::arg("plugin_name"), py::arg("device_name")); + + cls.def("register_plugins", &ov::runtime::Core::register_plugins, py::arg("xml_config_file")); + + cls.def("unload_plugin", &ov::runtime::Core::unload_plugin, py::arg("device_name")); + + cls.def( + "query_model", + (ov::runtime::SupportedOpsMap( + ov::runtime::Core::*)(const std::shared_ptr&, const std::string&, const ConfigMap&)) & + ov::runtime::Core::query_model, + py::arg("model"), + py::arg("device_name"), + py::arg("config") = py::dict()); + + cls.def( + "add_extension", + [](ov::runtime::Core& self, const std::string& library_path) { + return self.add_extension(library_path); + }, + py::arg("library_path")); + + cls.def_property_readonly("available_devices", &ov::runtime::Core::get_available_devices); +} diff --git a/runtime/bindings/python/src/pyopenvino/core/ie_core.hpp b/runtime/bindings/python/src/pyopenvino/core/core.hpp similarity index 100% rename from runtime/bindings/python/src/pyopenvino/core/ie_core.hpp rename to runtime/bindings/python/src/pyopenvino/core/core.hpp diff --git a/runtime/bindings/python/src/pyopenvino/core/executable_network.cpp b/runtime/bindings/python/src/pyopenvino/core/executable_network.cpp new file mode 100644 index 
00000000000000..389a865aea1b0a --- /dev/null +++ b/runtime/bindings/python/src/pyopenvino/core/executable_network.cpp @@ -0,0 +1,107 @@ +// Copyright (C) 2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "openvino/runtime/executable_network.hpp" + +#include + +#include "common.hpp" +#include "pyopenvino/core/containers.hpp" +#include "pyopenvino/core/infer_request.hpp" + +PYBIND11_MAKE_OPAQUE(Containers::TensorIndexMap); +PYBIND11_MAKE_OPAQUE(Containers::TensorNameMap); + +namespace py = pybind11; + +void regclass_ExecutableNetwork(py::module m) { + py::class_> cls( + m, + "ExecutableNetwork"); + + cls.def("create_infer_request", [](ov::runtime::ExecutableNetwork& self) { + return InferRequestWrapper(self.create_infer_request(), self.inputs(), self.outputs()); + }); + + cls.def( + "_infer_new_request", + [](ov::runtime::ExecutableNetwork& self, const py::dict& inputs) { + auto request = self.create_infer_request(); + const auto key = inputs.begin()->first; + if (!inputs.empty()) { + if (py::isinstance(key)) { + auto inputs_map = Common::cast_to_tensor_name_map(inputs); + for (auto&& input : inputs_map) { + request.set_tensor(input.first, input.second); + } + } else if (py::isinstance(key)) { + auto inputs_map = Common::cast_to_tensor_index_map(inputs); + for (auto&& input : inputs_map) { + request.set_input_tensor(input.first, input.second); + } + } else { + throw py::type_error("Incompatible key type! Supported types are string and int."); + } + } + + request.infer(); + + Containers::InferResults results; + for (const auto out : self.outputs()) { + results.push_back(request.get_tensor(out)); + } + return results; + }, + py::arg("inputs")); + + cls.def("export_model", &ov::runtime::ExecutableNetwork::export_model, py::arg("network_model")); + + cls.def( + "get_config", + [](ov::runtime::ExecutableNetwork& self, const std::string& name) -> py::handle { + return Common::parse_parameter(self.get_config(name)); + }, + py::arg("name")); + + cls.def( + "get_metric", + [](ov::runtime::ExecutableNetwork& self, const std::string& name) -> py::handle { + return Common::parse_parameter(self.get_metric(name)); + }, + py::arg("name")); + + cls.def("get_runtime_function", &ov::runtime::ExecutableNetwork::get_runtime_function); + + cls.def_property_readonly("inputs", &ov::runtime::ExecutableNetwork::inputs); + + cls.def("input", + (ov::Output(ov::runtime::ExecutableNetwork::*)() const) & + ov::runtime::ExecutableNetwork::input); + + cls.def("input", + (ov::Output(ov::runtime::ExecutableNetwork::*)(size_t) const) & + ov::runtime::ExecutableNetwork::input, + py::arg("i")); + + cls.def("input", + (ov::Output(ov::runtime::ExecutableNetwork::*)(const std::string&) const) & + ov::runtime::ExecutableNetwork::input, + py::arg("tensor_name")); + + cls.def_property_readonly("outputs", &ov::runtime::ExecutableNetwork::outputs); + + cls.def("output", + (ov::Output(ov::runtime::ExecutableNetwork::*)() const) & + ov::runtime::ExecutableNetwork::output); + + cls.def("output", + (ov::Output(ov::runtime::ExecutableNetwork::*)(size_t) const) & + ov::runtime::ExecutableNetwork::output, + py::arg("i")); + + cls.def("output", + (ov::Output(ov::runtime::ExecutableNetwork::*)(const std::string&) const) & + ov::runtime::ExecutableNetwork::output, + py::arg("tensor_name")); +} diff --git a/runtime/bindings/python/src/pyopenvino/core/ie_executable_network.hpp b/runtime/bindings/python/src/pyopenvino/core/executable_network.hpp similarity index 100% rename from 
runtime/bindings/python/src/pyopenvino/core/ie_executable_network.hpp rename to runtime/bindings/python/src/pyopenvino/core/executable_network.hpp diff --git a/runtime/bindings/python/src/pyopenvino/core/ie_core.cpp b/runtime/bindings/python/src/pyopenvino/core/ie_core.cpp deleted file mode 100644 index 9d37bfb00f7fe3..00000000000000 --- a/runtime/bindings/python/src/pyopenvino/core/ie_core.cpp +++ /dev/null @@ -1,160 +0,0 @@ -// Copyright (C) 2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "pyopenvino/core/ie_core.hpp" - -#include - -#include - -#include "common.hpp" - -namespace py = pybind11; - -std::string to_string(py::handle handle) { - auto encodedString = PyUnicode_AsUTF8String(handle.ptr()); - return PyBytes_AsString(encodedString); -} - -void regclass_Core(py::module m) { - py::class_> cls(m, "Core"); - cls.def(py::init()); - cls.def(py::init()); - - cls.def( - "set_config", - [](InferenceEngine::Core& self, const py::dict& config, const std::string& device_name) { - std::map config_map; - for (auto item : config) { - config_map[to_string(item.first)] = to_string(item.second); - } - self.SetConfig(config_map, device_name); - }, - py::arg("config"), - py::arg("device_name")); - - cls.def( - "load_network", - [](InferenceEngine::Core& self, - const InferenceEngine::CNNNetwork& network, - const std::string& device_name, - const std::map& config) { - return self.LoadNetwork(network, device_name, config); - }, - py::arg("network"), - py::arg("device_name"), - py::arg("config") = py::dict()); - - cls.def( - "add_extension", - [](InferenceEngine::Core& self, const std::string& extension_path, const std::string& device_name) { - auto extension_ptr = InferenceEngine::make_so_pointer(extension_path); - auto extension = std::dynamic_pointer_cast(extension_ptr); - self.AddExtension(extension, device_name); - }, - py::arg("extension_path"), - py::arg("device_name")); - - cls.def( - "get_versions", - [](InferenceEngine::Core& self, const std::string& device_name) { - return self.GetVersions(device_name); - }, - py::arg("device_name")); - - cls.def( - "read_network", - [](InferenceEngine::Core& self, py::bytes model, py::bytes weights) { - InferenceEngine::MemoryBlob::Ptr weights_blob; - if (weights) { - std::string weights_bytes = weights; - uint8_t* bin = (uint8_t*)weights_bytes.c_str(); - size_t bin_size = weights_bytes.length(); - InferenceEngine::TensorDesc tensorDesc(InferenceEngine::Precision::U8, - {bin_size}, - InferenceEngine::Layout::C); - weights_blob = InferenceEngine::make_shared_blob(tensorDesc); - weights_blob->allocate(); - memcpy(weights_blob->rwmap().as(), bin, bin_size); - } - return self.ReadNetwork(model, weights_blob); - }, - py::arg("model"), - py::arg("weights")); - - cls.def( - "read_network", - [](InferenceEngine::Core& self, const std::string& model, const std::string& weights) { - return self.ReadNetwork(model, weights); - }, - py::arg("model"), - py::arg("weights") = ""); - - cls.def( - "read_network", - [](InferenceEngine::Core& self, const std::string& model, py::handle blob) { - return self.ReadNetwork(model, Common::cast_to_blob(blob)); - }, - py::arg("model"), - py::arg("blob")); - - cls.def( - "read_network", - [](InferenceEngine::Core& self, py::object model, py::object weights) { - return self.ReadNetwork(py::str(model), py::str(weights)); - }, - py::arg("model"), - py::arg("weights") = ""); - - cls.def( - "import_network", - [](InferenceEngine::Core& self, - const std::string& model_file, - const std::string& device_name, - 
const std::map& config) { - return self.ImportNetwork(model_file, device_name, config); - }, - py::arg("model_file"), - py::arg("device_name"), - py::arg("config") = py::none()); - - cls.def( - "get_config", - [](InferenceEngine::Core& self, const std::string& device_name, const std::string& config_name) -> py::handle { - return Common::parse_parameter(self.GetConfig(device_name, config_name)); - }, - py::arg("device_name"), - py::arg("config_name")); - - cls.def( - "get_metric", - [](InferenceEngine::Core& self, std::string device_name, std::string metric_name) -> py::handle { - return Common::parse_parameter(self.GetMetric(device_name, metric_name)); - }, - py::arg("device_name"), - py::arg("metric_name")); - - cls.def("register_plugin", - &InferenceEngine::Core::RegisterPlugin, - py::arg("plugin_name"), - py::arg("device_name") = py::str()); - - cls.def("register_plugins", &InferenceEngine::Core::RegisterPlugins); - - cls.def("unregister_plugin", &InferenceEngine::Core::UnregisterPlugin, py::arg("device_name")); - - cls.def( - "query_network", - [](InferenceEngine::Core& self, - const InferenceEngine::CNNNetwork& network, - const std::string& device_name, - const std::map& config) { - return self.QueryNetwork(network, device_name, config).supportedLayersMap; - }, - py::arg("network"), - py::arg("device_name"), - py::arg("config") = py::dict()); - - cls.def_property_readonly("available_devices", &InferenceEngine::Core::GetAvailableDevices); -} diff --git a/runtime/bindings/python/src/pyopenvino/core/ie_executable_network.cpp b/runtime/bindings/python/src/pyopenvino/core/ie_executable_network.cpp deleted file mode 100644 index 37199110f09e68..00000000000000 --- a/runtime/bindings/python/src/pyopenvino/core/ie_executable_network.cpp +++ /dev/null @@ -1,91 +0,0 @@ -// Copyright (C) 2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 - -#include "pyopenvino/core/ie_executable_network.hpp" - -#include -#include - -#include "common.hpp" -#include "pyopenvino/core/containers.hpp" -#include "pyopenvino/core/ie_infer_request.hpp" -#include "pyopenvino/core/ie_input_info.hpp" - -namespace py = pybind11; - -void regclass_ExecutableNetwork(py::module m) { - py::class_> cls( - m, - "ExecutableNetwork"); - - cls.def("create_infer_request", [](InferenceEngine::ExecutableNetwork& self) { - auto request = InferRequestWrapper(self.CreateInferRequest()); - // Get Inputs and Outputs info from executable network - request._inputsInfo = self.GetInputsInfo(); - request._outputsInfo = self.GetOutputsInfo(); - // request.user_callback_defined = false; - return request; - }); - - cls.def( - "_infer", - [](InferenceEngine::ExecutableNetwork& self, const py::dict& inputs) { - // Create temporary InferRequest - auto request = self.CreateInferRequest(); - // Update inputs if there are any - if (!inputs.empty()) { - Common::set_request_blobs(request, inputs); //, self.GetInputsInfo()); - } - // Call Infer function - request.Infer(); - // Get output Blobs and return - Containers::PyResults results; - InferenceEngine::ConstOutputsDataMap outputsInfo = self.GetOutputsInfo(); - for (auto& out : outputsInfo) { - results[out.first] = request.GetBlob(out.first); - } - return results; - }, - py::arg("inputs")); - - cls.def("get_exec_graph_info", &InferenceEngine::ExecutableNetwork::GetExecGraphInfo); - - cls.def( - "export", - [](InferenceEngine::ExecutableNetwork& self, const std::string& modelFileName) { - self.Export(modelFileName); - }, - py::arg("model_file")); - - cls.def( - "get_config", - 
[](InferenceEngine::ExecutableNetwork& self, const std::string& config_name) -> py::handle { - return Common::parse_parameter(self.GetConfig(config_name)); - }, - py::arg("config_name")); - - cls.def( - "get_metric", - [](InferenceEngine::ExecutableNetwork& self, const std::string& metric_name) -> py::handle { - return Common::parse_parameter(self.GetMetric(metric_name)); - }, - py::arg("metric_name")); - - cls.def_property_readonly("input_info", [](InferenceEngine::ExecutableNetwork& self) { - Containers::PyConstInputsDataMap inputs; - const InferenceEngine::ConstInputsDataMap& inputsInfo = self.GetInputsInfo(); - for (const auto& in : inputsInfo) { - inputs[in.first] = in.second; - } - return inputs; - }); - - cls.def_property_readonly("output_info", [](InferenceEngine::ExecutableNetwork& self) { - Containers::PyOutputsDataMap outputs; - InferenceEngine::ConstOutputsDataMap outputsInfo = self.GetOutputsInfo(); - for (auto& out : outputsInfo) { - outputs[out.first] = out.second; - } - return outputs; - }); -} diff --git a/runtime/bindings/python/src/pyopenvino/core/ie_infer_queue.cpp b/runtime/bindings/python/src/pyopenvino/core/ie_infer_queue.cpp index d54e7cce69c9ff..e80cd33105f01b 100644 --- a/runtime/bindings/python/src/pyopenvino/core/ie_infer_queue.cpp +++ b/runtime/bindings/python/src/pyopenvino/core/ie_infer_queue.cpp @@ -18,7 +18,7 @@ #include #include "pyopenvino/core/common.hpp" -#include "pyopenvino/core/ie_infer_request.hpp" +#include "pyopenvino/core/infer_request.hpp" #define INVALID_ID -1 @@ -59,16 +59,9 @@ class InferQueue { size_t request_id = _idle_handles.front(); - InferenceEngine::StatusCode status = - _requests[request_id]._request.Wait(InferenceEngine::IInferRequest::WaitMode::STATUS_ONLY); - - if (status == InferenceEngine::StatusCode::RESULT_NOT_READY) { - status = _requests[request_id]._request.Wait(InferenceEngine::IInferRequest::WaitMode::RESULT_READY); - } - py::dict request_info = py::dict(); request_info["id"] = request_id; - request_info["status"] = status; + // request_info["status"] = true; // TODO return request_info; } @@ -87,7 +80,7 @@ class InferQueue { return idle_request_id; } - std::vector waitAll() { + std::vector waitAll() { // Wait for all requests to return with callback thus updating // _idle_handles so it matches the size of requests py::gil_scoped_release release; @@ -96,10 +89,10 @@ class InferQueue { return _idle_handles.size() == _requests.size(); }); - std::vector statuses; + std::vector statuses; for (size_t handle = 0; handle < _requests.size(); handle++) { - statuses.push_back(_requests[handle]._request.Wait(InferenceEngine::IInferRequest::WaitMode::RESULT_READY)); + statuses.push_back(_requests[handle]._request.wait_for(std::chrono::milliseconds(0))); } return statuses; @@ -107,8 +100,8 @@ class InferQueue { void setDefaultCallbacks() { for (size_t handle = 0; handle < _requests.size(); handle++) { - _requests[handle]._request.SetCompletionCallback([this, handle /* ... */]() { - _requests[handle]._endTime = Time::now(); + _requests[handle]._request.set_callback([this, handle /* ... */](std::exception_ptr exception_ptr) { + _requests[handle]._end_time = Time::now(); // Add idle handle to queue _idle_handles.push(handle); // Notify locks in getIdleRequestId() or waitAll() functions @@ -119,16 +112,18 @@ class InferQueue { void setCustomCallbacks(py::function f_callback) { for (size_t handle = 0; handle < _requests.size(); handle++) { - _requests[handle]._request.SetCompletionCallback([this, f_callback, handle /* ... 
*/]() { - _requests[handle]._endTime = Time::now(); - InferenceEngine::StatusCode statusCode = - _requests[handle]._request.Wait(InferenceEngine::IInferRequest::WaitMode::STATUS_ONLY); - if (statusCode == InferenceEngine::StatusCode::RESULT_NOT_READY) { - statusCode = InferenceEngine::StatusCode::OK; + _requests[handle]._request.set_callback([this, f_callback, handle](std::exception_ptr exception_ptr) { + _requests[handle]._end_time = Time::now(); + try { + if (exception_ptr) { + std::rethrow_exception(exception_ptr); + } + } catch (const std::exception& e) { + IE_THROW() << "Caught exception: " << e.what(); } // Acquire GIL, execute Python function py::gil_scoped_acquire acquire; - f_callback(_requests[handle], statusCode, _user_ids[handle]); + f_callback(_requests[handle], _user_ids[handle]); // Add idle handle to queue _idle_handles.push(handle); // Notify locks in getIdleRequestId() or waitAll() functions @@ -145,89 +140,89 @@ class InferQueue { std::condition_variable _cv; }; -void regclass_InferQueue(py::module m) { - py::class_> cls(m, "InferQueue"); - - cls.def(py::init([](InferenceEngine::ExecutableNetwork& net, size_t jobs) { - if (jobs == 0) { - const InferenceEngine::ExecutableNetwork& _net = net; - jobs = (size_t)Common::get_optimal_number_of_requests(_net); - } - - std::vector requests; - std::queue idle_handles; - std::vector user_ids(jobs); - - for (size_t handle = 0; handle < jobs; handle++) { - auto request = InferRequestWrapper(net.CreateInferRequest()); - // Get Inputs and Outputs info from executable network - request._inputsInfo = net.GetInputsInfo(); - request._outputsInfo = net.GetOutputsInfo(); - - requests.push_back(request); - idle_handles.push(handle); - } - - return new InferQueue(requests, idle_handles, user_ids); - }), - py::arg("network"), - py::arg("jobs") = 0); - - cls.def( - "_async_infer", - [](InferQueue& self, const py::dict inputs, py::object userdata) { - // getIdleRequestId function has an intention to block InferQueue - // until there is at least one idle (free to use) InferRequest - auto handle = self.getIdleRequestId(); - // Set new inputs label/id from user - self._user_ids[handle] = userdata; - // Update inputs of picked InferRequest - if (!inputs.empty()) { - Common::set_request_blobs(self._requests[handle]._request, inputs); - } - // Now GIL can be released - we are NOT working with Python objects in this block - { - py::gil_scoped_release release; - self._requests[handle]._startTime = Time::now(); - // Start InferRequest in asynchronus mode - self._requests[handle]._request.StartAsync(); - } - }, - py::arg("inputs"), - py::arg("userdata")); - - cls.def("is_ready", [](InferQueue& self) { - return self._is_ready(); - }); - - cls.def("wait_all", [](InferQueue& self) { - return self.waitAll(); - }); - - cls.def("get_idle_request_info", [](InferQueue& self) { - return self._getIdleRequestInfo(); - }); - - cls.def("set_infer_callback", [](InferQueue& self, py::function f_callback) { - self.setCustomCallbacks(f_callback); - }); - - cls.def("__len__", [](InferQueue& self) { - return self._requests.size(); - }); - - cls.def( - "__iter__", - [](InferQueue& self) { - return py::make_iterator(self._requests.begin(), self._requests.end()); - }, - py::keep_alive<0, 1>()); /* Keep set alive while iterator is used */ - - cls.def("__getitem__", [](InferQueue& self, size_t i) { - return self._requests[i]; - }); - - cls.def_property_readonly("userdata", [](InferQueue& self) { - return self._user_ids; - }); -} +// void regclass_InferQueue(py::module m) { +// 
py::class_> cls(m, "InferQueue"); + +// cls.def(py::init([](InferenceEngine::ExecutableNetwork& net, size_t jobs) { +// if (jobs == 0) { +// const InferenceEngine::ExecutableNetwork& _net = net; +// jobs = (size_t)Common::get_optimal_number_of_requests(_net); +// } + +// std::vector requests; +// std::queue idle_handles; +// std::vector user_ids(jobs); + +// for (size_t handle = 0; handle < jobs; handle++) { +// auto request = InferRequestWrapper(net.CreateInferRequest()); +// // Get Inputs and Outputs info from executable network +// request._inputsInfo = net.GetInputsInfo(); +// request._outputsInfo = net.GetOutputsInfo(); + +// requests.push_back(request); +// idle_handles.push(handle); +// } + +// return new InferQueue(requests, idle_handles, user_ids); +// }), +// py::arg("network"), +// py::arg("jobs") = 0); + +// cls.def( +// "_async_infer", +// [](InferQueue& self, const py::dict inputs, py::object userdata) { +// // getIdleRequestId function has an intention to block InferQueue +// // until there is at least one idle (free to use) InferRequest +// auto handle = self.getIdleRequestId(); +// // Set new inputs label/id from user +// self._user_ids[handle] = userdata; +// // Update inputs of picked InferRequest +// if (!inputs.empty()) { +// Common::set_request_blobs(self._requests[handle]._request, inputs); +// } +// // Now GIL can be released - we are NOT working with Python objects in this block +// { +// py::gil_scoped_release release; +// self._requests[handle]._start_time = Time::now(); +// // Start InferRequest in asynchronus mode +// self._requests[handle]._request.start_async(); +// } +// }, +// py::arg("inputs"), +// py::arg("userdata")); + +// cls.def("is_ready", [](InferQueue& self) { +// return self._is_ready(); +// }); + +// cls.def("wait_all", [](InferQueue& self) { +// return self.waitAll(); +// }); + +// cls.def("get_idle_request_info", [](InferQueue& self) { +// return self._getIdleRequestInfo(); +// }); + +// cls.def("set_infer_callback", [](InferQueue& self, py::function f_callback) { +// self.setCustomCallbacks(f_callback); +// }); + +// cls.def("__len__", [](InferQueue& self) { +// return self._requests.size(); +// }); + +// cls.def( +// "__iter__", +// [](InferQueue& self) { +// return py::make_iterator(self._requests.begin(), self._requests.end()); +// }, +// py::keep_alive<0, 1>()); /* Keep set alive while iterator is used */ + +// cls.def("__getitem__", [](InferQueue& self, size_t i) { +// return self._requests[i]; +// }); + +// cls.def_property_readonly("userdata", [](InferQueue& self) { +// return self._user_ids; +// }); +// } diff --git a/runtime/bindings/python/src/pyopenvino/core/ie_infer_request.cpp b/runtime/bindings/python/src/pyopenvino/core/ie_infer_request.cpp deleted file mode 100644 index f45ddd6a5cd73d..00000000000000 --- a/runtime/bindings/python/src/pyopenvino/core/ie_infer_request.cpp +++ /dev/null @@ -1,210 +0,0 @@ -// Copyright (C) 2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 - -#include "pyopenvino/core/ie_infer_request.hpp" - -#include -#include - -#include - -#include "pyopenvino/core/common.hpp" -#include "pyopenvino/core/containers.hpp" -#include "pyopenvino/core/ie_executable_network.hpp" -#include "pyopenvino/core/ie_preprocess_info.hpp" - -namespace py = pybind11; - -void regclass_InferRequest(py::module m) { - py::class_> cls(m, "InferRequest"); - - cls.def( - "set_batch", - [](InferRequestWrapper& self, const int size) { - self._request.SetBatch(size); - }, - py::arg("size")); - - cls.def( - "get_blob", - 
[](InferRequestWrapper& self, const std::string& name) { - return self._request.GetBlob(name); - }, - py::arg("name")); - - cls.def( - "set_blob", - [](InferRequestWrapper& self, const std::string& name, py::handle& blob) { - self._request.SetBlob(name, Common::cast_to_blob(blob)); - }, - py::arg("name"), - py::arg("blob")); - - cls.def( - "set_blob", - [](InferRequestWrapper& self, - const std::string& name, - py::handle& blob, - const InferenceEngine::PreProcessInfo& info) { - self._request.SetBlob(name, Common::cast_to_blob(blob)); - }, - py::arg("name"), - py::arg("blob"), - py::arg("info")); - - cls.def( - "set_input", - [](InferRequestWrapper& self, const py::dict& inputs) { - Common::set_request_blobs(self._request, inputs); - }, - py::arg("inputs")); - - cls.def( - "set_output", - [](InferRequestWrapper& self, const py::dict& results) { - Common::set_request_blobs(self._request, results); - }, - py::arg("results")); - - cls.def( - "_infer", - [](InferRequestWrapper& self, const py::dict& inputs) { - // Update inputs if there are any - if (!inputs.empty()) { - Common::set_request_blobs(self._request, inputs); - } - // Call Infer function - self._startTime = Time::now(); - self._request.Infer(); - self._endTime = Time::now(); - // Get output Blobs and return - Containers::PyResults results; - for (auto& out : self._outputsInfo) { - results[out.first] = self._request.GetBlob(out.first); - } - return results; - }, - py::arg("inputs")); - - cls.def( - "_async_infer", - [](InferRequestWrapper& self, const py::dict inputs, py::object userdata) { - py::gil_scoped_release release; - if (!inputs.empty()) { - Common::set_request_blobs(self._request, inputs); - } - // TODO: check for None so next async infer userdata can be updated - // if (!userdata.empty()) - // { - // if (user_callback_defined) - // { - // self._request.SetCompletionCallback([self, userdata]() { - // // py::gil_scoped_acquire acquire; - // auto statusCode = const_cast(self).Wait( - // InferenceEngine::IInferRequest::WaitMode::STATUS_ONLY); - // self._request.user_callback(self, statusCode, userdata); - // // py::gil_scoped_release release; - // }); - // } - // else - // { - // py::print("There is no callback function!"); - // } - // } - self._startTime = Time::now(); - self._request.StartAsync(); - }, - py::arg("inputs"), - py::arg("userdata")); - - cls.def("cancel", [](InferRequestWrapper& self) { - self._request.Cancel(); - }); - - cls.def( - "wait", - [](InferRequestWrapper& self, int64_t millis_timeout) { - py::gil_scoped_release release; - return self._request.Wait(millis_timeout); - }, - py::arg("millis_timeout") = InferenceEngine::IInferRequest::WaitMode::RESULT_READY); - - cls.def( - "set_completion_callback", - [](InferRequestWrapper& self, py::function f_callback, py::object userdata) { - self._request.SetCompletionCallback([&self, f_callback, userdata]() { - self._endTime = Time::now(); - InferenceEngine::StatusCode statusCode = - self._request.Wait(InferenceEngine::IInferRequest::WaitMode::STATUS_ONLY); - if (statusCode == InferenceEngine::StatusCode::RESULT_NOT_READY) { - statusCode = InferenceEngine::StatusCode::OK; - } - // Acquire GIL, execute Python function - py::gil_scoped_acquire acquire; - f_callback(self, statusCode, userdata); - }); - }, - py::arg("f_callback"), - py::arg("userdata")); - - cls.def("get_perf_counts", [](InferRequestWrapper& self) { - std::map perfMap; - perfMap = self._request.GetPerformanceCounts(); - py::dict perf_map; - - for (auto it : perfMap) { - py::dict profile_info; - switch 
(it.second.status) { - case InferenceEngine::InferenceEngineProfileInfo::EXECUTED: - profile_info["status"] = "EXECUTED"; - break; - case InferenceEngine::InferenceEngineProfileInfo::NOT_RUN: - profile_info["status"] = "NOT_RUN"; - break; - case InferenceEngine::InferenceEngineProfileInfo::OPTIMIZED_OUT: - profile_info["status"] = "OPTIMIZED_OUT"; - break; - default: - profile_info["status"] = "UNKNOWN"; - } - profile_info["exec_type"] = it.second.exec_type; - profile_info["layer_type"] = it.second.layer_type; - profile_info["cpu_time"] = it.second.cpu_uSec; - profile_info["real_time"] = it.second.realTime_uSec; - profile_info["execution_index"] = it.second.execution_index; - perf_map[it.first.c_str()] = profile_info; - } - return perf_map; - }); - - cls.def( - "preprocess_info", - [](InferRequestWrapper& self, const std::string& name) { - return self._request.GetPreProcess(name); - }, - py::arg("name")); - - // cls.def_property_readonly("preprocess_info", [](InferRequestWrapper& self) { - // - // }); - - cls.def_property_readonly("input_blobs", [](InferRequestWrapper& self) { - Containers::PyResults input_blobs; - for (auto& in : self._inputsInfo) { - input_blobs[in.first] = self._request.GetBlob(in.first); - } - return input_blobs; - }); - - cls.def_property_readonly("output_blobs", [](InferRequestWrapper& self) { - Containers::PyResults output_blobs; - for (auto& out : self._outputsInfo) { - output_blobs[out.first] = self._request.GetBlob(out.first); - } - return output_blobs; - }); - - cls.def_property_readonly("latency", [](InferRequestWrapper& self) { - return self.getLatency(); - }); -} diff --git a/runtime/bindings/python/src/pyopenvino/core/ie_infer_request.hpp b/runtime/bindings/python/src/pyopenvino/core/ie_infer_request.hpp deleted file mode 100644 index 13afbac440360d..00000000000000 --- a/runtime/bindings/python/src/pyopenvino/core/ie_infer_request.hpp +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright (C) 2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include - -#include - -#include -#include -#include - -namespace py = pybind11; - -typedef std::chrono::high_resolution_clock Time; -typedef std::chrono::nanoseconds ns; - -class InferRequestWrapper { -public: - InferRequestWrapper(InferenceEngine::InferRequest request) - : _request(request) - { - } - // ~InferRequestWrapper() = default; - - // bool user_callback_defined; - // py::function user_callback; - - double getLatency() { - auto execTime = std::chrono::duration_cast(_endTime - _startTime); - return static_cast(execTime.count()) * 0.000001; - } - - InferenceEngine::InferRequest _request; - InferenceEngine::ConstInputsDataMap _inputsInfo; - InferenceEngine::ConstOutputsDataMap _outputsInfo; - Time::time_point _startTime; - Time::time_point _endTime; -}; - -void regclass_InferRequest(py::module m); diff --git a/runtime/bindings/python/src/pyopenvino/core/ie_network.cpp b/runtime/bindings/python/src/pyopenvino/core/ie_network.cpp index b57765f19cf55c..e06f9bf79bb4c3 100644 --- a/runtime/bindings/python/src/pyopenvino/core/ie_network.cpp +++ b/runtime/bindings/python/src/pyopenvino/core/ie_network.cpp @@ -79,15 +79,6 @@ void regclass_IENetwork(py::module m) { &InferenceEngine::CNNNetwork::getBatchSize, &InferenceEngine::CNNNetwork::setBatchSize); - cls.def_property_readonly("input_info", [](InferenceEngine::CNNNetwork& self) { - Containers::PyInputsDataMap inputs; - const InferenceEngine::InputsDataMap& inputsInfo = self.getInputsInfo(); - for (auto& in : inputsInfo) { - inputs[in.first] = 
in.second; - } - return inputs; - }); - cls.def_property_readonly("outputs", [](InferenceEngine::CNNNetwork& self) { return self.getOutputsInfo(); }); diff --git a/runtime/bindings/python/src/pyopenvino/core/ie_version.cpp b/runtime/bindings/python/src/pyopenvino/core/ie_version.cpp deleted file mode 100644 index 158cda68ceaaef..00000000000000 --- a/runtime/bindings/python/src/pyopenvino/core/ie_version.cpp +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright (C) 2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "pyopenvino/core/ie_version.hpp" - -#include - -namespace py = pybind11; - -void regclass_Version(py::module m) { - py::class_ cls(m, "Version"); - - cls.def_readonly("build_number", &InferenceEngine::Version::buildNumber); - cls.def_readonly("description", &InferenceEngine::Version::description); - cls.def_readwrite("api_version", &InferenceEngine::Version::apiVersion); - - cls.def_property_readonly("major", [](InferenceEngine::Version& self) { - return IE_VERSION_MAJOR; - }); - - cls.def_property_readonly("minor", [](InferenceEngine::Version& self) { - return IE_VERSION_MINOR; - }); -} \ No newline at end of file diff --git a/runtime/bindings/python/src/pyopenvino/core/infer_request.cpp b/runtime/bindings/python/src/pyopenvino/core/infer_request.cpp new file mode 100644 index 00000000000000..b57d4f7569e2d6 --- /dev/null +++ b/runtime/bindings/python/src/pyopenvino/core/infer_request.cpp @@ -0,0 +1,288 @@ +// Copyright (C) 2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 + +#include "pyopenvino/core/infer_request.hpp" + +#include +#include +#include +#include + +#include + +#include "pyopenvino/core/common.hpp" +#include "pyopenvino/core/containers.hpp" + +PYBIND11_MAKE_OPAQUE(Containers::TensorIndexMap); +PYBIND11_MAKE_OPAQUE(Containers::TensorNameMap); + +namespace py = pybind11; + +void regclass_InferRequest(py::module m) { + py::class_> cls(m, "InferRequest"); + cls.def( + "set_tensors", + [](InferRequestWrapper& self, const py::dict& inputs) { + auto tensor_map = Common::cast_to_tensor_name_map(inputs); + for (auto&& input : tensor_map) { + self._request.set_tensor(input.first, input.second); + } + }, + py::arg("inputs")); + + cls.def( + "set_output_tensors", + [](InferRequestWrapper& self, const py::dict& outputs) { + auto outputs_map = Common::cast_to_tensor_index_map(outputs); + for (auto&& output : outputs_map) { + self._request.set_output_tensor(output.first, output.second); + } + }, + py::arg("outputs")); + + cls.def( + "set_input_tensors", + [](InferRequestWrapper& self, const py::dict& inputs) { + auto inputs_map = Common::cast_to_tensor_index_map(inputs); + for (auto&& input : inputs_map) { + self._request.set_input_tensor(input.first, input.second); + } + }, + py::arg("inputs")); + + cls.def( + "_infer", + [](InferRequestWrapper& self, const py::dict& inputs) { + // Update inputs if there are any + if (!inputs.empty()) { + if (py::isinstance(inputs.begin()->first)) { + auto inputs_map = Common::cast_to_tensor_name_map(inputs); + for (auto&& input : inputs_map) { + self._request.set_tensor(input.first, input.second); + } + } else if (py::isinstance(inputs.begin()->first)) { + auto inputs_map = Common::cast_to_tensor_index_map(inputs); + for (auto&& input : inputs_map) { + self._request.set_input_tensor(input.first, input.second); + } + } + } + // Call Infer function + self._start_time = Time::now(); + self._request.infer(); + self._end_time = Time::now(); + Containers::InferResults results; + for (auto& out : self._outputs) { + 
results.push_back(self._request.get_tensor(out)); + } + return results; + }, + py::arg("inputs")); + + cls.def( + "_start_async", + [](InferRequestWrapper& self, const py::dict& inputs, py::object& userdata) { + // Update inputs if there are any + if (!inputs.empty()) { + if (py::isinstance(inputs.begin()->first)) { + auto inputs_map = Common::cast_to_tensor_name_map(inputs); + for (auto&& input : inputs_map) { + self._request.set_tensor(input.first, input.second); + } + } else if (py::isinstance(inputs.begin()->first)) { + auto inputs_map = Common::cast_to_tensor_index_map(inputs); + for (auto&& input : inputs_map) { + self._request.set_input_tensor(input.first, input.second); + } + } + } + if (userdata != py::none()) { + if (self.user_callback_defined) { + self.userdata = userdata; + } else { + PyErr_WarnEx(PyExc_RuntimeWarning, "There is no callback function!", 1); + } + } + py::gil_scoped_release release; + self._start_time = Time::now(); + self._request.start_async(); + }, + py::arg("inputs"), + py::arg("userdata")); + + cls.def("cancel", [](InferRequestWrapper& self) { + self._request.cancel(); + }); + + cls.def("wait", [](InferRequestWrapper& self) { + py::gil_scoped_release release; + self._request.wait(); + }); + + cls.def( + "wait_for", + [](InferRequestWrapper& self, const int timeout) { + py::gil_scoped_release release; + return self._request.wait_for(std::chrono::milliseconds(timeout)); + }, + py::arg("timeout")); + + cls.def( + "set_callback", + [](InferRequestWrapper& self, py::function f_callback, py::object& userdata) { + self.userdata = userdata; + self.user_callback_defined = true; + self._request.set_callback([&self, f_callback](std::exception_ptr exception_ptr) { + self._end_time = Time::now(); + try { + if (exception_ptr) { + std::rethrow_exception(exception_ptr); + } + } catch (const std::exception& e) { + throw ov::Exception("Caught exception: " + std::string(e.what())); + } + // Acquire GIL, execute Python function + py::gil_scoped_acquire acquire; + f_callback(self.userdata); + }); + }, + py::arg("f_callback"), + py::arg("userdata")); + + cls.def( + "get_tensor", + [](InferRequestWrapper& self, const std::string& name) { + return self._request.get_tensor(name); + }, + py::arg("name")); + + cls.def( + "get_tensor", + [](InferRequestWrapper& self, const ov::Output& port) { + return self._request.get_tensor(port); + }, + py::arg("port")); + + cls.def( + "get_tensor", + [](InferRequestWrapper& self, const ov::Output& port) { + return self._request.get_tensor(port); + }, + py::arg("port")); + + cls.def( + "get_input_tensor", + [](InferRequestWrapper& self, size_t idx) { + return self._request.get_input_tensor(idx); + }, + py::arg("idx")); + + cls.def("get_input_tensor", [](InferRequestWrapper& self) { + return self._request.get_input_tensor(); + }); + + cls.def( + "get_output_tensor", + [](InferRequestWrapper& self, size_t idx) { + return self._request.get_output_tensor(idx); + }, + py::arg("idx")); + + cls.def("get_output_tensor", [](InferRequestWrapper& self) { + return self._request.get_output_tensor(); + }); + + cls.def( + "set_tensor", + [](InferRequestWrapper& self, const std::string& name, const ov::runtime::Tensor& tensor) { + self._request.set_tensor(name, tensor); + }, + py::arg("name"), + py::arg("tensor")); + + cls.def( + "set_tensor", + [](InferRequestWrapper& self, const ov::Output& port, const ov::runtime::Tensor& tensor) { + self._request.set_tensor(port, tensor); + }, + py::arg("port"), + py::arg("tensor")); + + cls.def( + "set_tensor", + 
[](InferRequestWrapper& self, const ov::Output& port, const ov::runtime::Tensor& tensor) { + self._request.set_tensor(port, tensor); + }, + py::arg("port"), + py::arg("tensor")); + + cls.def( + "set_input_tensor", + [](InferRequestWrapper& self, size_t idx, const ov::runtime::Tensor& tensor) { + self._request.set_input_tensor(idx, tensor); + }, + py::arg("idx"), + py::arg("tensor")); + + cls.def( + "set_input_tensor", + [](InferRequestWrapper& self, const ov::runtime::Tensor& tensor) { + self._request.set_input_tensor(tensor); + }, + py::arg("tensor")); + + cls.def( + "set_output_tensor", + [](InferRequestWrapper& self, size_t idx, const ov::runtime::Tensor& tensor) { + self._request.set_output_tensor(idx, tensor); + }, + py::arg("idx"), + py::arg("tensor")); + + cls.def( + "set_output_tensor", + [](InferRequestWrapper& self, const ov::runtime::Tensor& tensor) { + self._request.set_output_tensor(tensor); + }, + py::arg("tensor")); + + cls.def("get_profiling_info", [](InferRequestWrapper& self) { + return self._request.get_profiling_info(); + }); + + cls.def_property_readonly("userdata", [](InferRequestWrapper& self) { + return self.userdata; + }); + + cls.def_property_readonly("inputs", [](InferRequestWrapper& self) { + return self._inputs; + }); + + cls.def_property_readonly("outputs", [](InferRequestWrapper& self) { + return self._outputs; + }); + + cls.def_property_readonly("input_tensors", [](InferRequestWrapper& self) { + std::vector tensors; + for (auto&& node : self._inputs) { + tensors.push_back(self._request.get_tensor(node)); + } + return tensors; + }); + + cls.def_property_readonly("output_tensors", [](InferRequestWrapper& self) { + std::vector tensors; + for (auto&& node : self._outputs) { + tensors.push_back(self._request.get_tensor(node)); + } + return tensors; + }); + + cls.def_property_readonly("latency", [](InferRequestWrapper& self) { + return self.get_latency(); + }); + + cls.def_property_readonly("profiling_info", [](InferRequestWrapper& self) { + return self._request.get_profiling_info(); + }); +} diff --git a/runtime/bindings/python/src/pyopenvino/core/infer_request.hpp b/runtime/bindings/python/src/pyopenvino/core/infer_request.hpp new file mode 100644 index 00000000000000..3ea9859db1fcc8 --- /dev/null +++ b/runtime/bindings/python/src/pyopenvino/core/infer_request.hpp @@ -0,0 +1,47 @@ +// Copyright (C) 2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include + +#include + +#include + +namespace py = pybind11; + +typedef std::chrono::high_resolution_clock Time; +typedef std::chrono::nanoseconds ns; + +class InferRequestWrapper { +public: + InferRequestWrapper(ov::runtime::InferRequest request) + : _request(request) + { + } + + InferRequestWrapper(ov::runtime::InferRequest request, const std::vector>& inputs, const std::vector>& outputs) + : _request(request), _inputs(inputs), _outputs(outputs) + { + } + // ~InferRequestWrapper() = default; + + bool user_callback_defined = false; + py::object userdata; + + double get_latency() { + auto execTime = std::chrono::duration_cast(_end_time - _start_time); + return static_cast(execTime.count()) * 0.000001; + } + + ov::runtime::InferRequest _request; + std::vector> _inputs; + std::vector> _outputs; + + Time::time_point _start_time; + Time::time_point _end_time; +}; + +void regclass_InferRequest(py::module m); diff --git a/runtime/bindings/python/src/pyopenvino/core/profiling_info.cpp b/runtime/bindings/python/src/pyopenvino/core/profiling_info.cpp new file mode 100644 index 
00000000000000..9619e070386017 --- /dev/null +++ b/runtime/bindings/python/src/pyopenvino/core/profiling_info.cpp @@ -0,0 +1,28 @@ +// Copyright (C) 2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "pyopenvino/core/profiling_info.hpp" + +#include + +#include "openvino/runtime/profiling_info.hpp" + +namespace py = pybind11; + +void regclass_ProfilingInfo(py::module m) { + py::class_> cls(m, "ProfilingInfo"); + cls.def(py::init<>()) + .def_readwrite("status", &ov::runtime::ProfilingInfo::status) + .def_readwrite("real_time", &ov::runtime::ProfilingInfo::real_time) + .def_readwrite("cpu_time", &ov::runtime::ProfilingInfo::cpu_time) + .def_readwrite("node_name", &ov::runtime::ProfilingInfo::node_name) + .def_readwrite("exec_type", &ov::runtime::ProfilingInfo::exec_type) + .def_readwrite("node_type", &ov::runtime::ProfilingInfo::node_type); + + py::enum_(cls, "Status") + .value("NOT_RUN", ov::runtime::ProfilingInfo::Status::NOT_RUN) + .value("OPTIMIZED_OUT", ov::runtime::ProfilingInfo::Status::OPTIMIZED_OUT) + .value("EXECUTED", ov::runtime::ProfilingInfo::Status::EXECUTED) + .export_values(); +} diff --git a/runtime/bindings/python/src/pyopenvino/core/profiling_info.hpp b/runtime/bindings/python/src/pyopenvino/core/profiling_info.hpp new file mode 100644 index 00000000000000..023935f620e913 --- /dev/null +++ b/runtime/bindings/python/src/pyopenvino/core/profiling_info.hpp @@ -0,0 +1,11 @@ +// Copyright (C) 2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include + +namespace py = pybind11; + +void regclass_ProfilingInfo(py::module m); diff --git a/runtime/bindings/python/src/pyopenvino/core/version.cpp b/runtime/bindings/python/src/pyopenvino/core/version.cpp new file mode 100644 index 00000000000000..bed253697fda06 --- /dev/null +++ b/runtime/bindings/python/src/pyopenvino/core/version.cpp @@ -0,0 +1,24 @@ +// Copyright (C) 2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "openvino/core/version.hpp" + +#include + +namespace py = pybind11; + +void regclass_Version(py::module m) { + py::class_ cls(m, "Version"); + + cls.def_readonly("build_number", &ov::Version::buildNumber); + cls.def_readonly("description", &ov::Version::description); + + cls.def_property_readonly("major", [](ov::Version& self) { + return OPENVINO_VERSION_MAJOR; + }); + + cls.def_property_readonly("minor", [](ov::Version& self) { + return OPENVINO_VERSION_MINOR; + }); +} diff --git a/runtime/bindings/python/src/pyopenvino/core/ie_version.hpp b/runtime/bindings/python/src/pyopenvino/core/version.hpp similarity index 100% rename from runtime/bindings/python/src/pyopenvino/core/ie_version.hpp rename to runtime/bindings/python/src/pyopenvino/core/version.hpp diff --git a/runtime/bindings/python/src/pyopenvino/graph/function.cpp b/runtime/bindings/python/src/pyopenvino/graph/function.cpp index 3496e88d0149dd..27e3d18b0ec079 100644 --- a/runtime/bindings/python/src/pyopenvino/graph/function.cpp +++ b/runtime/bindings/python/src/pyopenvino/graph/function.cpp @@ -7,6 +7,7 @@ #include #include +#include "openvino/core/partial_shape.hpp" #include "openvino/op/parameter.hpp" // ov::op::v0::Parameter #include "openvino/op/sink.hpp" #include "pyopenvino/graph/function.hpp" @@ -15,6 +16,16 @@ namespace py = pybind11; static const char* CAPSULE_NAME = "ngraph_function"; +void set_tensor_names(const ov::ParameterVector& parameters) { + for (const auto& param : parameters) { + ov::Output p = param; + if 
(p.get_node()->output(0).get_names().empty()) { + std::unordered_set p_names({p.get_node()->get_friendly_name()}); + p.get_node()->output(0).set_names(p_names); + } + } +} + void regclass_graph_Function(py::module m) { py::class_> function(m, "Function", py::module_local()); function.doc() = "openvino.impl.Function wraps ov::Function"; @@ -53,12 +64,15 @@ void regclass_graph_Function(py::module m) { String to set as function's friendly name. )"); - function.def(py::init>&, - const std::vector>&, - const std::string&>(), + function.def(py::init([](const std::vector>& results, + const ov::ParameterVector& parameters, + const std::string& name) { + set_tensor_names(parameters); + return std::make_shared(results, parameters, name); + }), py::arg("results"), py::arg("parameters"), - py::arg("name"), + py::arg("name") = "", R"( Create user-defined Function which is a representation of a model. @@ -74,12 +88,15 @@ void regclass_graph_Function(py::module m) { String to set as function's friendly name. )"); - function.def(py::init&, - const std::vector>&, - const std::string&>(), + function.def(py::init([](const std::shared_ptr& results, + const ov::ParameterVector& parameters, + const std::string& name) { + set_tensor_names(parameters); + return std::make_shared(results, parameters, name); + }), py::arg("result"), py::arg("parameters"), - py::arg("name"), + py::arg("name") = "", R"( Create user-defined Function which is a representation of a model. @@ -94,6 +111,41 @@ void regclass_graph_Function(py::module m) { name : str String to set as function's friendly name. )"); + + function.def( + "reshape", + [](ov::Function& self, const std::map& partial_shapes) { + self.reshape(partial_shapes); + }, + py::arg("partial_shapes"), + R"( + Parameters + ---------- + partial_shapes : Dict[string, PartialShape] + New partial shape for each input, keyed by tensor name. + + Returns + ---------- + reshape : void + )"); + + function.def( + "reshape", + [](ov::Function& self, const std::map, ov::PartialShape>& partial_shapes) { + self.reshape(partial_shapes); + }, + py::arg("partial_shapes"), + R"( + Parameters + ---------- + partial_shapes : Dict[Output, PartialShape] + New partial shape for each input, keyed by its Output. 
+ + Returns + ---------- + reshape : void + )"); + function.def("get_output_size", &ov::Function::get_output_size, R"( @@ -264,6 +316,42 @@ void regclass_graph_Function(py::module m) { ---------- is_dynamic : bool )"); + function.def("input", (ov::Output(ov::Function::*)()) & ov::Function::input); + + function.def("input", (ov::Output(ov::Function::*)(size_t)) & ov::Function::input, py::arg("i")); + + function.def("input", + (ov::Output(ov::Function::*)(const std::string&)) & ov::Function::input, + py::arg("tensor_name")); + + function.def("input", (ov::Output(ov::Function::*)() const) & ov::Function::input); + + function.def("input", + (ov::Output(ov::Function::*)(size_t) const) & ov::Function::input, + py::arg("i")); + + function.def("input", + (ov::Output(ov::Function::*)(const std::string&) const) & ov::Function::input, + py::arg("tensor_name")); + + function.def("output", (ov::Output(ov::Function::*)()) & ov::Function::output); + + function.def("output", (ov::Output(ov::Function::*)(size_t)) & ov::Function::output, py::arg("i")); + + function.def("output", + (ov::Output(ov::Function::*)(const std::string&)) & ov::Function::output, + py::arg("tensor_name")); + + function.def("output", (ov::Output(ov::Function::*)() const) & ov::Function::output); + + function.def("output", + (ov::Output(ov::Function::*)(size_t) const) & ov::Function::output, + py::arg("i")); + + function.def("output", + (ov::Output(ov::Function::*)(const std::string&) const) & ov::Function::output, + py::arg("tensor_name")); + function.def("__repr__", [](const ov::Function& self) { std::string class_name = py::cast(self).get_type().attr("__name__").cast(); std::stringstream shapes_ss; @@ -309,6 +397,16 @@ void regclass_graph_Function(py::module m) { return pybind_capsule; }); + function.def_property_readonly("inputs", + (std::vector>(ov::Function::*)()) & ov::Function::inputs); + function.def_property_readonly( + "inputs", + (std::vector>(ov::Function::*)() const) & ov::Function::inputs); + function.def_property_readonly("outputs", + (std::vector>(ov::Function::*)()) & ov::Function::outputs); + function.def_property_readonly( + "outputs", + (std::vector>(ov::Function::*)() const) & ov::Function::outputs); function.def_property_readonly("name", &ov::Function::get_name); function.def_property("friendly_name", &ov::Function::get_friendly_name, &ov::Function::set_friendly_name); } diff --git a/runtime/bindings/python/src/pyopenvino/graph/node_output.cpp b/runtime/bindings/python/src/pyopenvino/graph/node_output.cpp index 56ac60e3ba9f64..8d1cfcec5bb7d0 100644 --- a/runtime/bindings/python/src/pyopenvino/graph/node_output.cpp +++ b/runtime/bindings/python/src/pyopenvino/graph/node_output.cpp @@ -11,68 +11,5 @@ namespace py = pybind11; -void regclass_graph_Output(py::module m) { - py::class_, std::shared_ptr>> output(m, "Output", py::dynamic_attr()); - output.doc() = "openvino.impl.Output wraps ov::Output"; - - output.def("get_node", - &ov::Output::get_node, - R"( - Get node referenced by this output handle. - - Returns - ---------- - get_node : Node - Node object referenced by this output handle. - )"); - output.def("get_index", - &ov::Output::get_index, - R"( - The index of the output referred to by this output handle. - - Returns - ---------- - get_index : int - Index value as integer. - )"); - output.def("get_element_type", - &ov::Output::get_element_type, - R"( - The element type of the output referred to by this output handle. - - Returns - ---------- - get_element_type : Type - Type of the output. 
- )"); - output.def("get_shape", - &ov::Output::get_shape, - R"( - The shape of the output referred to by this output handle. - - Returns - ---------- - get_shape : Shape - Shape of the output. - )"); - output.def("get_partial_shape", - &ov::Output::get_partial_shape, - R"( - The partial shape of the output referred to by this output handle. - - Returns - ---------- - get_partial_shape : PartialShape - PartialShape of the output. - )"); - output.def("get_target_inputs", - &ov::Output::get_target_inputs, - R"( - A set containing handles for all inputs targeted by the output - referenced by this output handle. - Returns - ---------- - get_target_inputs : Set[Input] - Set of Inputs. - )"); -} +template void regclass_graph_Output(py::module m, std::string typestring); +template void regclass_graph_Output(py::module m, std::string typestring); diff --git a/runtime/bindings/python/src/pyopenvino/graph/node_output.hpp b/runtime/bindings/python/src/pyopenvino/graph/node_output.hpp index 9934c628b2e098..a88722ebc18448 100644 --- a/runtime/bindings/python/src/pyopenvino/graph/node_output.hpp +++ b/runtime/bindings/python/src/pyopenvino/graph/node_output.hpp @@ -5,7 +5,80 @@ #pragma once #include +#include + +#include "openvino/core/node_output.hpp" namespace py = pybind11; -void regclass_graph_Output(py::module m); +template +void regclass_graph_Output(py::module m, std::string typestring) +{ + auto pyclass_name = py::detail::c_str((typestring + std::string("Output"))); + auto docs = py::detail::c_str((std::string("openvino.impl.") + typestring + std::string("Output wraps ov::Output<") + typestring + std::string(" ov::Node >"))); + py::class_, std::shared_ptr>> output(m, + pyclass_name, + py::dynamic_attr()); + output.doc() = docs; + + output.def("get_node", + &ov::Output::get_node, + R"( + Get node referenced by this output handle. + + Returns + ---------- + get_node : Node or const Node + Node object referenced by this output handle. + )"); + output.def("get_index", + &ov::Output::get_index, + R"( + The index of the output referred to by this output handle. + + Returns + ---------- + get_index : int + Index value as integer. + )"); + output.def("get_element_type", + &ov::Output::get_element_type, + R"( + The element type of the output referred to by this output handle. + + Returns + ---------- + get_element_type : Type + Type of the output. + )"); + output.def("get_shape", + &ov::Output::get_shape, + R"( + The shape of the output referred to by this output handle. + + Returns + ---------- + get_shape : Shape + Shape of the output. + )"); + output.def("get_partial_shape", + &ov::Output::get_partial_shape, + R"( + The partial shape of the output referred to by this output handle. + + Returns + ---------- + get_partial_shape : PartialShape + PartialShape of the output. + )"); + output.def("get_target_inputs", + &ov::Output::get_target_inputs, + R"( + A set containing handles for all inputs targeted by the output + referenced by this output handle. + Returns + ---------- + get_target_inputs : Set[Input] + Set of Inputs. 
+ )"); +} diff --git a/runtime/bindings/python/src/pyopenvino/pyopenvino.cpp b/runtime/bindings/python/src/pyopenvino/pyopenvino.cpp index 7611244a38b410..d0088018454005 100644 --- a/runtime/bindings/python/src/pyopenvino/pyopenvino.cpp +++ b/runtime/bindings/python/src/pyopenvino/pyopenvino.cpp @@ -1,11 +1,10 @@ // Copyright (C) 2021 Intel Corporation // SPDX-License-Identifier: Apache-2.0 -#include #include -#include -#include +#include +#include #include #include "pyopenvino/graph/axis_set.hpp" @@ -21,20 +20,21 @@ # include "pyopenvino/graph/onnx_import/onnx_import.hpp" #endif #include "pyopenvino/core/containers.hpp" +#include "pyopenvino/core/core.hpp" +#include "pyopenvino/core/executable_network.hpp" #include "pyopenvino/core/ie_blob.hpp" -#include "pyopenvino/core/ie_core.hpp" #include "pyopenvino/core/ie_data.hpp" -#include "pyopenvino/core/ie_executable_network.hpp" #include "pyopenvino/core/ie_infer_queue.hpp" -#include "pyopenvino/core/ie_infer_request.hpp" #include "pyopenvino/core/ie_input_info.hpp" #include "pyopenvino/core/ie_network.hpp" #include "pyopenvino/core/ie_parameter.hpp" #include "pyopenvino/core/ie_preprocess_info.hpp" -#include "pyopenvino/core/ie_version.hpp" +#include "pyopenvino/core/infer_request.hpp" #include "pyopenvino/core/offline_transformations.hpp" +#include "pyopenvino/core/profiling_info.hpp" #include "pyopenvino/core/tensor.hpp" #include "pyopenvino/core/tensor_description.hpp" +#include "pyopenvino/core/version.hpp" #include "pyopenvino/graph/dimension.hpp" #include "pyopenvino/graph/layout.hpp" #include "pyopenvino/graph/ops/constant.hpp" @@ -53,9 +53,9 @@ namespace py = pybind11; std::string get_version() { - auto version = InferenceEngine::GetInferenceEngineVersion(); - std::string version_str = std::to_string(version->apiVersion.major) + "."; - version_str += std::to_string(version->apiVersion.minor) + "."; + auto version = ov::get_openvino_version(); + std::string version_str = std::to_string(OPENVINO_VERSION_MAJOR) + "."; + version_str += std::to_string(OPENVINO_VERSION_MINOR) + "."; version_str += version->buildNumber; return version_str; } @@ -63,26 +63,6 @@ std::string get_version() { PYBIND11_MODULE(pyopenvino, m) { m.doc() = "Package openvino.pyopenvino which wraps openvino C++ APIs"; m.def("get_version", &get_version); - py::enum_(m, "StatusCode") - .value("OK", InferenceEngine::StatusCode::OK) - .value("GENERAL_ERROR", InferenceEngine::StatusCode::GENERAL_ERROR) - .value("NOT_IMPLEMENTED", InferenceEngine::StatusCode::NOT_IMPLEMENTED) - .value("NETWORK_NOT_LOADED", InferenceEngine::StatusCode::NETWORK_NOT_LOADED) - .value("PARAMETER_MISMATCH", InferenceEngine::StatusCode::PARAMETER_MISMATCH) - .value("NOT_FOUND", InferenceEngine::StatusCode::NOT_FOUND) - .value("OUT_OF_BOUNDS", InferenceEngine::StatusCode::OUT_OF_BOUNDS) - .value("UNEXPECTED", InferenceEngine::StatusCode::UNEXPECTED) - .value("REQUEST_BUSY", InferenceEngine::StatusCode::REQUEST_BUSY) - .value("RESULT_NOT_READY", InferenceEngine::StatusCode::RESULT_NOT_READY) - .value("NOT_ALLOCATED", InferenceEngine::StatusCode::NOT_ALLOCATED) - .value("INFER_NOT_STARTED", InferenceEngine::StatusCode::INFER_NOT_STARTED) - .value("NETWORK_NOT_READ", InferenceEngine::StatusCode::NETWORK_NOT_READ) - .export_values(); - - py::enum_(m, "WaitMode") - .value("RESULT_READY", InferenceEngine::IInferRequest::WaitMode::RESULT_READY) - .value("STATUS_ONLY", InferenceEngine::IInferRequest::WaitMode::STATUS_ONLY) - .export_values(); regclass_graph_PyRTMap(m); regmodule_graph_types(m); @@ -92,7 
+72,6 @@ PYBIND11_MODULE(pyopenvino, m) { regclass_graph_PartialShape(m); regclass_graph_Node(m); regclass_graph_Input(m); - regclass_graph_Output(m); regclass_graph_NodeFactory(m); regclass_graph_Strides(m); regclass_graph_CoordinateDiff(m); @@ -113,6 +92,8 @@ PYBIND11_MODULE(pyopenvino, m) { regclass_graph_Variant(m); regclass_graph_VariantWrapper(m, std::string("String")); regclass_graph_VariantWrapper(m, std::string("Int")); + regclass_graph_Output(m, std::string("")); + regclass_graph_Output(m, std::string("Const")); regclass_Core(m); regclass_IENetwork(m); @@ -138,17 +119,16 @@ PYBIND11_MODULE(pyopenvino, m) { regclass_Tensor(m); // Registering specific types of containers - Containers::regclass_PyInputsDataMap(m); - Containers::regclass_PyConstInputsDataMap(m); - Containers::regclass_PyOutputsDataMap(m); - Containers::regclass_PyResults(m); + Containers::regclass_TensorIndexMap(m); + Containers::regclass_TensorNameMap(m); regclass_ExecutableNetwork(m); regclass_InferRequest(m); regclass_Version(m); regclass_Parameter(m); regclass_InputInfo(m); - regclass_InferQueue(m); + // regclass_InferQueue(m); + regclass_ProfilingInfo(m); regclass_PreProcessInfo(m); regmodule_offline_transformations(m); diff --git a/runtime/bindings/python/tests/__init__.py b/runtime/bindings/python/tests/__init__.py index 07e4bd87d06424..2804ebe61d2908 100644 --- a/runtime/bindings/python/tests/__init__.py +++ b/runtime/bindings/python/tests/__init__.py @@ -24,6 +24,7 @@ def xfail_test(reason="Mark the test as expected to fail", strict=True): skip_segfault = pytest.mark.skip(reason="Segmentation fault error") xfail_accuracy = xfail_test(reason="Accuracy") +xfail_issue_FLOAT_LIKE = xfail_test(reason="Use of bfloat16 or float16") xfail_issue_69444 = xfail_test(reason="failed with accuracy issue") xfail_issue_69443 = xfail_test(reason="Error in ref. 
implementation due to the empty pads_begin, pads_end") skip_issue_67415 = pytest.mark.skip(reason="RuntimeError: Unsupported data type for when filling blob!") @@ -141,3 +142,4 @@ def xfail_test(reason="Mark the test as expected to fail", strict=True): xfail_issue_63138 = xfail_test(reason="Missing ONNX Shape-15 support") xfail_issue_63643 = xfail_test(reason="RuntimeError: Unsupported operation of type: Convolution name") xfail_issue_54663 = xfail_test(reason="Disabled until MaxPool-8 is supported on CPU") +xfail_issue_68212 = xfail_test(reason="Unsupported reading model with bytes streams") diff --git a/runtime/bindings/python/tests/conftest.py b/runtime/bindings/python/tests/conftest.py index f7963486cf55af..55fc20acc4f500 100644 --- a/runtime/bindings/python/tests/conftest.py +++ b/runtime/bindings/python/tests/conftest.py @@ -78,6 +78,7 @@ def pytest_configure(config): config.addinivalue_line("markers", "skip_on_hetero: Skip test on HETERO") config.addinivalue_line("markers", "skip_on_template: Skip test on TEMPLATE") config.addinivalue_line("markers", "onnx_coverage: Collect ONNX operator coverage") + config.addinivalue_line("markers", "template_extension") config.addinivalue_line("markers", "dynamic_library: Runs tests only in dynamic libraries case") diff --git a/runtime/bindings/python/tests/runtime.py b/runtime/bindings/python/tests/runtime.py index cc5c49620a6c5e..61712cfd314cd8 100644 --- a/runtime/bindings/python/tests/runtime.py +++ b/runtime/bindings/python/tests/runtime.py @@ -7,11 +7,11 @@ from typing import Dict, List, Union import numpy as np -from openvino import Core, IENetwork, Blob, DataPtr + +from openvino import Core from openvino.exceptions import UserInputError from openvino.impl import Function, Node, PartialShape, Type -from openvino.opset1.ops import result from openvino.utils.types import NumericData, get_shape, get_dtype import tests @@ -32,46 +32,6 @@ def get_runtime(): return runtime() -def _convert_inputs(cnn_network: IENetwork) -> None: - """WA converts unsupported input images formats.""" - precision_map = { - "FP64": "FP32", - "I64": "I32", - "U32": "I32", - } - - for cnn_input in cnn_network.input_info: - try: - _precision = precision_map[cnn_network.input_info[cnn_input].precision] - cnn_network.input_info[cnn_input].precision = _precision - except KeyError: - pass - - -def _convert_val(val): - """WA converts unsupported input values.""" - if type(val) is np.ndarray: - if val.dtype == np.float64: - return np.array(val, dtype=np.float32) - elif val.dtype == np.int64: - return np.array(val, dtype=np.int32) - return np.array(val) - - return np.array(val, dtype=np.float32) - - -def apply_ng_type(output: DataPtr, ng_type: Type): - ng_ie_supported_type_map = { - Type.boolean.get_type_name(): "BOOL", - Type.f32.get_type_name(): "FP32", - Type.i8.get_type_name(): "I8", - Type.i32.get_type_name(): "I32", - Type.u8.get_type_name(): "U8", - } - if ng_type.get_type_name() in ng_ie_supported_type_map: - output.precision = ng_ie_supported_type_map[ng_type.get_type_name()] - - class Runtime(object): """Represents an nGraph runtime environment.""" @@ -120,25 +80,6 @@ def __repr__(self) -> str: params_string = ", ".join([param.name for param in self.parameters]) return "".format(self.function.get_name(), params_string) - def _get_ie_output_blob_name(self, outputs: Dict, ng_result: result) -> str: - if len(self.results) == 1: - return next(iter(outputs.keys())) - else: - prev_layer = ng_result.input(0).get_source_output() - out_name = 
prev_layer.get_node().get_friendly_name() - if prev_layer.get_node().get_output_size() != 1: - out_name += "." + str(prev_layer.get_index()) - return out_name - - def _get_ie_output_blob_buffer(self, output_blobs: Dict[str, Blob], ng_result: result) -> np.ndarray: - out_name = self._get_ie_output_blob_name(output_blobs, ng_result) - out_blob = output_blobs[out_name] - - if out_blob.tensor_desc.layout == "SCALAR": - return out_blob.buffer.reshape(()) - else: - return out_blob.buffer - def convert_buffers(self, source_buffers, target_dtypes): converted_buffers = [] for i in range(len(source_buffers)): @@ -157,30 +98,26 @@ def __call__(self, *input_values: NumericData) -> List[NumericData]: raise UserInputError( "Expected %s params, received not enough %s values.", len(self.parameters), len(input_values) ) - # ignore not needed input values - input_values = input_values[:len(self.parameters)] - - input_values = [_convert_val(input_value) for input_value in input_values] - input_shapes = [get_shape(input_value) for input_value in input_values] + param_types = [param.get_element_type() for param in self.parameters] param_names = [param.friendly_name for param in self.parameters] + # ignore not needed input values + input_values = [ + np.array(input_value[0], dtype=get_dtype(input_value[1])) + for input_value in zip(input_values[: len(self.parameters)], param_types) + ] + input_shapes = [get_shape(input_value) for input_value in input_values] + if self.network_cache.get(str(input_shapes)) is None: - cnn_network = IENetwork(self.function) + function = self.function if self.function.is_dynamic(): - cnn_network.reshape(dict(zip(param_names, input_shapes))) - # Convert unsupported inputs of the network - _convert_inputs(cnn_network) - self.network_cache[str(input_shapes)] = cnn_network + function.reshape(dict(zip(param_names, [PartialShape(i) for i in input_shapes]))) + self.network_cache[str(input_shapes)] = function else: - cnn_network = self.network_cache[str(input_shapes)] - - # set output blobs precission based on nG results - for ng_result in self.results: - ie_out_name = self._get_ie_output_blob_name(cnn_network.outputs, ng_result) - apply_ng_type(cnn_network.outputs[ie_out_name], ng_result.get_output_element_type(0)) + function = self.network_cache[str(input_shapes)] - executable_network = self.runtime.backend.load_network(cnn_network, self.runtime.backend_name) + executable_network = self.runtime.backend.compile_model(function, self.runtime.backend_name) for parameter, input in zip(self.parameters, input_values): parameter_shape = parameter.get_output_partial_shape(0) @@ -193,13 +130,16 @@ def __call__(self, *input_values: NumericData) -> List[NumericData]: ) request = executable_network.create_infer_request() - request.infer(dict(zip(param_names, input_values))) - - # Set order of output blobs compatible with nG Function - result_buffers = [self._get_ie_output_blob_buffer(request.output_blobs, result) - for result in self.results] - - # Since OV overwrite result data type we have to convert results to the original one. 
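For reference, the hunk below relies on the new request API, in which infer() returns the output buffers directly and output tensors can also be read back from the request afterwards; that is what makes the blob-name lookup deleted above unnecessary. A minimal standalone sketch of that flow, in which the IR paths, the "CPU" device and the zero-indexed input are illustrative assumptions rather than part of this patch:

import numpy as np
from openvino import Core

core = Core()
func = core.read_model(model="model.xml", weights="model.bin")  # hypothetical IR files
exec_net = core.compile_model(func, "CPU")
request = exec_net.create_infer_request()
# infer() returns the output buffers positionally, in the order of the function's results
results = request.infer({0: np.zeros((1, 3, 32, 32), dtype=np.float32)})
# the same data can be read back through the request after inference completes
buffers = [request.get_tensor(out).data for out in request.outputs]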
+ result_buffers = request.infer(dict(zip(param_names, input_values))) + # # Note: other methods to get result_buffers from request + # # First call infer with no return value: + # request.infer(dict(zip(param_names, input_values))) + # # Now use any of following options: + # result_buffers = [request.get_tensor(n).data for n in request.outputs] + # result_buffers = [request.get_output_tensor(i).data for i in range(len(request.outputs))] + # result_buffers = [t.data for t in request.output_tensors] + + # # Since OV overwrite result data type we have to convert results to the original one. original_dtypes = [get_dtype(result.get_output_element_type(0)) for result in self.results] converted_buffers = self.convert_buffers(result_buffers, original_dtypes) return converted_buffers diff --git a/runtime/bindings/python/tests/test_inference_engine/helpers.py b/runtime/bindings/python/tests/test_inference_engine/helpers.py deleted file mode 100644 index db48c1e9298da7..00000000000000 --- a/runtime/bindings/python/tests/test_inference_engine/helpers.py +++ /dev/null @@ -1,35 +0,0 @@ -# Copyright (C) 2021 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np -import cv2 -import os - - -def image_path(): - path_to_repo = os.environ["DATA_PATH"] - path_to_img = os.path.join(path_to_repo, "validation_set", "224x224", "dog.bmp") - return path_to_img - - -def model_path(is_myriad=False): - path_to_repo = os.environ["MODELS_PATH"] - if not is_myriad: - test_xml = os.path.join(path_to_repo, "models", "test_model", "test_model_fp32.xml") - test_bin = os.path.join(path_to_repo, "models", "test_model", "test_model_fp32.bin") - else: - test_xml = os.path.join(path_to_repo, "models", "test_model", "test_model_fp16.xml") - test_bin = os.path.join(path_to_repo, "models", "test_model", "test_model_fp16.bin") - return (test_xml, test_bin) - - -def read_image(): - n, c, h, w = (1, 3, 32, 32) - image = cv2.imread(image_path()) - if image is None: - raise FileNotFoundError("Input image not found") - - image = cv2.resize(image, (h, w)) / 255 - image = image.transpose((2, 0, 1)).astype(np.float32) - image = image.reshape((n, c, h, w)) - return image diff --git a/runtime/bindings/python/tests/test_inference_engine/test_core.py b/runtime/bindings/python/tests/test_inference_engine/test_core.py index febb7226d88e13..d5ee36a2b2ab03 100644 --- a/runtime/bindings/python/tests/test_inference_engine/test_core.py +++ b/runtime/bindings/python/tests/test_inference_engine/test_core.py @@ -8,9 +8,8 @@ from pathlib import Path import openvino.opset8 as ov -from openvino import Core, IENetwork, ExecutableNetwork, blob_from_file -from openvino.impl import Function, Shape, Type -from openvino.impl.op import Parameter +from openvino import Core, IENetwork, ExecutableNetwork, tensor_from_file +from openvino.impl import Function from openvino import TensorDesc, Blob from ..conftest import model_path, model_onnx_path, plugins_path @@ -40,7 +39,7 @@ def test_blobs(): @pytest.mark.skip(reason="Fix") -def test_ie_core_class(): +def test_core_class(): input_shape = [1, 3, 4, 4] param = ov.parameter(input_shape, np.float32, name="parameter") relu = ov.relu(param, name="relu") @@ -49,9 +48,9 @@ def test_ie_core_class(): cnn_network = IENetwork(func) - ie_core = Core() - ie_core.set_config({}, device_name="CPU") - executable_network = ie_core.load_network(cnn_network, "CPU", {}) + core = Core() + core.set_config({}, device_name="CPU") + executable_network = core.compile_model(cnn_network, "CPU", {}) td = TensorDesc("FP32", 
input_shape, "NCHW") @@ -72,96 +71,82 @@ def test_ie_core_class(): assert np.allclose(result, expected_output) -def test_load_network(device): +def test_compile_model(device): ie = Core() - net = ie.read_network(model=test_net_xml, weights=test_net_bin) - exec_net = ie.load_network(net, device) + func = ie.read_model(model=test_net_xml, weights=test_net_bin) + exec_net = ie.compile_model(func, device) assert isinstance(exec_net, ExecutableNetwork) -def test_read_network(): - ie_core = Core() - net = ie_core.read_network(model=test_net_xml, weights=test_net_bin) - assert isinstance(net, IENetwork) +def test_read_model_from_ir(): + core = Core() + func = core.read_model(model=test_net_xml, weights=test_net_bin) + assert isinstance(func, Function) - net = ie_core.read_network(model=test_net_xml) - assert isinstance(net, IENetwork) + func = core.read_model(model=test_net_xml) + assert isinstance(func, Function) -def test_read_network_from_blob(): - ie_core = Core() +def test_read_model_from_tensor(): + core = Core() model = open(test_net_xml).read() - blob = blob_from_file(test_net_bin) - net = ie_core.read_network(model=model, blob=blob) - assert isinstance(net, IENetwork) + tensor = tensor_from_file(test_net_bin) + func = core.read_model(model=model, weights=tensor) + assert isinstance(func, Function) -def test_read_network_from_blob_valid(): - ie_core = Core() - model = open(test_net_xml).read() - blob = blob_from_file(test_net_bin) - net = ie_core.read_network(model=model, blob=blob) - ref_net = ie_core.read_network(model=test_net_xml, weights=test_net_bin) - assert net.name == ref_net.name - assert net.batch_size == ref_net.batch_size - ii_net = net.input_info - ii_net2 = ref_net.input_info - o_net = net.outputs - o_net2 = ref_net.outputs - assert ii_net.keys() == ii_net2.keys() - assert o_net.keys() == o_net2.keys() - - -def test_read_network_as_path(): - ie_core = Core() - net = ie_core.read_network(model=Path(test_net_xml), weights=Path(test_net_bin)) - assert isinstance(net, IENetwork) +def test_read_model_as_path(): + core = Core() + func = core.read_model(model=Path(test_net_xml), weights=Path(test_net_bin)) + assert isinstance(func, Function) - net = ie_core.read_network(model=test_net_xml, weights=Path(test_net_bin)) - assert isinstance(net, IENetwork) + func = core.read_model(model=test_net_xml, weights=Path(test_net_bin)) + assert isinstance(func, Function) - net = ie_core.read_network(model=Path(test_net_xml)) - assert isinstance(net, IENetwork) + func = core.read_model(model=Path(test_net_xml)) + assert isinstance(func, Function) -def test_read_network_from_onnx(): - ie_core = Core() - net = ie_core.read_network(model=test_net_onnx) - assert isinstance(net, IENetwork) +def test_read_model_from_onnx(): + core = Core() + func = core.read_model(model=test_net_onnx) + assert isinstance(func, Function) -def test_read_network_from_onnx_as_path(): - ie_core = Core() - net = ie_core.read_network(model=Path(test_net_onnx)) - assert isinstance(net, IENetwork) +def test_read_model_from_onnx_as_path(): + core = Core() + func = core.read_model(model=Path(test_net_onnx)) + assert isinstance(func, Function) +@pytest.mark.xfail("68212") def test_read_net_from_buffer(): - ie_core = Core() + core = Core() with open(test_net_bin, "rb") as f: bin = f.read() with open(model_path()[0], "rb") as f: xml = f.read() - net = ie_core.read_network(model=xml, weights=bin) - assert isinstance(net, IENetwork) + func = core.read_model(model=xml, weights=bin) + assert isinstance(func, Function) 
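Reading the model XML from a raw bytes buffer is expected to fail above until issue 68212 is resolved; the string-plus-Tensor path exercised earlier in this file already works. A minimal sketch of the supported variant, reusing this file's fixtures (test_net_xml, test_net_bin):

from openvino import Core, tensor_from_file

core = Core()
with open(test_net_xml) as f:
    xml = f.read()
# weights passed as a Tensor instead of raw bytes
weights = tensor_from_file(test_net_bin)
func = core.read_model(model=xml, weights=weights)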
+@pytest.mark.xfail("68212") def test_net_from_buffer_valid(): - ie_core = Core() + core = Core() with open(test_net_bin, "rb") as f: bin = f.read() with open(model_path()[0], "rb") as f: xml = f.read() - net = ie_core.read_network(model=xml, weights=bin) - ref_net = ie_core.read_network(model=test_net_xml, weights=test_net_bin) - assert net.name == ref_net.name - assert net.batch_size == ref_net.batch_size - ii_net = net.input_info - ii_net2 = ref_net.input_info - o_net = net.outputs - o_net2 = ref_net.outputs - assert ii_net.keys() == ii_net2.keys() - assert o_net.keys() == o_net2.keys() + func = core.read_model(model=xml, weights=bin) + ref_func = core.read_model(model=test_net_xml, weights=test_net_bin) + assert func.name == ref_func.name + assert func.batch_size == ref_func.batch_size + ii_func = func.input_info + ii_func2 = ref_func.input_info + o_func = func.outputs + o_func2 = ref_func.outputs + assert ii_func.keys() == ii_func2.keys() + assert o_func.keys() == o_func2.keys() def test_get_version(device): @@ -230,15 +215,14 @@ def test_get_metric_str(): f"metric must be string but {type(param)} is returned" -def test_query_network(device): +def test_query_model(device): ie = Core() - net = ie.read_network(model=test_net_xml, weights=test_net_bin) - query_res = ie.query_network(network=net, device_name=device) - func_net = net.get_function() - ops_net = func_net.get_ordered_ops() - ops_net_names = [op.friendly_name for op in ops_net] - assert [key for key in query_res.keys() if key not in ops_net_names] == [], \ - "Not all network layers present in query_network results" + func = ie.read_model(model=test_net_xml, weights=test_net_bin) + query_res = ie.query_model(model=func, device_name=device) + ops_func = func.get_ordered_ops() + ops_func_names = [op.friendly_name for op in ops_func] + assert [key for key in query_res.keys() if key not in ops_func_names] == [], \ + "Not all network layers present in query_model results" assert next(iter(set(query_res.values()))) == device, "Wrong device for some layers" @@ -247,8 +231,8 @@ def test_query_network(device): def test_register_plugin(): ie = Core() ie.register_plugin("MKLDNNPlugin", "BLA") - net = ie.read_network(model=test_net_xml, weights=test_net_bin) - exec_net = ie.load_network(net, "BLA") + func = ie.read_model(model=test_net_xml, weights=test_net_bin) + exec_net = ie.compile_model(func, "BLA") assert isinstance(exec_net, ExecutableNetwork), \ "Cannot load the network to the registered plugin with name 'BLA'" @@ -264,21 +248,92 @@ def test_register_plugins(): elif platform == "win32": ie.register_plugins(plugins_win_xml) - net = ie.read_network(model=test_net_xml, weights=test_net_bin) - exec_net = ie.load_network(net, "CUSTOM") + func = ie.read_model(model=test_net_xml, weights=test_net_bin) + exec_net = ie.compile_model(func, "CUSTOM") assert isinstance(exec_net, ExecutableNetwork), "Cannot load the network to " \ "the registered plugin with name 'CUSTOM' " \ - "registred in the XML file" + "registered in the XML file" -def test_create_IENetwork_from_nGraph(): - element_type = Type.f32 - param = Parameter(element_type, Shape([1, 3, 22, 22])) - relu = ov.relu(param) - func = Function([relu], [param], "test") - cnnNetwork = IENetwork(func) - assert cnnNetwork is not None - func2 = cnnNetwork.get_function() - assert func2 is not None - assert len(func2.get_ops()) == 3 +@pytest.mark.skip(reason="Need to figure out if it's expected behaviour (fails with C++ API as well)") +def test_unregister_plugin(device): + ie = Core() + 
ie.unload_plugin(device) + func = ie.read_model(model=test_net_xml, weights=test_net_bin) + with pytest.raises(RuntimeError) as e: + ie.compile_model(func, device) + assert f"Device with '{device}' name is not registered in the InferenceEngine" in str(e.value) + + +@pytest.mark.xfail("68212") +@pytest.mark.template_extension +def test_add_extension(device): + model = bytes(b""" + + + + + + 2 + 2 + 2 + 1 + + + + + + + + 2 + 2 + 2 + 1 + + + + + 2 + 2 + 2 + 1 + + + + + + + 2 + 2 + 2 + 1 + + + + + + + + +""") + + core = Core() + if platform == "win32": + core.add_extension(extension_path="template_extension.dll") + else: + core.add_extension(extension_path="libtemplate_extension.so") + func = core.read_model(model=model, init_from_buffer=True) + assert isinstance(func, Function) + + # input_blob = next(iter(network.input_info)) + # n, c, h, w = network.input_info[input_blob].input_data.shape + + # input_values = np.ndarray(buffer=np.array([1, 2, 3, 4, 5, 6, 7, 8]), shape = (n, c, h, w), dtype=int) + # expected = np.ndarray(buffer=np.array([12, 13, 14, 15, 16, 17, 18, 19]), + # shape = (n, c, h, w), dtype=int) + # + # exec_network = core.compile_model(func, device) + # computed = exec_network.infer_new_request(inputs={input_blob : input_values}) + # output_blob = next(iter(network.outputs)) + # assert np.allclose(expected, computed[output_blob], atol=1e-2, rtol=1e-2) diff --git a/runtime/bindings/python/tests/test_inference_engine/test_executable_network.py b/runtime/bindings/python/tests/test_inference_engine/test_executable_network.py new file mode 100644 index 00000000000000..2a2e80b6c8cd52 --- /dev/null +++ b/runtime/bindings/python/tests/test_inference_engine/test_executable_network.py @@ -0,0 +1,295 @@ +# Copyright (C) 2021 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import os +import pytest +import numpy as np + +from ..conftest import model_path, image_path +from openvino.impl import Function, ConstOutput, Shape + +from openvino import Core, Tensor + +is_myriad = os.environ.get("TEST_DEVICE") == "MYRIAD" +test_net_xml, test_net_bin = model_path(is_myriad) + + +def read_image(): + import cv2 + n, c, h, w = (1, 3, 32, 32) + image = cv2.imread(image_path()) + if image is None: + raise FileNotFoundError("Input image not found") + + image = cv2.resize(image, (h, w)) / 255 + image = image.transpose((2, 0, 1)).astype(np.float32) + image = image.reshape((n, c, h, w)) + return image + + +def test_get_metric(device): + core = Core() + func = core.read_model(model=test_net_xml, weights=test_net_bin) + exec_net = core.compile_model(func, device) + network_name = exec_net.get_metric("NETWORK_NAME") + assert network_name == "test_model" + + +@pytest.mark.skipif(os.environ.get("TEST_DEVICE", "CPU") != "CPU", reason="Device dependent test") +def test_get_config(device): + core = Core() + if core.get_metric(device, "FULL_DEVICE_NAME") == "arm_compute::NEON": + pytest.skip("Can't run on ARM plugin due to CPU-dependent test") + func = core.read_model(model=test_net_xml, weights=test_net_bin) + exec_net = core.compile_model(func, device) + config = exec_net.get_config("PERF_COUNT") + assert config == "NO" + + +def test_get_runtime_function(device): + core = Core() + func = core.read_model(model=test_net_xml, weights=test_net_bin) + exec_net = core.compile_model(func, device) + runtime_func = exec_net.get_runtime_function() + assert isinstance(runtime_func, Function) + + +@pytest.mark.skip(reason="To be enabled after infer is implemented") +def test_export_import(): + core = Core() + func = 
core.read_model(model=test_net_xml, weights=test_net_bin) + exec_net = core.compile_model(func, "CPU") + exported_net_file = "exported_model.bin" + exec_net.export_model(network_model=exported_net_file) + assert os.path.exists(exported_net_file) + exec_net = core.import_network(exported_net_file, "CPU") + os.remove(exported_net_file) + img = read_image() + res = exec_net.infer({"data": img}) + assert np.argmax(res["fc_out"][0]) == 3 + del exec_net + del core + + +def test_get_input_i(device): + core = Core() + func = core.read_model(model=test_net_xml, weights=test_net_bin) + exec_net = core.compile_model(func, device) + input = exec_net.input(0) + input_node = input.get_node() + name = input_node.friendly_name + assert isinstance(input, ConstOutput) + assert name == "data" + + +def test_get_input_tensor_name(device): + core = Core() + func = core.read_model(model=test_net_xml, weights=test_net_bin) + exec_net = core.compile_model(func, device) + input = exec_net.input("data") + input_node = input.get_node() + name = input_node.friendly_name + assert isinstance(input, ConstOutput) + assert name == "data" + + +def test_get_input(device): + core = Core() + func = core.read_model(model=test_net_xml, weights=test_net_bin) + exec_net = core.compile_model(func, device) + input = exec_net.input() + input_node = input.get_node() + name = input_node.friendly_name + assert isinstance(input, ConstOutput) + assert name == "data" + + +def test_get_output_i(device): + core = Core() + func = core.read_model(model=test_net_xml, weights=test_net_bin) + exec_net = core.compile_model(func, device) + output = exec_net.output(0) + assert isinstance(output, ConstOutput) + + +def test_get_output(device): + core = Core() + func = core.read_model(model=test_net_xml, weights=test_net_bin) + exec_net = core.compile_model(func, device) + output = exec_net.output() + assert isinstance(output, ConstOutput) + + +def test_input_set_friendly_name(device): + core = Core() + func = core.read_model(model=test_net_xml, weights=test_net_bin) + exec_net = core.compile_model(func, device) + input = exec_net.input("data") + input_node = input.get_node() + input_node.set_friendly_name("input_1") + name = input_node.friendly_name + assert isinstance(input, ConstOutput) + assert name == "input_1" + + +def test_output_set_friendly_name(device): + core = Core() + func = core.read_model(model=test_net_xml, weights=test_net_bin) + exec_net = core.compile_model(func, device) + output = exec_net.output(0) + output_node = output.get_node() + output_node.set_friendly_name("output_1") + name = output_node.friendly_name + assert isinstance(output, ConstOutput) + assert name == "output_1" + + +def test_outputs(device): + core = Core() + func = core.read_model(model=test_net_xml, weights=test_net_bin) + exec_net = core.compile_model(func, device) + outputs = exec_net.outputs + assert isinstance(outputs, list) + assert len(outputs) == 1 + + +def test_outputs_items(device): + core = Core() + func = core.read_model(model=test_net_xml, weights=test_net_bin) + exec_net = core.compile_model(func, device) + outputs = exec_net.outputs + assert isinstance(outputs[0], ConstOutput) + + +def test_output_type(device): + core = Core() + func = core.read_model(model=test_net_xml, weights=test_net_bin) + exec_net = core.compile_model(func, device) + output = exec_net.output(0) + output_type = output.get_element_type().get_type_name() + assert output_type == "f32" + + +def test_output_shape(device): + core = Core() + func = core.read_model(model=test_net_xml, 
weights=test_net_bin) + exec_net = core.compile_model(func, device) + output = exec_net.output(0) + expected_shape = Shape([1, 10]) + assert str(output.get_shape()) == str(expected_shape) + + +def test_input_get_index(device): + core = Core() + func = core.read_model(model=test_net_xml, weights=test_net_bin) + exec_net = core.compile_model(func, device) + input = exec_net.input(0) + expected_idx = 0 + assert input.get_index() == expected_idx + + +def test_inputs(device): + core = Core() + func = core.read_model(model=test_net_xml, weights=test_net_bin) + exec_net = core.compile_model(func, device) + inputs = exec_net.inputs + assert isinstance(inputs, list) + assert len(inputs) == 1 + + +def test_inputs_items(device): + core = Core() + func = core.read_model(model=test_net_xml, weights=test_net_bin) + exec_net = core.compile_model(func, device) + inputs = exec_net.inputs + assert isinstance(inputs[0], ConstOutput) + + +def test_inputs_get_friendly_name(device): + core = Core() + func = core.read_model(model=test_net_xml, weights=test_net_bin) + exec_net = core.compile_model(func, device) + inputs = exec_net.inputs + input_0 = inputs[0] + node = input_0.get_node() + name = node.friendly_name + assert name == "data" + + +def test_inputs_set_friendly_name(device): + core = Core() + func = core.read_model(model=test_net_xml, weights=test_net_bin) + exec_net = core.compile_model(func, device) + inputs = exec_net.inputs + input_0 = inputs[0] + node = input_0.get_node() + node.set_friendly_name("input_0") + name = node.friendly_name + assert name == "input_0" + + +def test_inputs_docs(device): + core = Core() + func = core.read_model(model=test_net_xml, weights=test_net_bin) + exec_net = core.compile_model(func, device) + inputs = exec_net.inputs + input_0 = inputs[0] + expected_string = "openvino.impl.ConstOutput wraps ov::Output" + assert input_0.__doc__ == expected_string + + +def test_infer_new_request_numpy(device): + ie = Core() + func = ie.read_model(model=test_net_xml, weights=test_net_bin) + img = read_image() + exec_net = ie.compile_model(func, device) + res = exec_net.infer_new_request({"data": img}) + assert np.argmax(res) == 2 + + +def test_infer_new_request_tensor_numpy_copy(device): + ie = Core() + func = ie.read_model(model=test_net_xml, weights=test_net_bin) + img = read_image() + tensor = Tensor(img) + exec_net = ie.compile_model(func, device) + res_tensor = exec_net.infer_new_request({"data": tensor}) + res_img = exec_net.infer_new_request({"data": tensor}) + assert np.argmax(res_tensor) == 2 + assert np.argmax(res_tensor) == np.argmax(res_img) + + +def test_infer_tensor_numpy_shared_memory(device): + ie = Core() + func = ie.read_model(model=test_net_xml, weights=test_net_bin) + img = read_image() + img = np.ascontiguousarray(img) + tensor = Tensor(img, shared_memory=True) + exec_net = ie.compile_model(func, device) + res_tensor = exec_net.infer_new_request({"data": tensor}) + res_img = exec_net.infer_new_request({"data": tensor}) + assert np.argmax(res_tensor) == 2 + assert np.argmax(res_tensor) == np.argmax(res_img) + + +def test_infer_new_request_wrong_port_name(device): + ie = Core() + func = ie.read_model(model=test_net_xml, weights=test_net_bin) + img = read_image() + tensor = Tensor(img) + exec_net = ie.compile_model(func, device) + with pytest.raises(RuntimeError) as e: + exec_net.infer_new_request({"_data_": tensor}) + assert "Port for tensor name _data_ was not found." 
in str(e.value) + + +def test_infer_tensor_wrong_input_data(device): + ie = Core() + func = ie.read_model(model=test_net_xml, weights=test_net_bin) + img = read_image() + img = np.ascontiguousarray(img) + tensor = Tensor(img, shared_memory=True) + exec_net = ie.compile_model(func, device) + with pytest.raises(TypeError) as e: + exec_net.infer_new_request({4.5: tensor}) + assert "Incompatible key type!" in str(e.value) diff --git a/runtime/bindings/python/tests/test_inference_engine/test_infer_request.py b/runtime/bindings/python/tests/test_inference_engine/test_infer_request.py index 518a11cb37dead..d80907aedebc36 100644 --- a/runtime/bindings/python/tests/test_inference_engine/test_infer_request.py +++ b/runtime/bindings/python/tests/test_inference_engine/test_infer_request.py @@ -4,138 +4,186 @@ import numpy as np import os import pytest +import datetime +import time -from tests.test_inference_engine.helpers import model_path, read_image -from openvino import Core, Blob, TensorDesc, StatusCode - +from ..conftest import image_path, model_path +from openvino import Core, Tensor, ProfilingInfo is_myriad = os.environ.get("TEST_DEVICE") == "MYRIAD" test_net_xml, test_net_bin = model_path(is_myriad) -def test_get_perf_counts(device): - ie_core = Core() - net = ie_core.read_network(test_net_xml, test_net_bin) - ie_core.set_config({"PERF_COUNT": "YES"}, device) - exec_net = ie_core.load_network(net, device) - img = read_image() - request = exec_net.create_infer_request() - td = TensorDesc("FP32", [1, 3, 32, 32], "NCHW") - input_blob = Blob(td, img) - request.set_input({"data": input_blob}) - request.infer() - pc = request.get_perf_counts() - assert pc["29"]["status"] == "EXECUTED" - assert pc["29"]["layer_type"] == "FullyConnected" - del exec_net - del ie_core - del net - - -@pytest.mark.skipif(os.environ.get("TEST_DEVICE", "CPU") != "CPU", - reason=f"Can't run test on device {os.environ.get('TEST_DEVICE', 'CPU')}, " - "Dynamic batch fully supported only on CPU") -@pytest.mark.skip(reason="Fix") -def test_set_batch_size(device): - ie_core = Core() - ie_core.set_config({"DYN_BATCH_ENABLED": "YES"}, device) - net = ie_core.read_network(test_net_xml, test_net_bin) - net.batch_size = 10 - data = np.ones(shape=net.input_info["data"].input_data.shape) - exec_net = ie_core.load_network(net, device) - data[0] = read_image()[0] - request = exec_net.create_infer_request() - request.set_batch(1) - td = TensorDesc("FP32", [1, 3, 32, 32], "NCHW") - input_blob = Blob(td, data) - request.set_input({"data": input_blob}) - request.infer() - assert np.allclose(int(round(request.output_blobs["fc_out"].buffer[0][2])), 1), \ - "Incorrect data for 1st batch" - del exec_net - del ie_core - del net - - -@pytest.mark.skip(reason="Fix") -def test_set_zero_batch_size(device): - ie_core = Core() - net = ie_core.read_network(test_net_xml, test_net_bin) - exec_net = ie_core.load_network(net, device) - request = exec_net.create_infer_request() - with pytest.raises(ValueError) as e: - request.set_batch(0) - assert "Batch size should be positive integer number but 0 specified" in str(e.value) - del exec_net - del ie_core - del net - - -@pytest.mark.skip(reason="Fix") -def test_set_negative_batch_size(device): - ie_core = Core() - net = ie_core.read_network(test_net_xml, test_net_bin) - exec_net = ie_core.load_network(net, device) - request = exec_net.create_infer_request() - with pytest.raises(ValueError) as e: - request.set_batch(-1) - assert "Batch size should be positive integer number but -1 specified" in str(e.value) - del 
exec_net - del ie_core - del net +def read_image(): + import cv2 + n, c, h, w = (1, 3, 32, 32) + image = cv2.imread(image_path()) + if image is None: + raise FileNotFoundError("Input image not found") + image = cv2.resize(image, (h, w)) / 255 + image = image.transpose((2, 0, 1)).astype(np.float32) + image = image.reshape((n, c, h, w)) + return image -def test_blob_setter(device): - ie_core = Core() - net = ie_core.read_network(test_net_xml, test_net_bin) - exec_net_1 = ie_core.load_network(network=net, device_name=device) - net.input_info["data"].layout = "NHWC" - exec_net_2 = ie_core.load_network(network=net, device_name=device) +def test_get_profiling_info(device): + core = Core() + func = core.read_model(test_net_xml, test_net_bin) + core.set_config({"PERF_COUNT": "YES"}, device) + exec_net = core.compile_model(func, device) + img = read_image() + request = exec_net.create_infer_request() + request.infer({0: img}) + prof_info = request.get_profiling_info() + soft_max_node = next(node for node in prof_info if node.node_name == "fc_out") + assert soft_max_node.node_type == "Softmax" + assert soft_max_node.status == ProfilingInfo.Status.OPTIMIZED_OUT + assert isinstance(soft_max_node.real_time, datetime.timedelta) + assert isinstance(soft_max_node.cpu_time, datetime.timedelta) + assert isinstance(soft_max_node.exec_type, str) + + +def test_tensor_setter(device): + core = Core() + func = core.read_model(test_net_xml, test_net_bin) + exec_net_1 = core.compile_model(network=func, device_name=device) + exec_net_2 = core.compile_model(network=func, device_name=device) img = read_image() + tensor = Tensor(img) request1 = exec_net_1.create_infer_request() - tensor_desc = TensorDesc("FP32", [1, 3, img.shape[2], img.shape[3]], "NCHW") - img_blob1 = Blob(tensor_desc, img) - request1.set_input({"data": img_blob1}) - request1.infer() - res_1 = np.sort(request1.get_blob("fc_out").buffer) - - img = np.transpose(img, axes=(0, 2, 3, 1)).astype(np.float32) - tensor_desc = TensorDesc("FP32", [1, 3, 32, 32], "NHWC") - img_blob = Blob(tensor_desc, img) + request1.set_tensor("data", tensor) + t1 = request1.get_tensor("data") + + assert np.allclose(tensor.data, t1.data, atol=1e-2, rtol=1e-2) + + res = request1.infer({0: tensor}) + res_1 = np.sort(res[0]) + t2 = request1.get_tensor("fc_out") + assert np.allclose(t2.data, res[0].data, atol=1e-2, rtol=1e-2) + request = exec_net_2.create_infer_request() - request.set_blob("data", img_blob) - request.infer() - res_2 = np.sort(request.get_blob("fc_out").buffer) + res = request.infer({"data": tensor}) + res_2 = np.sort(request.get_tensor("fc_out").data) assert np.allclose(res_1, res_2, atol=1e-2, rtol=1e-2) + request.set_tensor("data", tensor) + t3 = request.get_tensor("data") + assert np.allclose(t3.data, t1.data, atol=1e-2, rtol=1e-2) + + +def test_set_tensors(device): + core = Core() + func = core.read_model(test_net_xml, test_net_bin) + exec_net = core.compile_model(func, device) + + data1 = read_image() + tensor1 = Tensor(data1) + data2 = np.ones(shape=(1, 10), dtype=np.float32) + tensor2 = Tensor(data2) + data3 = np.ones(shape=(1, 3, 32, 32), dtype=np.float32) + tensor3 = Tensor(data3) + data4 = np.zeros(shape=(1, 10), dtype=np.float32) + tensor4 = Tensor(data4) + + request = exec_net.create_infer_request() + request.set_tensors({"data": tensor1, "fc_out": tensor2}) + t1 = request.get_tensor("data") + t2 = request.get_tensor("fc_out") + assert np.allclose(tensor1.data, t1.data, atol=1e-2, rtol=1e-2) + assert np.allclose(tensor2.data, t2.data, atol=1e-2, 
rtol=1e-2) + + request.set_output_tensors({0: tensor2}) + output_node = exec_net.outputs[0] + t3 = request.get_tensor(output_node) + assert np.allclose(tensor2.data, t3.data, atol=1e-2, rtol=1e-2) + + request.set_input_tensors({0: tensor1}) + input_node = exec_net.inputs[0] + t4 = request.get_tensor(input_node) + assert np.allclose(tensor1.data, t4.data, atol=1e-2, rtol=1e-2) + + input_node = exec_net.inputs[0] + request.set_tensor(input_node, tensor3) + t5 = request.get_tensor(input_node) + assert np.allclose(tensor3.data, t5.data, atol=1e-2, rtol=1e-2) + + request.set_input_tensor(tensor3) + t6 = request.get_tensor(request.inputs[0]) + assert np.allclose(tensor3.data, t6.data, atol=1e-2, rtol=1e-2) + + request.set_input_tensor(0, tensor1) + t7 = request.get_tensor(request.inputs[0]) + assert np.allclose(tensor1.data, t7.data, atol=1e-2, rtol=1e-2) + + request.set_output_tensor(tensor2) + t8 = request.get_tensor(request.outputs[0]) + assert np.allclose(tensor2.data, t8.data, atol=1e-2, rtol=1e-2) + + request.set_output_tensor(0, tensor4) + t9 = request.get_tensor(request.outputs[0]) + assert np.allclose(tensor4.data, t9.data, atol=1e-2, rtol=1e-2) + def test_cancel(device): - ie_core = Core() - net = ie_core.read_network(test_net_xml, test_net_bin) - exec_net = ie_core.load_network(net, device) + core = Core() + func = core.read_model(test_net_xml, test_net_bin) + exec_net = core.compile_model(func, device) img = read_image() - td = TensorDesc("FP32", [1, 3, 32, 32], "NCHW") - input_blob = Blob(td, img) request = exec_net.create_infer_request() - def callback(req, code, array): - array.append(42) - - data = [] - request.set_completion_callback(callback, data) - request.set_input({"data": input_blob}) - request.async_infer() + request.start_async({0: img}) request.cancel() with pytest.raises(RuntimeError) as e: request.wait() assert "[ INFER_CANCELLED ]" in str(e.value) - # check if callback has executed - assert data == [42] - request.async_infer() - status = request.wait() - assert status == StatusCode.OK - assert data == [42, 42] + request.start_async({"data": img}) + request.cancel() + with pytest.raises(RuntimeError) as e: + request.wait_for(1) + assert "[ INFER_CANCELLED ]" in str(e.value) + + +def test_start_async(device): + core = Core() + func = core.read_model(test_net_xml, test_net_bin) + exec_net = core.compile_model(func, device) + img = read_image() + jobs = 3 + requests = [] + for _ in range(jobs): + requests.append(exec_net.create_infer_request()) + + def callback(callbacks_info): + time.sleep(0.01) + callbacks_info["finished"] += 1 + + callbacks_info = {} + callbacks_info["finished"] = 0 + for request in requests: + request.set_callback(callback, callbacks_info) + request.start_async({0: img}) + for request in requests: + request.wait() + assert callbacks_info["finished"] == jobs + + +def test_infer_mixed_keys(device): + core = Core() + func = core.read_model(test_net_xml, test_net_bin) + core.set_config({"PERF_COUNT": "YES"}, device) + exec_net = core.compile_model(func, device) + + img = read_image() + tensor = Tensor(img) + + data2 = np.ones(shape=(1, 10), dtype=np.float32) + tensor2 = Tensor(data2) + + request = exec_net.create_infer_request() + with pytest.raises(TypeError) as e: + request.infer({0: tensor, "fc_out": tensor2}) + assert "incompatible function arguments!"
in str(e.value) diff --git a/runtime/bindings/python/tests/test_inference_engine/test_output_const_node.py b/runtime/bindings/python/tests/test_inference_engine/test_output_const_node.py new file mode 100644 index 00000000000000..d7d1a2eea68a36 --- /dev/null +++ b/runtime/bindings/python/tests/test_inference_engine/test_output_const_node.py @@ -0,0 +1,83 @@ +# Copyright (C) 2021 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import os + +from ..conftest import model_path +from openvino.impl import ConstOutput, Shape, PartialShape, Type + +from openvino import Core + +is_myriad = os.environ.get("TEST_DEVICE") == "MYRIAD" +test_net_xml, test_net_bin = model_path(is_myriad) + + +def model_path(is_myriad=False): + path_to_repo = os.environ["MODELS_PATH"] + if not is_myriad: + test_xml = os.path.join(path_to_repo, "models", "test_model", "test_model_fp32.xml") + test_bin = os.path.join(path_to_repo, "models", "test_model", "test_model_fp32.bin") + else: + test_xml = os.path.join(path_to_repo, "models", "test_model", "test_model_fp16.xml") + test_bin = os.path.join(path_to_repo, "models", "test_model", "test_model_fp16.bin") + return (test_xml, test_bin) + + +def test_const_output_type(device): + core = Core() + func = core.read_model(model=test_net_xml, weights=test_net_bin) + exec_net = core.compile_model(func, device) + node = exec_net.input(0) + assert isinstance(node, ConstOutput) + + +def test_const_output_docs(device): + core = Core() + func = core.read_model(model=test_net_xml, weights=test_net_bin) + exec_net = core.compile_model(func, device) + node = exec_net.input(0) + expected_string = "openvino.impl.ConstOutput wraps ov::Output<const ov::Node>" + assert node.__doc__ == expected_string + + +def test_const_output_get_index(device): + core = Core() + func = core.read_model(model=test_net_xml, weights=test_net_bin) + exec_net = core.compile_model(func, device) + node = exec_net.input("data") + assert node.get_index() == 0 + + +def test_const_output_get_element_type(device): + core = Core() + func = core.read_model(model=test_net_xml, weights=test_net_bin) + exec_net = core.compile_model(func, device) + node = exec_net.input("data") + assert node.get_element_type() == Type.f32 + + +def test_const_output_get_shape(device): + core = Core() + func = core.read_model(model=test_net_xml, weights=test_net_bin) + exec_net = core.compile_model(func, device) + node = exec_net.input("data") + expected_shape = Shape([1, 3, 32, 32]) + assert str(node.get_shape()) == str(expected_shape) + + +def test_const_output_get_partial_shape(device): + core = Core() + func = core.read_model(model=test_net_xml, weights=test_net_bin) + exec_net = core.compile_model(func, device) + node = exec_net.input("data") + expected_partial_shape = PartialShape([1, 3, 32, 32]) + assert node.get_partial_shape() == expected_partial_shape + + +def test_const_output_get_target_inputs(device): + core = Core() + func = core.read_model(model=test_net_xml, weights=test_net_bin) + exec_net = core.compile_model(func, device) + outputs = exec_net.outputs + for node in outputs: + assert isinstance(node.get_target_inputs(), set) diff --git a/runtime/bindings/python/tests/test_inference_engine/test_tensor.py b/runtime/bindings/python/tests/test_inference_engine/test_tensor.py index d840bfa71f4a34..081334013a512b 100644 --- a/runtime/bindings/python/tests/test_inference_engine/test_tensor.py +++ b/runtime/bindings/python/tests/test_inference_engine/test_tensor.py @@ -4,11 +4,25 @@ import numpy as np import pytest -from
tests.test_inference_engine.helpers import read_image +from ..conftest import image_path from openvino import Tensor import openvino as ov +def read_image(): + import cv2 + + n, c, h, w = (1, 3, 32, 32) + image = cv2.imread(image_path()) + if image is None: + raise FileNotFoundError("Input image not found") + + image = cv2.resize(image, (h, w)) / 255 + image = image.transpose((2, 0, 1)).astype(np.float32) + image = image.reshape((n, c, h, w)) + return image + + @pytest.mark.parametrize("ov_type, numpy_dtype", [ (ov.impl.Type.f32, np.float32), (ov.impl.Type.f64, np.float64), diff --git a/runtime/bindings/python/tests/test_ngraph/test_basic.py b/runtime/bindings/python/tests/test_ngraph/test_basic.py index 02fb9f4ca3acdb..2d7deb1f47b9d1 100644 --- a/runtime/bindings/python/tests/test_ngraph/test_basic.py +++ b/runtime/bindings/python/tests/test_ngraph/test_basic.py @@ -17,8 +17,6 @@ from tests.runtime import get_runtime from tests.test_ngraph.util import run_op_node -from tests import skip_issue_67415 - def test_ngraph_function_api(): shape = [2, 2] @@ -35,6 +33,12 @@ def test_ngraph_function_api(): assert op_types == ["Parameter", "Parameter", "Parameter", "Add", "Multiply", "Result"] assert len(function.get_ops()) == 6 assert function.get_output_size() == 1 + assert ["A", "B", "C"] == [input.get_node().friendly_name for input in function.inputs] + assert ["Result"] == [output.get_node().get_type_name() for output in function.outputs] + assert function.input(0).get_node().friendly_name == "A" + assert function.output(0).get_node().get_type_name() == "Result" + assert function.input(tensor_name="A").get_node().friendly_name == "A" + assert function.output().get_node().get_type_name() == "Result" assert function.get_output_op(0).get_type_name() == "Result" assert function.get_output_element_type(0) == parameter_a.get_element_type() assert list(function.get_output_shape(0)) == [2, 2] @@ -48,7 +52,7 @@ def test_ngraph_function_api(): "dtype", [ np.float32, - pytest.param(np.float64, marks=skip_issue_67415), + np.float64, np.int8, np.int16, np.int32, @@ -173,7 +177,7 @@ def test_convert_to_float(destination_type, rand_range, in_dtype, expected_type) ) def test_convert_to_int(destination_type, expected_type): np.random.seed(133391) - input_data = (np.ceil(-8 + np.random.rand(2, 3, 4) * 16)).astype(np.float32) + input_data = (np.ceil(-8 + np.random.rand(2, 3, 4) * 16)).astype(expected_type) expected = np.array(input_data, dtype=expected_type) result = run_op_node([input_data], ops.convert, destination_type) assert np.allclose(result, expected) @@ -195,7 +199,7 @@ def test_convert_to_int(destination_type, expected_type): ) def test_convert_to_uint(destination_type, expected_type): np.random.seed(133391) - input_data = np.ceil(np.random.rand(2, 3, 4) * 16).astype(np.float32) + input_data = np.ceil(np.random.rand(2, 3, 4) * 16).astype(expected_type) expected = np.array(input_data, dtype=expected_type) result = run_op_node([input_data], ops.convert, destination_type) assert np.allclose(result, expected) diff --git a/runtime/bindings/python/tests/test_ngraph/test_sequence_processing.py b/runtime/bindings/python/tests/test_ngraph/test_sequence_processing.py index ebef086f174954..c394225230e8bd 100644 --- a/runtime/bindings/python/tests/test_ngraph/test_sequence_processing.py +++ b/runtime/bindings/python/tests/test_ngraph/test_sequence_processing.py @@ -6,8 +6,7 @@ import openvino.opset8 as ov from tests.runtime import get_runtime from tests.test_ngraph.util import run_op_node -from tests import 
(xfail_issue_47337, - xfail_accuracy) +from tests import (xfail_issue_47337) def test_onehot(): @@ -35,7 +34,6 @@ def test_one_hot(): assert np.allclose(result, excepted) -@xfail_accuracy def test_range(): start = 5 stop = 35 diff --git a/runtime/bindings/python/tests/test_onnx/test_backend.py b/runtime/bindings/python/tests/test_onnx/test_backend.py index 70b6702adfcd7b..381c0da547be2e 100644 --- a/runtime/bindings/python/tests/test_onnx/test_backend.py +++ b/runtime/bindings/python/tests/test_onnx/test_backend.py @@ -6,8 +6,8 @@ import onnx.backend.test from tests import ( BACKEND_NAME, + xfail_issue_FLOAT_LIKE, skip_rng_tests, - xfail_issue_67415, xfail_issue_33488, xfail_issue_33538, xfail_issue_33581, @@ -103,11 +103,15 @@ def expect_fail(test_case_path, xfail): # type: (str) -> None tests_expected_to_fail = [ ( - xfail_issue_67415, + xfail_issue_FLOAT_LIKE, + "OnnxBackendNodeModelTest.test_cast_BFLOAT16_to_FLOAT_cpu", "OnnxBackendNodeModelTest.test_cast_FLOAT16_to_DOUBLE_cpu", "OnnxBackendNodeModelTest.test_cast_FLOAT16_to_FLOAT_cpu", + "OnnxBackendNodeModelTest.test_cast_FLOAT_to_BFLOAT16_cpu", + "OnnxBackendNodeModelTest.test_castlike_BFLOAT16_to_FLOAT_expanded_cpu", "OnnxBackendNodeModelTest.test_castlike_FLOAT16_to_DOUBLE_expanded_cpu", "OnnxBackendNodeModelTest.test_castlike_FLOAT16_to_FLOAT_expanded_cpu", + "OnnxBackendNodeModelTest.test_castlike_FLOAT_to_BFLOAT16_expanded_cpu", "OnnxBackendNodeModelTest.test_max_float16_cpu", "OnnxBackendNodeModelTest.test_min_float16_cpu", "OnnxBackendNodeModelTest.test_mod_mixed_sign_float16_cpu", diff --git a/runtime/bindings/python/tests/test_onnx/test_onnx_external_data.py b/runtime/bindings/python/tests/test_onnx/test_onnx_external_data.py index cbeb316c79ad6c..8fd4fb89ca582d 100644 --- a/runtime/bindings/python/tests/test_onnx/test_onnx_external_data.py +++ b/runtime/bindings/python/tests/test_onnx/test_onnx_external_data.py @@ -12,9 +12,7 @@ def test_import_onnx_with_external_data(): model_path = os.path.join(os.path.dirname(__file__), "models/external_data.onnx") ie = Core() - network = ie.read_network(model=model_path) - - ng_function = network.get_function() + func = ie.read_model(model=model_path) dtype = np.float32 value_a = np.array([1.0, 3.0, 5.0], dtype=dtype) @@ -22,6 +20,6 @@ def test_import_onnx_with_external_data(): # third input [5.0, 1.0, 3.0] read from external file runtime = get_runtime() - computation = runtime.computation(ng_function) + computation = runtime.computation(func) result = computation(value_a, value_b) assert np.allclose(result, np.array([3.0, 3.0, 3.0], dtype=dtype)) diff --git a/runtime/bindings/python/tests/test_onnx/test_onnx_import.py b/runtime/bindings/python/tests/test_onnx/test_onnx_import.py index 2886ff592f3ef3..7f5387d20284f9 100644 --- a/runtime/bindings/python/tests/test_onnx/test_onnx_import.py +++ b/runtime/bindings/python/tests/test_onnx/test_onnx_import.py @@ -15,9 +15,7 @@ def test_import_onnx_function(): model_path = os.path.join(os.path.dirname(__file__), "models/add_abc.onnx") ie = Core() - network = ie.read_network(model=model_path) - - ng_function = network.get_function() + func = ie.read_model(model=model_path) dtype = np.float32 value_a = np.array([1.0], dtype=dtype) @@ -25,7 +23,7 @@ def test_import_onnx_function(): value_c = np.array([3.0], dtype=dtype) runtime = get_runtime() - computation = runtime.computation(ng_function) + computation = runtime.computation(func) result = computation(value_a, value_b, value_c) assert np.allclose(result, np.array([6], dtype=dtype)) @@ -49,5 
+47,19 @@ def test_simple_graph(): runtime = get_runtime() computation = runtime.computation(ng_model_function) - assert np.array_equal(computation(1, 2, 3)[0], np.array([6.0], dtype=np.float32)) - assert np.array_equal(computation(4, 5, 6)[0], np.array([15.0], dtype=np.float32)) + assert np.array_equal( + computation( + np.array([1], dtype=np.float32), + np.array([2], dtype=np.float32), + np.array([3], dtype=np.float32), + )[0], + np.array([6.0], dtype=np.float32), + ) + assert np.array_equal( + computation( + np.array([4], dtype=np.float32), + np.array([5], dtype=np.float32), + np.array([6], dtype=np.float32), + )[0], + np.array([15.0], dtype=np.float32), + ) diff --git a/runtime/bindings/python/tests/test_onnx/test_ops_unary.py b/runtime/bindings/python/tests/test_onnx/test_ops_unary.py index 041466663d23a5..201458c629a60b 100644 --- a/runtime/bindings/python/tests/test_onnx/test_ops_unary.py +++ b/runtime/bindings/python/tests/test_onnx/test_ops_unary.py @@ -11,8 +11,6 @@ from tests.runtime import get_runtime from tests.test_onnx.utils import get_node_model, import_onnx_model, run_model, run_node -from tests import skip_issue_67415 - @pytest.mark.parametrize( "input_data", @@ -333,7 +331,6 @@ def test_cast_to_bool(val_type, input_data): assert np.allclose(result, expected) -@skip_issue_67415 @pytest.mark.parametrize( "val_type, range_start, range_end, in_dtype", [ @@ -359,7 +356,7 @@ def test_cast_to_float(val_type, range_start, range_end, in_dtype): ) def test_cast_to_int(val_type): np.random.seed(133391) - input_data = np.ceil(-8 + np.random.rand(2, 3, 4) * 16) + input_data = np.ceil(-8 + np.random.rand(2, 3, 4) * 16).astype(val_type) expected = np.array(input_data, dtype=val_type) model = get_node_model("Cast", input_data, opset=6, to=onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[val_type]) @@ -372,7 +369,7 @@ def test_cast_to_int(val_type): ) def test_cast_to_uint(val_type): np.random.seed(133391) - input_data = np.ceil(np.random.rand(2, 3, 4) * 16) + input_data = np.ceil(np.random.rand(2, 3, 4) * 16).astype(val_type) expected = np.array(input_data, dtype=val_type) model = get_node_model("Cast", input_data, opset=6, to=onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[val_type]) diff --git a/runtime/bindings/python/tests/test_onnx/utils/onnx_helpers.py b/runtime/bindings/python/tests/test_onnx/utils/onnx_helpers.py index 53c5487d31476f..570c82619eda03 100644 --- a/runtime/bindings/python/tests/test_onnx/utils/onnx_helpers.py +++ b/runtime/bindings/python/tests/test_onnx/utils/onnx_helpers.py @@ -5,7 +5,7 @@ import onnx from onnx.mapping import NP_TYPE_TO_TENSOR_TYPE -from openvino import Core, Blob, TensorDesc +from openvino import Core, Tensor from openvino.impl import Function @@ -21,9 +21,7 @@ def np_dtype_to_tensor_type(data_type: np.dtype) -> int: def import_onnx_model(model: onnx.ModelProto) -> Function: onnx.checker.check_model(model) model_byte_string = model.SerializeToString() - ie = Core() - ie_network = ie.read_network(bytes(model_byte_string), Blob(TensorDesc("U8", [], "C"))) + func = ie.read_model(bytes(model_byte_string), Tensor(type=np.uint8, shape=[])) - ng_function = ie_network.get_function() - return ng_function + return func
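
For reviewers trying out the new API surface exercised by these tests, a minimal synchronous-inference sketch follows. It uses only calls introduced in this patch (Core.read_model, compile_model, create_infer_request, InferRequest.infer, get_tensor); the model file names and the "CPU" device string are placeholders, and the (1, 3, 32, 32) input matches the test model above.

    import numpy as np
    from openvino import Core, Tensor

    core = Core()
    # Placeholder paths; the tests resolve real ones via MODELS_PATH.
    func = core.read_model("test_model_fp32.xml", "test_model_fp32.bin")
    exec_net = core.compile_model(func, "CPU")

    request = exec_net.create_infer_request()
    img = np.zeros((1, 3, 32, 32), dtype=np.float32)  # dummy NCHW input
    # Keys are either input indices or tensor names; mixing the two raises
    # TypeError, as test_infer_mixed_keys checks.
    results = request.infer({0: Tensor(img)})
    out = request.get_tensor("fc_out")  # the same output, addressed by name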
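
The asynchronous path mirrors test_start_async and test_cancel: bind a callback with user data, kick off start_async, then wait (or wait_for with a timeout). A sketch reusing exec_net and img from the snippet above:

    def callback(userdata):
        # Invoked once the request completes; userdata is the object
        # bound in set_callback below.
        userdata["finished"] += 1

    info = {"finished": 0}
    request = exec_net.create_infer_request()
    request.set_callback(callback, info)
    request.start_async({0: Tensor(img)})
    request.wait()  # request.wait_for(1) instead bounds the wait with a timeout
    assert info["finished"] == 1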